+ ./ya make . -T --test-size=small --test-size=medium --stat --test-threads 52 --link-threads 12 -DUSE_EAT_MY_DATA --build relwithdebinfo -DDEBUGINFO_LINES_ONLY --bazel-remote-store --bazel-remote-base-uri http://cachesrv.internal:8081 --bazel-remote-username cache_user --bazel-remote-password-file /tmp/tmp.QGtQR8Wun2 --bazel-remote-put --dist-cache-max-file-size=209715200 -A --retest --stat -DCONSISTENT_DEBUG --no-dir-outputs --test-failure-code 0 --build-all --cache-size 2TB --force-build-depends --log-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/ya_log.txt --evlog-file /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/ya_evlog.jsonl --junit /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/junit.xml --build-results-report /home/runner/actions_runner/_work/ydb/ydb/tmp/results/try_1/report.json --output /home/runner/actions_runner/_work/ydb/ydb/tmp/out
Output root is subdirectory of Arcadia root, this may cause non-idempotent build
Configuring dependencies for platform default-linux-x86_64-relwithdebinfo
[2 ymakes processing] [8356/8356 modules configured]
Configuring dependencies for platform tools
[3 ymakes processing] [8999/8999 modules configured] [170/170 modules rendered]
[2 ymakes processing] [8999/8999 modules configured] [5304/5304 modules rendered]
Configuring dependencies for platform test_tool_tc1-global
Configuring tests execution
Configuring local and dist store caches
Configuration done. Preparing for execution
|33.3%| CLEANING SYMRES
[... successful [AR]/[CC]/[LD] {BAZEL_DOWNLOAD} progress entries omitted ...]
|53.3%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/libcore-mind-hive.a
[...]
|51.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut
|52.3%| PREPARE $(VCS)
[...]
|51.5%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp
[...]
|53.3%| PREPARE $(YMAKE_PYTHON3-4256832079)
[...]
$(B)/yql/essentials/utils/log/libessentials-utils-log.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/actor_type/liblibrary-actors-actor_type.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/table/libgateway-behaviour-table.global.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/libgeneric-connector-libcpp.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/providers/stat/expr_nodes/libproviders-stat-expr_nodes.a |53.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/threading/libessentials-utils-threading.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extension_common/libsrc-client-extension_common.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/logical/libkqp-opt-logical.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/harmonizer/libactors-core-harmonizer.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/common/libcpp-mapreduce-common.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libydb-core-protos.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/googletest/googletest/librestricted-googletest-googletest.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/hdr/libcpp-histogram-hdr.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/on_disk/chunks/libcpp-on_disk-chunks.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/DataStreams/liblibrary-arrow_clickhouse-DataStreams.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/proto/libproviders-generic-proto.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hdr_histogram/libcontrib-libs-hdr_histogram.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http/libcpp-mapreduce-http.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/libcore-tx-schemeshard.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/libydb-core-kqp.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/view/libgateway-behaviour-view.global.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/operations/libbehaviour-tablestore-operations.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/http_client/libcpp-mapreduce-http_client.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/expr_nodes/libproviders-yt-expr_nodes.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/library/user_job_statistics/libmapreduce-library-user_job_statistics.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/proto/libcore-ymq-proto.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/common/libkqp-workload_service-common.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/logging/libmapreduce-interface-logging.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/big_integer/libcpp-openssl-big_integer.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/holders/libcpp-openssl-holders.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/base/libcore-ymq-base.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/cm_client/libproviders-pq-cm_client.a |53.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/restricted/googletest/googlemock/librestricted-googletest-googlemock.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/config_clusters/libyt-lib-config_clusters.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/expr_traits/libyt-lib-expr_traits.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/liblibrary-aclib-protos.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/hash/libyt-lib-hash.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/interface/libcpp-mapreduce-interface.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/executer_actor/libcore-kqp-executer_actor.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/liblibrary-actors-protos.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/libydb-core-quoter.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/graph_reorder/libyt-lib-graph_reorder.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/lib/libyt-gateway-lib.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/graph/librestricted-boost-graph.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/serialization/librestricted-boost-serialization.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/init_yt_api/libyt-lib-init_yt_api.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/key_filter/libyt-lib-key_filter.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/qplayer/libyt-gateway-qplayer.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/events/libcore-persqueue-events.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/job/libproviders-yt-job.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/address_classification/libcore-mind-address_classification.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/lambda_builder/libyt-lib-lambda_builder.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/wilson/liblibrary-actors-wilson.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/log/libyt-lib-log.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_corei7/liblibs-hyperscan-runtime_corei7.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/mkql_helpers/libyt-lib-mkql_helpers.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/driver/libsrc-client-driver.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libapi-protos-annotations.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/res_pull/libyt-lib-res_pull.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/libessentials-sql-v1.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_avx2/liblibs-hyperscan-runtime_avx2.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/conclusion/libydb-library-conclusion.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/discovery/libsrc-client-discovery.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/runtime_core2/liblibs-hyperscan-runtime_core2.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/libydb-core-sys_view.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/core/liblibrary-actors-core.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/infer_schema/libyt-lib-infer_schema.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/schema/libyt-lib-schema.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/row_spec/libyt-lib-row_spec.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/url_mapper/libyt-lib-url_mapper.a |53.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/lib/skiff/libyt-lib-skiff.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/cpp/mapreduce/client/libcpp-mapreduce-client.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/provider/libproviders-generic-provider.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/rm_service/libcore-kqp-rm_service.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/mind/libydb-core-mind.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/interconnect/mock/libactors-interconnect-mock.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/mkql_dq/libproviders-yt-mkql_dq.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yson_helpers/libyt-lib-yson_helpers.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/pg_tables/libcore-sys_view-pg_tables.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/run_script_actor/libcore-kqp-run_script_actor.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/proto/libproviders-yt-proto.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/runtime/libcore-kqp-runtime.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/memory_log/liblibrary-actors-memory_log.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/ydb_discovery/libydb_cli_command_ydb_discovery.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.global.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/topics/libcore-kqp-topics.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/libcommon.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/opt/libproviders-yt-opt.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/expr_nodes/libproviders-ytflow-expr_nodes.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/interface/libytflow-integration-interface.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/hyperscan/libcpp-regex-hyperscan.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/rbo/libkqp-opt-rbo.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/out/libcore-protos-out.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/build/libyt-yt-build.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libapi-grpc-draft.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/ytflow/integration/proto/libytflow-integration-proto.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/query_tracker_client/libyt-client-query_tracker_client.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/actors/libkqp-workload_service-actors.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/init/libcpp-openssl-init.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/apache/arrow/liblibs-apache-arrow.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/crypto/libcpp-openssl-crypto.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/session_actor/libcore-kqp-session_actor.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/native/libyt-gateway-native.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/expr_nodes/libproviders-pq-expr_nodes.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/protos/libservices-bg_tasks-protos.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/converter/libarrow-csv-converter.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/libydb-core-resource_pools.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/granules/libreader-sys_view-granules.global.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/actor/libcore-ymq-actor.a |53.0%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libapi-grpc.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gmock_in_unittest/libcpp-testing-gmock_in_unittest.global.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/https/libyt-core-https.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.global.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/libkqp-opt-physical.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_extensions/libcpp-testing-gtest_extensions.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/prof/liblibrary-actors-prof.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/misc/isa_crc64/libisa-l_crc_yt_patch.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/libydb-core-scheme.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/auth/libyt-library-auth.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/libcore-kqp-workload_service.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/erasure/libyt-library-erasure.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base32/libcpp-string_utils-base32.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/decimal/libyt-library-decimal.a |53.1%| PREPARE $(LLD_ROOT-3808007503) |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/libydb-library-mkql_proto.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/proto/libproviders-pq-proto.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/numeric/libyt-library-numeric.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/procfs/libyt-library-procfs.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_proto_split/libproto_ast-gen-v1_proto_split.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/testlib/common/libactors-testlib-common.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/undumpable/libyt-library-undumpable.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/re2/libyt-library-re2.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/signals/libyt-library-signals.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/http/libyt-core-http.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/libyt-library-profiling.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/profiling/resource_tracker/liblibrary-profiling-resource_tracker.global.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/common/libproviders-solomon-common.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tvm/libyt-library-tvm.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tracing/libyt-library-tracing.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/quoter/public/libcore-quoter-public.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/security/libydb-core-security.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/partition_key_range/libcore-persqueue-partition_key_range.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/tz_types/libyt-library-tz_types.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/coordination/libsrc-client-coordination.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/ytprof/api/liblibrary-ytprof-api.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/opt/physical/effects/libopt-physical-effects.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/global_plugins/libydb-library-global_plugins.a |53.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yt_proto/yt/formats/libyt_proto-yt-formats.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/result/expr_nodes/libproviders-result-expr_nodes.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/interface/libproviders-dq-interface.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/types/uuid/libessentials-types-uuid.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/liblibrary-formats-arrow-protos.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/metrics/libproviders-common-metrics.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/quantile_digest/libyt-library-quantile_digest.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/method/libcpp-openssl-method.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/arrow_resolve/libproviders-common-arrow_resolve.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/tier/libcolumnshard-blobs_action-tier.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/database/libcore-statistics-database.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/controller/libcore-backup-controller.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/show_create/libcore-sys_view-show_create.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/actors/libcore-testlib-actors.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/naming_conventions/libydb-library-naming_conventions.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/core/libyt_proto-yt-core.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/service/libcore-statistics-service.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors_factory/libproviders-s3-actors_factory.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/provider/libcore-kqp-provider.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/common/libcore-sys_view-common.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/set/libcpp-unicode-set.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libcore-file_storage-proto.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/draft/libsrc-client-draft.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tracing/libydb-core-tracing.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/normalization/libcpp-unicode-normalization.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/provider/libproviders-pq-provider.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/histogram/libessentials-core-histogram.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/download/libcore-file_storage-download.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pretty_types_print/protobuf/liblibrary-pretty_types_print-protobuf.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/nodes/libcore-sys_view-nodes.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/proto/libfile_storage-http_download-proto.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ytalloc/api/libcpp-ytalloc-api.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/json/libcpp-yson-json.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/re2/libjsonpath-rewrapper-re2.global.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/facade/libessentials-core-facade.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/libessentials-core-issue.global.a |53.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/test_import/libtest_import_udf.so |53.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/s3/common/libproviders-s3-common.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/langver/libessentials-core-langver.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/manager/libcolumnshard-bg_tasks-manager.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/partition_stats/libcore-sys_view-partition_stats.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/minsketch/libessentials-core-minsketch.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/hyperscan/libcontrib-libs-hyperscan.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/libfq-libs-audit.a |53.3%| PREPARE $(PYTHON) |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/libydb-library-arrow_clickhouse.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing_common/libfq-libs-checkpointing_common.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_ansi/libproto_ast-gen-v1_ansi.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/query_stats/libcore-sys_view-query_stats.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpointing/libfq-libs-checkpointing.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/processor/libcore-sys_view-processor.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/libfq-libs-metrics.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/health/libfq-libs-health.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/cloud_audit/libfq-libs-cloud_audit.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt_proto/yt/client/libyt_proto-yt-client.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/init/libfq-libs-init.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/common/librow_dispatcher-format_handler-common.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1_antlr4/libproto_ast-gen-v1_antlr4.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/parsers/librow_dispatcher-format_handler-parsers.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/liblibs-row_dispatcher-format_handler.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/filters/librow_dispatcher-format_handler-filters.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/actors/libfq-libs-actors.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/libfq-libs-signer.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/events/liblibs-test_connection-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_compilation/liblibs-row_dispatcher-purecalc_compilation.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/liblibs-control_plane_storage-internal.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/tasks_packer/libfq-libs-tasks_packer.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/peephole_opt/libessentials-core-peephole_opt.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/ydb/libfq-libs-ydb.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/utils/liblibs-rate_limiter-utils.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/quoter_service/liblibs-rate_limiter-quoter_service.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/base_utils/libbase_utils.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/test_connection/libfq-libs-test_connection.a |53.3%| PREPARE $(CLANG_FORMAT-1286082657) |53.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/fq/libs/rate_limiter/events/liblibs-rate_limiter-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/client/linux/libsrc-client-linux.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/events/liblibs-checkpoint_storage-events.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/read_rule/libfq-libs-read_rule.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/rate_limiter/control_plane_service/liblibs-rate_limiter-control_plane_service.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/audit/events/liblibs-audit-events.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/libfq-libs-row_dispatcher.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/libfq-libs-control_plane_proxy.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/synchronization_service/libcompute-ydb-synchronization_service.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/gateway/libfq-libs-gateway.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/libfq-libs-hmac.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/croaring/libcontrib-libs-croaring.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.global.a |53.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/proto/liblibs-checkpoint_storage-proto.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/events/liblibs-control_plane_config-events.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/avx2/libhighwayhash-arch-avx2.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/arch/sse41/libhighwayhash-arch-sse41.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/common/libdq-actors-common.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/libfq-libs-quota_manager.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/protos/libcolumnshard-bg_tasks-protos.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/udf_resolver/libcore-qplayer-udf_resolver.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/highwayhash/libcontrib-libs-highwayhash.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/interface/libcore-url_lister-interface.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/user_data/libessentials-core-user_data.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/breakpad/src/liblibs-breakpad-src.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/proto/libjsonpath-rewrapper-proto.a |53.4%| [CP] {default-linux-x86_64, relwithdebinfo} $(B)/common_test.context |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/common/libcore-blobstorage-common.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_config/libfq-libs-control_plane_config.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pool_classifiers/libcore-sys_view-resource_pool_classifiers.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/local_db/libcolumnshard-data_accessor-local_db.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/private_client/libfq-libs-private_client.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/libsrc-client-topic.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/libydb-core-persqueue.a |53.4%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/resource_pools/libcore-sys_view-resource_pools.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/run/librun.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/interface/libqplayer-storage-interface.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/checkpoint_storage/libfq-libs-checkpoint_storage.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/libydb-core-tablet_flat.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/llvm16/libminikql-codegen-llvm16.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/control_plane/libcompute-ydb-control_plane.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/ydb/liblibs-compute-ydb.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_utils/libcli_utils.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/dom/libessentials-minikql-dom.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/sdk_core_access/libydb_sdk_core_access.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/llvm16/libminikql-computation-llvm16.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/actors/liblibs-control_plane_proxy-actors.a |53.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/service/libcore-sys_view-service.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/libminikql-jsonpath-rewrapper.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/chunks_limiter/libydb-library-chunks_limiter.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_log/libyql-utils-actor_log.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/sharding/libservices-lib-sharding.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/libydb-services-metadata.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/protos/liblibrary-operation_id-protos.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/status/libclient-types-status.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/openssl/io/libcpp-openssl-io.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/logs/libfq-libs-logs.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/tablets/libcore-sys_view-tablets.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/int128/liblibrary-cpp-int128.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/protos/libcore-tablet_flat-protos.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/storage/libcore-sys_view-storage.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/libproviders-s3-credentials.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/mock/libfq-libs-mock.a |53.5%| [CF] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/build_info.cpp |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/libydb-core-cms.a |53.7%| [CF] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/sandbox.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/getopt/small/libcpp-getopt-small.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/liblibrary-formats-arrow.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/oauth2_token_exchange/libtypes-credentials-oauth2_token_exchange.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/incrhuge/libcore-blobstorage-incrhuge.a |53.6%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/actors/log_backend/liblibrary-actors-log_backend.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/credentials/libclient-types-credentials.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/uuid/libsrc-library-uuid.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/libsrc-client-table.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/antlr4/libparser-common-antlr4.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/libfq-libs-control_plane_storage.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/basics/libcore-testlib-basics.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/test_tablet/libydb-core-test_tablet.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/libydb-library-folder_service.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/tablet/libcore-grpc_services-tablet.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/common/libessentials-parser-common.a |53.7%| PREPARE $(FLAKE8_LINTER-sbr:6561765464) |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/events/libproviders-s3-events.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/modifier/liblibrary-formats-arrow-modifier.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/shard/libcore-graph-shard.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libcore-config-protos.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/finalize_script_service/libcore-kqp-finalize_script_service.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/abstract/libcolumnshard-blobs_action-abstract.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_locks/locks/libcolumnshard-data_locks-locks.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/actors/libgrpc-server-actors.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/hullcompdelete/libvdisk-hullop-hullcompdelete.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/lexer_common/libessentials-parser-lexer_common.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tablet/libtx-columnshard-tablet.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/persqueue/obfuscate/libsdk-library-persqueue-obfuscate-v3.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/libessentials-minikql-computation.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/result/libsrc-client-result.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/string_utils/helpers/liblibrary-string_utils-helpers.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/stub/libudf-service-stub.global.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/mock/libblobstorage-pdisk-mock.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/libsrc-client-types.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/counters/libcolumnshard-blobs_action-counters.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packedtypes/liblibrary-cpp-packedtypes.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/config/libcpp-messagebus-config.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/table_creator/libydb-library-table_creator.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/libtx-columnshard-blobs_action.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/issue/libsrc-library-issue.a |53.7%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/ext_index/common/libservices-ext_index-common.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/common/context/libdata_sharing-common-context.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0/libproto_ast-gen-v0.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/common/libchanges-compaction-common.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/request/libservices-metadata-request.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.global.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet/libydb-core-tablet.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.global.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/local/libcolumnshard-blobs_action-local.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/sync_points/libsimple_reader-iterator-sync_points.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/actors/libservices-lib-actors.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/statistics/libproviders-s3-statistics.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/protos/libcolumnshard-blobs_action-protos.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/splitter/liblibrary-formats-arrow-splitter.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/storages_manager/libcolumnshard-blobs_action-storages_manager.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/switch/liblibrary-formats-arrow-switch.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/abstract/libservices-metadata-abstract.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/libydb-library-pdisk_io.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/interface/libparser-pg_wrapper-interface.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/constructor/libreader-plain_reader-constructor.global.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/libydb-core-blob_depot.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/libyql-essentials-minikql.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/abstract/libstorage-optimizer-abstract.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/proto/libparser-pg_catalog-proto.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/libcpp-protobuf-json.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/public/liblibrary-yaml_config-public.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_catalog/libessentials-parser-pg_catalog.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/pdisk_io/protos/liblibrary-pdisk_io-protos.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/log_backend/libydb-core-log_backend.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/dq_solomon_write_actor_ut.cpp |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/counters/libengines-changes-counters.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/libydb-services-persqueue_cluster_discovery.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr3/libparser-proto_ast-antlr3.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/libservices-metadata-secret.a |53.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/memory_controller/libydb-core-memory_controller.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/antlr4/libparser-proto_ast-antlr4.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/abstract/libcolumnshard-data_accessor-abstract.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/transaction/libcolumnshard-blobs_action-transaction.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/jsonpath/libproto_ast-gen-jsonpath.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/libiconv/static/liblibs-libiconv-static.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/protos/libcore-pgproxy-protos.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/raw_socket/libydb-core-raw_socket.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/local_pgwire/libydb-core-local_pgwire.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.global.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/libyql-essentials-ast.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/protos/libcore-public_http-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/libydb-core-pgproxy.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/input_transforms/libdq-actors-input_transforms.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/libydb-core-transfer.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/discovery/libydb-core-discovery.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/libservices-persqueue_cluster_discovery-cluster_ordering.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libcolumnshard-common-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/public_http/libydb-core-public_http.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/comp_nodes/libyql-dq-comp_nodes.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/libyql-dq-state.a |53.9%| PREPARE $(FLAKE8_PY3-715603131) |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/ast/serialize/libessentials-ast-serialize.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/interface/libcommon-arrow-interface.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/arrow/libproviders-common-arrow.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_config_base/libcore-driver_lib-cli_config_base.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/aggregator/libcore-statistics-aggregator.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/libydb-core-http_proxy.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v1/libproto_ast-gen-v1.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/load_test/libydb-core-load_test.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_v1/libydb-services-persqueue_v1.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ycloud/impl/liblibrary-ycloud-impl.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/blobs_action/bs/libcolumnshard-blobs_action-bs.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/events/libdata_sharing-modification-events.a |53.7%| [ld] {default-linux-x86_64, relwithdebinfo} $(B)/tools/flake8_linter/flake8_linter |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/expr_nodes/libproviders-clickhouse-expr_nodes.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/proto/libproviders-clickhouse-proto.a |53.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/schemeshard/olap/bg_tasks/adapter/libolap-bg_tasks-adapter.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ydb_proxy/libtx-replication-ydb_proxy.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/packers/liblibrary-cpp-packers.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/interop/libcpp-protobuf-interop.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/escape/libcpp-html-escape.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kafka_proxy/libydb-core-kafka_proxy.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/expr_nodes/libproviders-solomon-expr_nodes.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/libtx-columnshard-common.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/common/libpersqueue-ut-common.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_accessor/in_mem/libcolumnshard-data_accessor-in_mem.global.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/libproviders-generic-pushdown.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/syncer/libblobstorage-vdisk-syncer.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/http_proxy/authorization/liblibrary-http_proxy-authorization.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/transactions/libdata_sharing-modification-transactions.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/top_keeper/libcpp-containers-top_keeper.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/task_meta/libproviders-pq-task_meta.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/datetime/libessentials-minikql-datetime.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/common/libtx-schemeshard-common.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/parser/libminikql-jsonpath-parser.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/libsrc-client-common_client.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/native/libpq-gateway-native.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite_serial/libarrow-accessor-composite_serial.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/arrow/libessentials-minikql-arrow.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/interface/libcore-url_preprocessing-interface.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/abstract/libengines-changes-abstract.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/libproviders-s3-range_helpers.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/provider/libproviders-clickhouse-provider.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/serializations/libproviders-s3-serializations.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/libydb-library-schlab.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.global.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/async_io/libproviders-pq-async_io.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/proto_ast/gen/v0_proto_split/libproto_ast-gen-v0_proto_split.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/mounts/libcore-services-mounts.global.a |53.9%| PREPARE $(FLAKE8_PY2-2255386470) |53.9%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/gateway/dummy/libpq-gateway-dummy.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tx_allocator/libcore-tx-tx_allocator.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/sfh/libcpp-digest-sfh.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libcore-issue-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/http_download/libcore-file_storage-http_download.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/common/libstorage-actualizer-common.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/object_storage/libcore-external_sources-object_storage.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/defs/libcore-file_storage-defs.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/client/liblib-deprecated-client.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/libessentials-core-dq_integration.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/initiator/controller/libdata_sharing-initiator-controller.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/credentials/libessentials-core-credentials.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/account_lockout/liblibrary-login-account_lockout.a |54.1%| PREPARE $(TEST_TOOL_HOST-sbr:8865992733) |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/libydb-core-testlib.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/protos/libhistogram-adaptive-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/modification/tasks/libdata_sharing-modification-tasks.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/services/libessentials-core-services.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/arrow_kernels/request/libcore-arrow_kernels-request.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/histogram/adaptive/libcpp-histogram-adaptive.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/service/libproviders-dq-service.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/cache/liblibrary-login-cache.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/proto/libproviders-ydb-proto.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/protos/libcolumnshard-data_sharing-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/hyperloglog/liblibrary-cpp-hyperloglog.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/secret/accessor/libmetadata-secret-accessor.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/serializer/libformats-arrow-serializer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/actors/libproviders-s3-actors.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/expr_nodes/libproviders-ydb-expr_nodes.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/bg_tasks/abstract/libservices-bg_tasks-abstract.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/manager/libservices-metadata-manager.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_public/events/libclient-yc_public-events.a 
|54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/common/libschemeshard-olap-common.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/plain/libarrow-accessor-plain.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/libsrc-client-federated_topic.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/predicate/libcolumnshard-engines-predicate.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/portions/libcolumnshard-engines-portions.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/buffered/libmonlib-encode-buffered.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/export/libsrc-client-export.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/kesus/libydb-services-kesus.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/metrics/libcpp-monlib-metrics.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/schema/libschemeshard-olap-schema.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/save_load/libformats-arrow-save_load.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/import/libsrc-client-import.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/provider/libproviders-ydb-provider.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/construction/libchanges-actualization-construction.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/decimal/libsrc-library-decimal.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/operation/libsrc-client-operation.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/impl/libclient-federated_topic-impl.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/provider/libproviders-s3-provider.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sparsed/libarrow-accessor-sparsed.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/rate_limiter/libsrc-client-rate_limiter.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/layout/libschemeshard-olap-layout.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/llvm16/libminikql-invoke_builtins-llvm16.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/command_base/libydb_cli_command_base.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/exec/libdq-provider-exec.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/ss_tasks/libsrc-client-ss_tasks.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/public/libtx-coordinator-public.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common/libengines-reader-common.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/protos/libtx-coordinator-protos.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/operation/libclient-types-operation.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor/service/libtx-conveyor-service.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/experimental/libpublic-lib-experimental.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/service/libtx-limiter-service.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/usage/libtx-limiter-usage.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_v1/actors/libservices-persqueue_v1-actors.a 
|53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/datastreams/libsrc-client-datastreams.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/security/libydb-library-security.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/TextAPI/libllvm16-lib-TextAPI.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_stats/libclient-impl-ydb_stats.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp3/libcontrib-libs-nghttp3.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/exception/libcpp-monlib-exception.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/proto/liblibs-control_plane_storage-proto.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/libcore-tx-long_tx_service.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/grpc/libsolomon-solomon_accessor-grpc.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pire/libcpp-regex-pire.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/libcolumnshard-engines-scheme.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/transfer/libalter-in_store-transfer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/t1ha/libcontrib-libs-t1ha.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/reader/libformats-arrow-reader.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/xz/libcpp-streams-xz.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/liblibrary-login-protos.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/AggressiveInstCombine/liblib-Transforms-AggressiveInstCombine.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/no_percpu_cache/liblibs-tcmalloc-no_percpu_cache.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/coordinator/libcore-tx-coordinator.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/mediator/libcore-tx-mediator.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/libengines-changes-compaction.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/regex/pcre/libcpp-regex-pcre.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/openldap/libcontrib-libs-openldap.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme_types/libydb-core-scheme_types.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/tiering/libengines-scheme-tiering.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/plain/libchanges-compaction-plain.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/liblibrary-mkql_proto-protos.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/container/libabseil-cpp-tstring-y_absl-container.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/libyql-public-ydb_issue.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ncloud/impl/liblibrary-ncloud-impl.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Coroutines/liblib-Transforms-Coroutines.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/constructor/libreader-simple_reader-constructor.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sparsed/libchanges-compaction-sparsed.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lz4/libcontrib-libs-lz4.a |54.1%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/counter_time_keeper/liblibrary-persqueue-counter_time_keeper.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nayuki_md5/libcontrib-libs-nayuki_md5.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzmasdk/libcontrib-libs-lzmasdk.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/lzma/libcontrib-libs-lzma.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/compaction/sub_columns/libchanges-compaction-sub_columns.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/XML/liblibs-poco-XML.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/constructor/libreader-sys_view-constructor.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dqrun/dqrun |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/libcontrib-libs-opentelemetry-proto.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/topic_parser/liblibrary-persqueue-topic_parser.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/nghttp2/libcontrib-libs-nghttp2.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/libcpp-digest-argonish.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/profiling/libabseil-cpp-tstring-y_absl-profiling.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/simdjson/libcontrib-libs-simdjson.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/libcpp-protobuf-util.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/libydb-library-protobuf_printer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/provider/libproviders-yt-provider.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/tiering/libstorage-actualizer-tiering.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/ngtcp2/libcontrib-libs-ngtcp2.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/pcre32/liblibs-pcre-pcre32.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/loading/libcolumnshard-engines-loading.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/proto/libproviders-solomon-proto.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zlib/libcontrib-libs-zlib.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/insert_table/libcolumnshard-engines-insert_table.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/libcolumnshard-engines-changes.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/yaml/libcore-viewer-yaml.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/common_opt/libessentials-core-common_opt.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/pcre/libcontrib-libs-pcre.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/zstd/libcontrib-libs-zstd.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/query_actor/libydb-library-query_actor.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libcolumnshard-engines-protos.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/timezone_conversion/liblibrary-cpp-timezone_conversion.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/libcore-tx-sequenceshard.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unicode/punycode/libcpp-unicode-punycode.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tld/liblibrary-cpp-tld.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Instrumentation/liblib-Transforms-Instrumentation.a |54.2%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_ansi_antlr4/libantlr_ast-gen-v1_ansi_antlr4.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/ymq/http/libcore-ymq-http.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/InstCombine/liblib-Transforms-InstCombine.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/antlr_ast/gen/v1_antlr4/libantlr_ast-gen-v1_antlr4.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/mkql_simple_file/libproviders-common-mkql_simple_file.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_background_compaction.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Vectorize/liblib-Transforms-Vectorize.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/numeric/libabseil-cpp-absl-numeric.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Crypto/liblibs-poco-Crypto.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/malloc_extension/liblibs-tcmalloc-malloc_extension.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actors/libyql-utils-actors.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-compression/librestricted-aws-aws-c-compression.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Util/liblibs-poco-Util.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom/libstorage-indexes-bloom.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/abstract/libengines-reader-abstract.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/test_helper/libtx-columnshard-test_helper.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/ut_common/libtx-datashard-ut_common.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/utf8proc/libcontrib-libs-utf8proc.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/actor/libengines-reader-actor.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/types/libabseil-cpp-tstring-y_absl-types.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.global.a |52.5%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/yt/yt/client/libyt-yt-client.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/common/libengines-scheme-common.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/xxhash/libcontrib-libs-xxhash.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libdq-actors-protos.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/poco/Foundation/liblibs-poco-Foundation.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/pg/libessentials-sql-pg.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/mock/liblibrary-folder_service-mock.a |53.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/libcontrib-libs-protobuf.a |53.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/main.cpp |53.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydbd/export.cpp |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/random/librestricted-boost-random.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ydb/libydb-services-ydb.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure/libv1-lexer-antlr4_pure.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.a |53.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.global.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/llvm16/libcodec-codegen-llvm16.global.a |53.6%| [AR] 
{BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.global.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/utils/network/libessentials-utils-network.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/antlr4_pure_ansi/libv1-lexer-antlr4_pure_ansi.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/reflect/libsql-v1-reflect.a |53.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/ut_helpers/libtx-schemeshard-ut_helpers.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/llvm16/libyt-comp_nodes-llvm16.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/regex/libv1-lexer-regex.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/deprecated/split/libcpp-deprecated-split.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/base/libabseil-cpp-tstring-y_absl-base.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schemu/liblibrary-schlab-schemu.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/deprecated/read_batch_converter/libpersqueue-deprecated-read_batch_converter.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/debugging/libabseil-cpp-tstring-y_absl-debugging.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/hash/libabseil-cpp-tstring-y_absl-hash.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/protos/liblibrary-schlab-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/log/libabseil-cpp-tstring-y_absl-log.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/constructor/libreader-common_reader-constructor.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/numeric/libabseil-cpp-tstring-y_absl-numeric.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/solomon_accessor/client/libsolomon-solomon_accessor-client.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/thread_pool/libimpl-ydb_internal-thread_pool.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-auth/librestricted-aws-aws-c-auth.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/container/libabseil-cpp-absl-container.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/flags/libabseil-cpp-tstring-y_absl-flags.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/fbs/libclient-arrow-fbs.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/profiling/libabseil-cpp-absl-profiling.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/arrow/libyt-client-arrow.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-checksums/librestricted-aws-aws-checksums.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schine/liblibrary-schlab-schine.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/common_reader/iterator/libreader-common_reader-iterator.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/random/libabseil-cpp-absl-random.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/duplicates/libreader-simple_reader-duplicates.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp-tstring/y_absl/time/libabseil-cpp-tstring-y_absl-time.a |54.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/restricted/abseil-cpp/absl/status/libabseil-cpp-absl-status.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/file/libyt-gateway-file.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/collections/libsimple_reader-iterator-collections.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/abstract/liblibrary-workload-abstract.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/versions/libengines-scheme-versions.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/dq/llvm16/libcomp_nodes-dq-llvm16.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/chunks/libreader-sys_view-chunks.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signal_backtrace/libydb-library-signal_backtrace.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/nc_private/accessservice/libclient-nc_private-accessservice.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/stock/liblibrary-workload-stock.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/column_converters/libyt-library-column_converters.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/kv/liblibrary-workload-kv.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/skiff_ext/libyt-library-skiff_ext.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/plain_reader/iterator/libreader-plain_reader-iterator.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/actualizer/scheme/libstorage-actualizer-scheme.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/strings/libabseil-cpp-absl-strings.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/abseil-cpp/absl/types/libabseil-cpp-absl-types.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/context/fcontext_impl/libboost-context-fcontext_impl.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/util/proto/libprotobuf-util-proto.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/transaction/libengines-reader-transaction.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/io/libcpp-http-io.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/session_pool/libimpl-ydb_internal-session_pool.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/selector/liblcbuckets-constructor-selector.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/schoot/liblibrary-schlab-schoot.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/client/formats/libyt-client-formats.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-crt-cpp/librestricted-aws-aws-crt-cpp.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/core/libyt-yt-core.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/reader/sys_view/abstract/libreader-sys_view-abstract.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-io/librestricted-aws-aws-c-io.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/llvm16/lib/Transforms/Scalar/liblib-Transforms-Scalar.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/abstract/libengines-scheme-abstract.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/transform/libyql-dq-transform.a |54.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/restricted/boost/atomic/librestricted-boost-atomic.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-s3/librestricted-aws-aws-c-s3.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bit_io/liblibrary-cpp-bit_io.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/column/libengines-scheme-column.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/bzip/libblockcodecs-codecs-bzip.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/aws/aws-c-mqtt/librestricted-aws-aws-c-mqtt.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/container/librestricted-boost-container.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libydb-library-services.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/common/libscheme-defaults-common.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/retry/libimpl-ydb_internal-retry.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sse/liblibrary-cpp-sse.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libscheme-defaults-protos.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/legacy_zstd06/libblockcodecs-codecs-legacy_zstd06.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/exception/librestricted-boost-exception.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/avx2/libinternal-proxies-avx2.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/coroutine/librestricted-boost-coroutine.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zlib/libblockcodecs-codecs-zlib.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/signals/libydb-library-signals.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/iostreams/librestricted-boost-iostreams.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/random_provider/liblibrary-cpp-random_provider.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/liblibrary-cpp-blockcodecs.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/liboptimizer-lcbuckets-planner.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/indexes/abstract/libscheme-indexes-abstract.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/protos/libyaml-config-protos.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/ref/libinternal-proxies-ref.a |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_common.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/batching_timestamp_provider.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_client.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/watermark_runtime_data.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/table_mount_cache_detail.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/public.cpp |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/helpers.cpp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/libydb-core-viewer.a |53.7%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/acl.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/logical_type.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/query_client/query_statistics.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/query_client/query_builder.cpp |53.4%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/security_client.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/etc_client.cpp |53.6%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/common.cpp |53.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/config.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/helpers.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bloom_ngramm/libstorage-indexes-bloom_ngramm.global.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/helpers.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/internal_client.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/client_cache.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_transaction.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/queue_rowset.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/security_client/access_control.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_id_or_alias.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_reader.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/dynamic_table_transaction_mixin.cpp |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/partition_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/signature.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/producer_client.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/distributed_table_session.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/address_helpers.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/validator.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/queue_client/consumer_client.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_rename_descriptor.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/spec_patch.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/signature/generator.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/public.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/scheduler/operation_cache.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/connection_impl.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/blob_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/chunk_stripe_statistics.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/adapters.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/check_schema_compatibility.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/column_sort_schema.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/comparator.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/composite_compare.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/wire_row_stream.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/columnar_statistics.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_output.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/config.cpp |54.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_mount_cache.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/sticky_transaction_pool.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/transaction.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yt/library/formats/libyt-library-formats.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/public.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/config.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_batch.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/name_table.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/merge_table_schemas.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/data_statistics.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_base.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_codegen_cpp.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/record_helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/pipe.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_type_compatibility.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/row_buffer.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_consumer.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/config.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/time_text.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_dynamic_table_writer.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/serialize.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schemaless_row_reorderer.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_value.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_reader.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/schema_serialization_helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/timestamped_schema_helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/helpers.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/parser/pg_wrapper/libessentials-parser-pg_wrapper.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unversioned_row.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/table_upload_options.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/value_consumer.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/unordered_schemaful_reader.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/packet.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_io_options.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/public.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/validate_logical_type.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/versioned_row.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/helpers.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/options.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/tablet_client/config.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/public.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/table_client/wire_protocol.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_reader.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/helpers.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/table_client/key_bound_compressor.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/noop_timestamp_provider.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/remote_timestamp_provider.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/config.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/parser_detail.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/ypath/rich.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/object_client/helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/transaction_client/timestamp_provider_base.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/queue_transaction_mixin.cpp |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/query_tracker_client.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/private.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/journal_client.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/delegating_client.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/node_directory.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/timestamp_provider.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/operation_client.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/journal_writer.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/persistent_queue.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/extractor/libext_index-metadata-extractor.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/auth/libydb-services-auth.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.global.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/config.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/protos/libapi-protos-persqueue-deprecated.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/common/libservices-metadata-common.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/api/grpc/libapi-grpc-persqueue-deprecated.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/rewrapper/hyperscan/libjsonpath-rewrapper-hyperscan.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/config/libydb-services-config.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/lib/auth/libservices-lib-auth.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/discovery/libydb-services-discovery.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/local_discovery/libydb-services-local_discovery.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/jsonpath/libessentials-minikql-jsonpath.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/metadata/libservices-ext_index-metadata.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/ds_table/libservices-metadata-ds_table.a |54.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/services/view/libydb-services-view.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/cms/libydb-services-cms.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/replication/libydb-services-replication.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ext_index/service/libservices-ext_index-service.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_stream.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/file_writer.cpp |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/backup/libydb-services-backup.a |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/target_cluster_injecting_channel.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/tablet/libydb-services-tablet.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/monitoring/libydb-services-monitoring.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_writer.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rowset.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_writer.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/chunk_replica.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/row_batch_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/table_reader.cpp |54.0%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/skynet.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_client.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/dynamic_config/libydb-services-dynamic_config.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_partition_reader.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/shuffle_client.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/maintenance/libydb-services-maintenance.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/table_client.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/helpers.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/rate_limiter/libydb-services-rate_limiter.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_cache.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/node_tracker_client/helpers.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/config.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/deprecated/persqueue_v0/libservices-deprecated-persqueue_v0.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/llvm16/libminikql-comp_nodes-llvm16.a |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hydra/version.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chaos_client/replication_card_serialization.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/bundle_controller_client/bundle_controller_settings.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/ready_event_reader_base.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/chunk_client/helpers.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/infinite_entity.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/check_yson_token.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/merge_complex_types.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam_private/libsrc-client-iam_private.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/keyvalue/libydb-services-keyvalue.a |54.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/yt/yt/client/chunk_client/read_limit.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/cypress_client/public.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/hive/timestamp_map.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/uuid_text.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/election/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/file_client/config.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/job_tracker_client/public.cpp |54.1%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/transaction_impl.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/workload.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/complex_types/yson_format_conversion.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/journal_client/config.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/method_helpers.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/config.cpp |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/protocol.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/ipv6_address/liblibrary-cpp-ipv6_address.a |54.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/misc/io_tags.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/store/libschemeshard-olap-store.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/liblibrary-cpp-dwarf_backtrace.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/libydb-library-login.a |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/mon/liblibrary-schlab-mon.a |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/kafka/requests.cpp |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/result_formatter/libfq-libs-result_formatter.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/gateway/libproviders-solomon-gateway.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/ymq/libydb-services-ymq.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/libfq-libs-db_id_async_resolver_impl.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/transformer/liblibrary-formats-arrow-transformer.a |54.2%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_base.cpp |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/local_gateway/libproviders-dq-local_gateway.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/value/libsrc-client-value.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/spack/libmonlib-encode-spack.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/json/libmonlib-encode-json.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/common/liboperations-alter-common.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/libydb-core-erasure.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/fq/libydb-services-fq.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/dictionary/libarrow-accessor-dictionary.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/events/libproviders-solomon-events.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/columns/libschemeshard-olap-columns.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/url/libcpp-string_utils-url.a |54.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/src/library/persqueue/topic_parser_public/libsdk-library-persqueue-topic_parser_public-v3.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/resharding/libalter-in_store-resharding.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/composite/liblibrary-formats-arrow-accessor-composite.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/planner/libproviders-dq-planner.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/datastreams/libydb-services-datastreams.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/mkql/libproviders-dq-mkql.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/stats_collector/libproviders-dq-stats_collector.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/legacy_protobuf/protos/libencode-legacy_protobuf-protos.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/operation_id/libsrc-library-operation_id.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/string/libcpp-yt-string.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/library/jwt/libsrc-library-jwt.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/yson_value/libpublic-lib-yson_value.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/init/libcpp-xml-init.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/standalone/liboperations-alter-standalone.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/switch/libformats-arrow-switch.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/transformer/libformats-arrow-transformer.a |54.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/libydb-core-formats.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/libfq-libs-config.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/libcore-viewer-json.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/actorsys_test/libcore-util-actorsys_test.a |54.1%| PREPARE $(OS_SDK_ROOT-sbr:243881345) |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_proxy/events/liblibs-control_plane_proxy-events.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/service/libtx-replication-service.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/common_client/impl/libclient-common_client-impl.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/changes/actualization/controller/libchanges-actualization-controller.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/iam/libsrc-client-iam.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/liblibs-config-protos.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/events/liblibs-control_plane_storage-events.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/events/libolap-bg_tasks-events.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/cache/liblibrary-cpp-cache.a |54.0%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/actualizer/index/libstorage-actualizer-index.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/indent_text/libcpp-string_utils-indent_text.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_json/libydb-library-yaml_json.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner/libproviders-dq-task_runner.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/colorizer/liblibrary-cpp-colorizer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/thread/librestricted-boost-thread.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/dictionary/libformats-arrow-dictionary.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/terminate_handler/liblibrary-cpp-terminate_handler.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/liblibrary-cpp-charset.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/data_sharing/source/session/libdata_sharing-source-session.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/chunks/libengines-storage-chunks.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lz4/libblockcodecs-codecs-lz4.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/query/libsrc-client-query.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/atomizer/libcpp-containers-atomizer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/lzma/libblockcodecs-codecs-lzma.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/libydb-library-ydb_issue.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/purecalc_no_pg_wrapper/liblibs-row_dispatcher-purecalc_no_pg_wrapper.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/libcolumnshard-transactions-operators.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/libcore-blobstorage-groupinfo.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zc_memory_input/libcpp-streams-zc_memory_input.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/zstd/libcpp-streams-zstd.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/bits_storage/libstorage-indexes-bits_storage.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/lower_case/libcpp-digest-lower_case.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/snappy/libblockcodecs-codecs-snappy.global.a |54.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/codecs/zstd/libblockcodecs-codecs-zstd.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/adapters/issue/libcpp-adapters-issue.a |54.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse2/libinternal-proxies-sse2.a |54.2%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/tx/columnshard/engines/storage/granule/libengines-storage-granule.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/thrift/libcontrib-restricted-thrift.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/blockcodecs/core/libcpp-blockcodecs-core.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/ring_buffer/libcpp-containers-ring_buffer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/resource/liblibrary-cpp-resource.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/protos/libcore-viewer-protos.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/grpc/libcommon-token_accessor-grpc.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/libschemeshard-olap-operations.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/codecs/greedy_dict/libcpp-codecs-greedy_dict.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/count_min_sketch/libstorage-indexes-count_min_sketch.global.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/categories_bloom/libstorage-indexes-categories_bloom.global.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/2d_array/libcpp-containers-2d_array.a |54.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/charset/lite/libcpp-charset-lite.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/indexes/max/libstorage-indexes-max.global.a |53.9%| [CC] {BAZEL_DOWNLOAD} $(S)/yt/yt/client/api/rpc_proxy/client_impl.cpp |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/argonish/internal/proxies/sse41/libinternal-proxies-sse41.a |53.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/absl_flat_hash/libcpp-containers-absl_flat_hash.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/bzip2/libcpp-streams-bzip2.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/config/liblibrary-cpp-config.a |54.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/events/libdq-actors-events.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/tcmalloc/libcpp-malloc-tcmalloc.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/buffer/libengines-writer-buffer.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/intrusive_rb_tree/libcpp-containers-intrusive_rb_tree.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/compact_vector/libcpp-containers-compact_vector.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/comptrie/libcpp-containers-comptrie.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lbuckets/planner/liboptimizer-lbuckets-planner.a |54.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/csv/libcpp-string_utils-csv.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/sorted_vector/libcpp-containers-sorted_vector.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/value/libpublic-lib-value.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/actor/libcolumnshard-export-actor.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/stack_array/libcpp-containers-stack_array.a |54.0%| 
[AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/liblibrary-ydb_issue-proto.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/liblibrary-cpp-retry.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/containers/str_map/libcpp-containers-str_map.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/actor/libmessagebus_actor.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/retry/protos/libcpp-retry-protos.a |53.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/relaxed_escaper/libcpp-string_utils-relaxed_escaper.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/slice_builder/libcolumnshard-operations-slice_builder.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/compressors/libproviders-s3-compressors.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/level/liblcbuckets-constructor-level.global.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/sighandler/liblibrary-cpp-sighandler.a |54.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/constructor/liboptimizer-lcbuckets-constructor.global.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/scheme/liblibrary-cpp-scheme.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/abstract/libcolumnshard-hooks-abstract.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/make_request/libimpl-ydb_internal-make_request.a |53.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/clickhouse/client/libclickhouse_client_udf.global.a |53.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/liblibrary-cpp-messagebus.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/assert/libcpp-yt-assert.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/change_exchange/libydb-core-change_exchange.a |53.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/libyql-dq-actors.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/skiff/liblibrary-cpp-skiff.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/brotli/libcpp-streams-brotli.a |53.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lzma/libcpp-streams-lzma.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/libtx-columnshard-transactions.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/spilling/libdq-actors-spilling.a |53.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/hook/libcpp-testing-hook.a |53.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/unittest_main/libcpp-testing-unittest_main.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/selector/liblcbuckets-planner-selector.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/compute/libdq-actors-compute.a |53.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/storage/optimizer/lcbuckets/planner/level/liblcbuckets-planner-level.a |53.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/lib/libcommon-math-lib.a |53.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/udfs/common/unicode_base/lib/libcommon-unicode_base-lib.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/libcore-tx-scheme_board.a |53.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.global.a |53.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydbd/ydbd |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/base64/libcpp-string_utils-base64.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/quote/libcpp-string_utils-quote.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/breakpad/libydb-library-breakpad.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/plain_text_formatter/libyt-logging-plain_text_formatter.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/locks/libcolumnshard-transactions-locks.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.global.a |53.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blockstore/core/libcore-blockstore-core.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.global.a |52.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/parse_size/libcpp-string_utils-parse_size.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/keys/libydb-library-keys.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/pq/common/libproviders-pq-common.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/extensions/solomon_stats/libclient-extensions-solomon_stats.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/indexes/libschemeshard-olap-indexes.a |52.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/operations/alter/in_store/schema/libalter-in_store-schema.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/json/proto/libprotobuf-json-proto.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/poor_man_openmp/libcpp-threading-poor_man_openmp.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/events/libcolumnshard-export-events.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/common/libyql-dq-common.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/atomic/libcpp-threading-atomic.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/protos/libcolumnshard-transactions-protos.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/writer/libcolumnshard-engines-writer.a |52.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/protobuf/libmessagebus_protobuf.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/liblibrary-cpp-type_info.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/equeue/libcpp-threading-equeue.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/cron/libcpp-threading-cron.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/misc/libcpp-yt-misc.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/future/libcpp-threading-future.a |52.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/abstract/libsession-selector-abstract.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/libcolumnshard-export-session.global.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/hot_swap/libcpp-threading-hot_swap.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/db_id_async_resolver/libproviders-common-db_id_async_resolver.a |52.4%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/messagebus/monitoring/libcpp-messagebus-monitoring.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/logger/libimpl-ydb_internal-logger.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/queue/libcpp-threading-queue.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/skip_list/libcpp-threading-skip_list.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/thread_local/libcpp-threading-thread_local.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/liblibrary-cpp-yson.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/selector/backup/libsession-selector-backup.global.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/export/session/storage/s3/libsession-storage-s3.global.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/tasks/libyql-dq-tasks.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/string_utils/scan/libcpp-string_utils-scan.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libyql-dq-proto.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/task_scheduler/libcpp-threading-task_scheduler.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/exception/libcpp-yt-exception.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/libtx-columnshard-splitter.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/usage/liblimiter-grouped_memory-usage.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/threading/libcpp-yt-threading.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/time_provider/liblibrary-cpp-time_provider.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/backtrace/libcpp-yt-backtrace.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/type_info/tz/libcpp-type_info-tz.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/tdigest/liblibrary-cpp-tdigest.a |52.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/protos/libolap-bg_tasks-protos.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/misc/libcpp-http-misc.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/logging/libcpp-yt-logging.a |52.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/blocking_queue/libcpp-threading-blocking_queue.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/error/libcpp-yt-error.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/type_ann/libyql-dq-type_ann.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/unified_agent_client/proto/libcpp-unified_agent_client-proto.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/system/libcpp-yt-system.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/murmur/libcpp-digest-murmur.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resources/libtx-columnshard-resources.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/loading/libtx-columnshard-loading.a |52.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/oldmodule/libcpp-messagebus-oldmodule.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/iterator/liblibrary-cpp-iterator.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/yson_string/libcpp-yt-yson_string.a |52.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/xml/document/libcpp-xml-document.a |52.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/kqp_session_common/libimpl-ydb_internal-kqp_session_common.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/memory/libcpp-yt-memory.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.a |52.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/udfs/common/roaring/libroaring.global.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yson/node/libcpp-yson-node.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/version/libversion_definition.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/uri/liblibrary-cpp-uri.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/knn/libknn_udf.global.a |52.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/yt/cpu_clock/libcpp-yt-cpu_clock.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/portion/libcolumnshard-normalizer-portion.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/schema_version/libcolumnshard-normalizer-schema_version.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/column_families/libschemeshard-olap-column_families.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/sql_types/libessentials-core-sql_types.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/libproviders-common-http_gateway.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/common/libcpp-json-common.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/libcore-backup-common.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/arrow_clickhouse/Common/liblibrary-arrow_clickhouse-Common.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/disjoint_sets/liblibrary-cpp-disjoint_sets.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/old_crc/libcpp-digest-old_crc.a |51.9%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/www/libcpp-messagebus-www.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/runtime/libyql-dq-runtime.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/client/ydb_topic/include/libclient-ydb_topic-include.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/transactions/operators/ev_write/libtransactions-operators-ev_write.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/abstract/libcolumnshard-normalizer-abstract.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/tools/enum_parser/enum_serialization_runtime/libtools-enum_parser-enum_serialization_runtime.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/libcore-blobstorage-crypto.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/util/draft/libutil-draft.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/util/charset/libutil-charset.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tablet/libcolumnshard-normalizer-tablet.global.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/batch_builder/libcolumnshard-operations-batch_builder.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/common/libcolumnshard-operations-common.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/agent/libcore-blob_depot-agent.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/opt/libproviders-dq-opt.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/html/pcdata/libcpp-html-pcdata.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/json/easy_parse/libcpp-json-easy_parse.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/token_accessor/client/libcommon-token_accessor-client.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/messagebus/scheduler/libcpp-messagebus-scheduler.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/bridge/libblobstorage-dsproxy-bridge.a |51.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/providers/common/ut_helpers/libproviders-common-ut_helpers.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/audit/libydb-core-audit.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/resource_subscriber/libtx-columnshard-resource_subscriber.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/operations/libtx-columnshard-operations.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/actorlib_impl/libydb-core-actorlib_impl.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/grpc_connections/libimpl-ydb_internal-grpc_connections.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tx_reader/libtx-columnshard-tx_reader.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/impl/libcore-backup-impl.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/util/libyutil.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/base/libcore-blobstorage-base.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/chacha_512/libblobstorage-crypto-chacha_512.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/password_checker/liblibrary-login-password_checker.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/events/libsubscriber-abstract-events.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/subscriber/abstract/subscriber/libsubscriber-abstract-subscriber.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/libpublic-lib-json_value.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/lwtrace_probes/libcore-blobstorage-lwtrace_probes.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/libcore-base-generated.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/libcore-blobstorage-pdisk.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/splitter/abstract/libcolumnshard-splitter-abstract.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/lib/libcommon-compress_base-lib.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/runtime/libproviders-dq-runtime.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/libproviders-dq-actors.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/libblobstorage-vdisk-hullop.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.global.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json/libjson_udf.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/lib/libcommon-ip_base-lib.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/ip_base/libip_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/lib/libcommon-url_base-lib.a |50.3%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/libvdisk-hulldb-compstrat.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/hash/liblibrary-formats-arrow-hash.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/task_runner/libdq-actors-task_runner.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/compress_base/libcompress_udf.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/recovery/libvdisk-hulldb-recovery.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/static/libcommon-stat-static.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/datashard/libcore-tx-datashard.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/scheme_types/libpublic-lib-scheme_types.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/digest/md5/libcpp-digest-md5.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/fetch/libcpp-http-fetch.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/libpy3tools-ydb_serializable-replay.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/static/libcommon-topfreq-static.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/insert_table/libcolumnshard-normalizer-insert_table.global.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/logger/libydb-library-logger.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/schlab/probes/liblibrary-schlab-probes.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/libcore-client-metadata.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/tablestore/libgateway-behaviour-tablestore.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/version/libversion.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/libvdisk-hulldb-cache_block.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/libtx-long_tx_service-public.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/counters/libproviders-dq-counters.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/db_driver_state/libimpl-ydb_internal-db_driver_state.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/limiter/grouped_memory/service/liblimiter-grouped_memory-service.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/common/libproviders-dq-common.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/util/libcms-console-util.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/service/libtx-priorities-service.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/bg_tasks/transactions/libolap-bg_tasks-transactions.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/protos/libblobstorage-vdisk-protos.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/lib/libcore-control-lib.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/common/libtx-replication-common.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.global.a |50.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/control/libydb-core-control.a |50.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/api/client/yc_private/accessservice/libclient-yc_private-accessservice.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/priorities/usage/libtx-priorities-usage.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/manager/libschemeshard-olap-manager.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/libydb-core-util.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/dq_integration/transform/libcore-dq_integration-transform.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/localrecovery/libblobstorage-vdisk-localrecovery.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/gateway/behaviour/external_data_source/libgateway-behaviour-external_data_source.a |50.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/libvdisk-hulldb-fresh.a |50.2%| [AR] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/init/libcore-config-init.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/http/server/libcpp-http-server.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/config/libproviders-dq-config.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/dynamic_counters/libcpp-monlib-dynamic_counters.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dns/liblibrary-cpp-dns.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/opt/libyql-dq-opt.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/libcpp-monlib-encode.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/types/fatal_error_handlers/libclient-types-fatal_error_handlers.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceproxy/libcore-tx-sequenceproxy.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/bg_tasks/abstract/libcolumnshard-bg_tasks-abstract.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/normalizer/tables/libcolumnshard-normalizer-tables.global.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/cli_base/libcli_base.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/api/grpc/libdq-api-grpc.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tracing/usage/libtx-tracing-usage.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/helper/libproviders-dq-helper.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/tiering/libcore-tx-tiering.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/table/impl/libclient-table-impl.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/validators/libcms-console-validators.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/abstract/libarrow-accessor-abstract.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dot_product/liblibrary-cpp-dot_product.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/libblobstorage-vdisk-repl.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/impl/ydb_internal/common/libimpl-ydb_internal-common.a |50.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/solomon/actors/ut/ut_helpers.cpp |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/formats/arrow/accessor/sub_columns/libarrow-accessor-sub_columns.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/mime/types/libcpp-mime-types.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/query/libblobstorage-vdisk-query.a |50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/graph/protos/libcore-graph-protos.a 
|50.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/protos/libfq-libs-protos.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_services/counters/libcore-grpc_services-counters.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/common/libfq-libs-common.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/grpc/libfq-libs-grpc.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/scrub/libblobstorage-vdisk-scrub.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/pushdown/libproviders-common-pushdown.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug/libydb-core-debug.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.global.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/deprecated/kicli/liblib-deprecated-kicli.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/schemeshard/olap/options/libschemeshard-olap-options.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/boost/regex/librestricted-boost-regex.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/client/libfmr-coordinator-client.a |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/task_runner_actor/libproviders-dq-task_runner_actor.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/libblobstorage-vdisk-synclog.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/solomon/provider/libproviders-solomon-provider.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/engine/minikql/libcore-engine-minikql.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes/ut_user_attributes.cpp |50.3%| [PY] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o |50.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/proto_helpers/libfmr-request_options-proto_helpers.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/libpy3kqprun_recipe.global.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/common/libblobstorage-vdisk-common.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/services/metadata/initializer/libservices-metadata-initializer.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ut_utils/libpersqueue_public-ut-ut_utils.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/provider/libproviders-dq-provider.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/fmr_tool_lib/libyt-fmr-fmr_tool_lib.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/proto_helpers/libcoordinator-interface-proto_helpers.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |50.5%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/objcopy_4f055c289b3de8f2a1e827ae5c.o |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/common/libkqp-ut-common.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/libpy3simple_queue.global.a |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/skeleton/libblobstorage-vdisk-skeleton.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/library/cpp/threading/chunk_queue/libcpp-threading-chunk_queue.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk2/huge.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_output_channel_ut.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_coordinator.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/server/libcore-client-server.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/dq_arrow_helpers_ut.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/libpy3oltp_workload.global.a |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_log_merger_ut.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/runtime/ut/ut_helper.cpp |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_id_dict_ut.cpp |50.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_04f56802b68450abc8421282d0.o |50.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_6403bfa5c5e35b29a21c73fb0e.o |50.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/direct_read_ut.cpp |50.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/impl/libcoordinator-yt_coordinator_service-impl.a |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_kafka_functions.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/datastreams/datastreams_ut.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_serialization.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/metarequest_ut.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/actors_ut.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/clickhouse/actors/libproviders-clickhouse-actors.a |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_transaction_actor.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/kafka_test_client.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/partition_stats/partition_stats_ut.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dqrun/dqrun.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/actors/libproviders-yt-actors.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/actors/libproviders-ydb-actors.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/ydb/comp_nodes/libproviders-ydb-comp_nodes.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/pg_ext/libessentials-core-pg_ext.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/secret_masker/dummy/liblib-secret_masker-dummy.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/file/libqplayer-storage-file.a |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_columnshard.cpp |50.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_analyze_datashard.cpp |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.global.a |50.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/dqrun/lib/libtools-dqrun-lib.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/ut_fat/blobstorage_node_warden_ut_fat.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/lib/yt_url_lister/libyt-lib-yt_url_lister.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/interface/libcoordinator-yt_coordinator_service-interface.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/cbo/simple/libcore-cbo-simple.a |50.7%| [AR] 
{BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/utils/libyt-fmr-utils.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/yt_coordinator_service/file/libcoordinator-yt_coordinator_service-file.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/impl/libfmr-yt_job_service-impl.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_protocol.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/interface/libfmr-coordinator-interface.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/yql_facade_run/libessentials-tools-yql_facade_run.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/proto/libyt-fmr-proto.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/qplayer/storage/memory/libqplayer-storage-memory.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/incrhuge/ut/incrhuge_basic_ut.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/lexer/check/libv1-lexer-check.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/libcontrib-libs-tcmalloc.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_reboots/ut_reboots.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tcmalloc/libcontrib-libs-tcmalloc.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/hooks/testing/libcolumnshard-hooks-testing.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_preprocessing/libessentials-core-url_preprocessing.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/bindings/libyql-utils-bindings.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kafka_proxy/ut/ut_produce_actor.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/utils/actor_system/libyql-utils-actor_system.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/ut/graph_ut.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/yt/dq_task_preprocessor/libproviders-yt-dq_task_preprocessor.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/antlr4-c3/libcontrib-libs-antlr4-c3.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_datashard.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/worker/impl/libfmr-worker-impl.a |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/aggregator/ut/ut_traverse_columnshard.cpp |50.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/yt_job_service/file/libfmr-yt_job_service-file.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/statistics/ut_common/libcore-statistics-ut_common.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/impl/libfmr-job-impl.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/interface/libfmr-job_factory-interface.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job_factory/impl/libfmr-job_factory-impl.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/job/interface/libfmr-job-interface.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_stream_lookup.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/local/libfmr-table_data_service-local.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/table_data_service/interface/libfmr-table_data_service-interface.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_errors.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/replica_ut.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yt/yql/providers/yt/fmr/yt_job_service/interface/libfmr-yt_job_service-interface.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/cost/kqp_cost_ut.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/coordinator/impl/libfmr-coordinator-impl.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/tools/ytrun/lib/libtools-ytrun-lib.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/fmr/request_options/libyt-fmr-request_options.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/url_lister/libessentials-core-url_lister.a |50.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_events.pb.{h, cc} |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ext_index/ut/ut_ext_index.cpp |50.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/persqueue_error_codes_v1.pb.{h, cc} |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/gateway/fmr/libyt-gateway-fmr.a |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ut_cdc_stream_reboots.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ut_utils/libtopic-ut-ut_utils.a |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/row_dispatcher.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/login.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_error_codes.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_pipe.{pb.h ... grpc.pb.h} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query_stats.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/minikql.{pb.h ... grpc.pb.h} |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/jemalloc/libcontrib-libs-jemalloc.a |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/pathid.{pb.h ... grpc.pb.h} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/persqueue_common.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/key_range.{pb.h ... grpc.pb.h} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/services_common.pb.{h, cc} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view.{pb.h ... grpc.pb.h} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_scheme.pb.{h, cc} |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/tbb/libcontrib-libs-tbb.a |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/query_stats.{pb.h ... grpc.pb.h} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_topic.pb.{h, cc} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/auth.{pb.h ... 
grpc.pb.h} |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/metrics/ut/ydb-core-fq-libs-metrics-ut |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/signer/ut/ydb-core-fq-libs-signer-ut |50.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/pushdown/ut/ydb-library-yql-providers-generic-pushdown-ut |50.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/bin/solomon_emulator |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/read_actors_factory.pb.{h, cc} |50.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/object_listers/ut/ydb-library-yql-providers-s3-object_listers-ut |50.9%| PREPARE $(WITH_JDK17-sbr:7832760150) |50.9%| PREPARE $(JDK_DEFAULT-472926544) |51.0%| PREPARE $(WITH_JDK-sbr:7832760150) |51.0%| PREPARE $(JDK17-472926544) |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator/recipe/solomon_recipe |51.0%| PREPARE $(CLANG-1922233694) |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/simple_queue |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part1/ydb-tests-fq-yt-kqp_yt_file-part1 |50.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part2/ydb-tests-fq-yt-kqp_yt_file-part2 |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/tools/simple_json_diff/libpy3simple_json_diff.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/result_convert/libpy3benchmarks-runner-result_convert.global.a |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/library/recipes/docker_compose/docker_compose |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part6/ydb-tests-fq-yt-kqp_yt_file-part6 |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pending_fetcher.pb.{h, cc} |51.0%| PREPARE $(GDB) |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part18/ydb-tests-fq-yt-kqp_yt_file-part18 |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part12/ydb-tests-fq-yt-kqp_yt_file-part12 |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/resource_manager.pb.{h, cc} |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part9/ydb-tests-fq-yt-kqp_yt_file-part9 |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/base.{pb.h ... grpc.pb.h} |50.9%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk.{pb.h ... grpc.pb.h} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_disk_color.{pb.h ... grpc.pb.h} |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protobuf/libpy3protobuf-builtin_proto-protos_from_protobuf.global.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/protobuf/builtin_proto/protos_from_protoc/libpy3protobuf-builtin_proto-protos_from_protoc.global.a |51.0%| PREPARE $(CLANG-874354456) |51.0%| PREPARE $(CLANG18-1866954364) |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/patched/replxx/librestricted-patched-replxx.a |51.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/svnversion/svn_interface.c |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/channel_purpose.{pb.h ... grpc.pb.h} |51.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/build_info.cpp |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_config.{pb.h ... 
grpc.pb.h} |51.0%| [BI] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/buildinfo_data.h |51.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/pq_read |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/checkpoint_coordinator.pb.{h, cc} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_hive.{pb.h ... grpc.pb.h} |50.9%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/svnversion/svnversion.cpp |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/feature_flags.{pb.h ... grpc.pb.h} |51.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/library/cpp/build_info/build_info_static.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Jinja2/py3/libpy3python-Jinja2-py3.global.a |50.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/tests/ydb-tests-tools-kqprun-tests |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/MarkupSafe/py3/libpy3python-MarkupSafe-py3.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyJWT/py3/libpy3python-PyJWT-py3.global.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyHamcrest/py3/libpy3python-PyHamcrest-py3.global.a |51.0%| [CC] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/sandbox.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cachetools/py3/libpy3python-cachetools-py3.global.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.a |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.global.a |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/charset-normalizer/libpy3contrib-python-charset-normalizer.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cryptography/py3/libpy3python-cryptography-py3.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1-modules/py3/libpy3python-pyasn1-modules-py3.global.a |51.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/bin/ydbd_slice |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyasn1/py3/libpy3python-pyasn1-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wheel/libpy3contrib-python-wheel.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/rsa/py3/libpy3python-rsa-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests-oauthlib/libpy3contrib-python-requests-oauthlib.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/requests/py3/libpy3python-requests-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typing-extensions/py3/libpy3python-typing-extensions-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/tenacity/py3/libpy3python-tenacity-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/six/py3/libpy3python-six-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytest/py3/libpy3python-pytest-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml/py3/libpy3python-ruamel.yaml-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/typeguard/libpy3contrib-python-typeguard.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} 
$(B)/contrib/python/websocket-client/libpy3contrib-python-websocket-client.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pyrsistent/py3/libpy3python-pyrsistent-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pycparser/py3/libpy3python-pycparser-py3.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/setuptools/py3/libpy3python-setuptools-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/urllib3/py3/libpy3python-urllib3-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/yarl/libpy3contrib-python-yarl.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/python-dateutil/py3/libpy3python-python-dateutil-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/attrs/py3/libpy3python-attrs-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ruamel.yaml.clib/py3/libpy3python-ruamel.yaml.clib-py3.a |51.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/ydb_recipe |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ydb/py3/libpy3python-ydb-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/PyYAML/py3/libpy3python-PyYAML-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiosignal/libpy3contrib-python-aiosignal.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/aiohttp/libpy3contrib-python-aiohttp.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/protobuf/py3/libpy3python-protobuf-py3.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/cffi/py3/libpy3python-cffi-py3.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/iniconfig/libpy3contrib-python-iniconfig.global.a |51.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/query_cache/ydb-tests-functional-query_cache |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/frozenlist/libpy3contrib-python-frozenlist.a |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/tests/ydb-tests-stress-olap_workload-tests |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/future/py3/libpy3python-future-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/importlib-resources/libpy3contrib-python-importlib-resources.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/google-auth/py3/libpy3python-google-auth-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.text/libpy3contrib-python-jaraco.text.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/idna/py3/libpy3python-idna-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/more-itertools/py3/libpy3python-more-itertools-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.context/libpy3contrib-python-jaraco.context.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.collections/libpy3contrib-python-jaraco.collections.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jaraco.functools/py3/libpy3python-jaraco.functools-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jsonschema/py3/libpy3python-jsonschema-py3.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pluggy/py3/libpy3python-pluggy-py3.global.a |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/replay/replay |51.3%| [AR] 
{BAZEL_DOWNLOAD} $(B)/contrib/python/platformdirs/libpy3contrib-python-platformdirs.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/multidict/libpy3contrib-python-multidict.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/bucket_quoter/liblibrary-cpp-bucket_quoter.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/oauthlib/libpy3contrib-python-oauthlib.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/grpcio/py3/libpy3python-grpcio-py3.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/packaging/py3/libpy3python-packaging-py3.global.a |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/federated_query/kqp_federated_query_helpers_ut.cpp |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/key.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/hive.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/cms.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/msgbus_kv.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_stats.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/recipe/kqprun_recipe |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/kubernetes/libpy3contrib-python-kubernetes.global.a |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/replication.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/result_convert/result_convert |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_disk_states.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yds/ydb-tests-fq-yds |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/sys_view_types.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/s3/ydb-tests-datashard-s3 |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_pool.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/api/ydb-tests-functional-api |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/backpressure/ut_client/ydb-core-blobstorage-backpressure-ut_client |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/tools/simple_json_diff/simple_json_diff |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_coordination.pb.{h, cc} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/mem_alloc/ydb-tests-fq-mem_alloc |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/accessor.pb.{h, cc} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/rate_limiter.pb.{h, cc} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/actors.pb.{h, cc} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/services.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compaction.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/example/ydb-tests-example |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_transport.pb.{h, cc} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/oidc_proxy/ut/ydb-mvp-oidc_proxy-ut |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_operation.pb.{h, cc} |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/lib/cmds/ut/ydb-public-tools-lib-cmds-ut |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bridge.{pb.h ... grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/file_storage.pb.{h, cc} |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bind_channel_storage_pool.{pb.h ... 
grpc.pb.h} |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/fq.pb.{h, cc} |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/clickhouse.pb.{h, cc} |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/auth_config_validator_ut/auth_config_validator_ut.cpp |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_limits.{pb.h ... grpc.pb.h} |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/cloud/ydb-tests-functional-sqs-cloud |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/tests/ydb-tests-stress-simple_queue-tests |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/mixedpy/ydb-tests-stress-mixedpy |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/minidumps/ydb-tests-functional-minidumps |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/common/ydb-tests-functional-sqs-common |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_tests/ydb-tests-functional-scheme_tests |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.global.a |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/script_execution/ydb-tests-functional-script_execution |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/parametrized_queries/ydb-tests-datashard-parametrized_queries |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/large/ydb-tests-functional-tpc-large |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/proto/libetcd-grpc.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/service/ut/etcd_service_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut.cpp |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/log/tests/ydb-tests-stress-log-tests |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/etcd_proxy/service/libapps-etcd_proxy-service.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/libffi/libcontrib-restricted-libffi.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/llhttp/libcontrib-restricted-llhttp.a |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/messaging/ydb-tests-functional-sqs-messaging |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/with_quotas/ydb-tests-functional-sqs-with_quotas |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/lib2/py/libpy3python3-lib2-py.global.a |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/plans/ydb-tests-fq-plans |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/mdb_endpoint_generator_ut.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_common/libstreams-factory-open_common.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/snappy/libstreams-lz-snappy.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/lz4/libstreams-lz-lz4.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/factory/open_by_signature/libstreams-factory-open_by_signature.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Lib/libpy3tools-python3-Lib.global.a |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ttl_tiering/ydb-tests-olap-ttl_tiering |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/streams/lz/libcpp-streams-lz.a |51.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/large/ydb-tests-functional-sqs-large |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/threading/local_executor/libcpp-threading-local_executor.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_data_source_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/external_sources/iceberg_ddl_ut.cpp |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/multinode/ydb-tests-functional-sqs-multinode |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/wardens/ydb-tests-functional-wardens |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/object_storage_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/external_source_builder_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/validation/validators_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/runtime/ut/file_cache_ut.cpp |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/test_connection.pb.{h, cc} |51.6%| PREPARE $(CLANG16-1380963495) |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/library/cpp/lwtrace/protos/lwtrace.pb.{h, cc} |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/oltp_workload |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_events.{pb.h ... grpc.pb.h} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/db_pool.pb.{h, cc} |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/select/ydb-tests-datashard-select |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/ttl/ydb-tests-datashard-ttl |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/data.pb.{h, cc} |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_proxy.pb.{h, cc} |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/vector_index/ydb-tests-datashard-vector_index |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/libcontrib-tools-python3.a |51.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/objcopy_c96ef635306ccee8a5cf6359f1.o |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dump_restore/ydb-tests-datashard-dump_restore |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/oltp_workload/tests/ydb-tests-stress-oltp_workload-tests |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/libproviders-dq-scheduler.a |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ydb_cli/ydb-tests-functional-ydb_cli |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/yaml_config_parser_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/console_dumper_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_storage/internal/ut/utils_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/scheduler/ut/dq_scheduler_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/yaml_config_proto2yaml_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/yaml_config_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/ydb/main.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/commands/libcommands.a |51.6%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/objcopy_774cbd1f10ee287899289ecb3f.o |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/token_accessor.pb.{h, cc} |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut |51.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/malloc/jemalloc/libcpp-malloc-jemalloc.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_mirror3of4/main.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpcds-dbgen/libbenchmarks-gen-tpcds-dbgen.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/clickbench/liblibrary-workload-clickbench.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpcds/libbenchmarks-queries-tpcds.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/gen/tpch-dbgen/libbenchmarks-gen-tpch-dbgen.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/backup/libkikimr_backup.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/csv/table/libarrow-csv-table.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/liblibrary-workload-benchmark_base.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/queries/tpch/libbenchmarks-queries-tpch.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest/libcpp-testing-gtest.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gtest_main/libcpp-testing-gtest_main.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/log/liblibrary-workload-log.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/libclicommands.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpc_base/liblibrary-workload-tpc_base.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcds/liblibrary-workload-tpcds.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/query/liblibrary-workload-query.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/libydb_cli-commands-interactive.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/complete/libcommands-interactive-complete.a |51.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/liblibrary-workload-tpch.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/stat_visualization/libpublic-lib-stat_visualization.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/color/libinteractive-highlight-color.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/libcommands-interactive-highlight.a |51.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/workload/mixed/liblibrary-workload-mixed.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/transfer_workload/libtransfer_workload.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/topic/libtopic.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/import/liblib-ydb_cli-import.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/libtopic_workload.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/dump/liblib-ydb_cli-dump.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/debug/libsrc-client-debug.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/tools/dump_ds_init/main.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/config/libsrc-client-config.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/cms/libsrc-client-cms.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/monitoring/libsrc-client-monitoring.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/deprecated/liblibrary-yaml_config-deprecated.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/actors/ut/yql_arrow_push_down_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/grouped_issues_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/io_formats/arrow/scheme/csv_arrow_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/actors/actors_ut.cpp |51.3%| [BN] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/statistics_workload |51.6%| [BN] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/nemesis |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/ydb/ydb |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_dictionary.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/libcomplete-name-object.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_arrow.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/core/libv1-complete-core.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_hash.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/antlr4/libv1-complete-antlr4.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_column_filter.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_reader.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/test_connection/ut/test_connection_ut.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/highlight/libsql-v1-highlight.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/analysis/global/libcomplete-analysis-global.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/formats/arrow/ut/ut_program_step.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/libcomplete-name-service.a >> test_workload.py::flake8 [GOOD] |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/libsql-v1-complete.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/object/simple/libname-object-simple.a |51.5%| [AR] {BAZEL_DOWNLOAD} 
$(B)/yql/essentials/sql/v1/complete/name/service/binding/libname-service-binding.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/schema/libname-service-schema.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/text/libv1-complete-text.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/ranking/libname-service-ranking.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/static/libname-service-static.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/name/service/union/libname-service-union.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/sql/v1/complete/syntax/libv1-complete-syntax.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/lib/libblobstorage-ut_pdiskfit-lib.a |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/provider/ut/pushdown/pushdown_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/spilling/spilling_file_ut.cpp |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/health_config.pb.{h, cc} |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/postgresql/ydb-tests-functional-postgresql |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/cores/libpy3library-python-cores.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/reservoir_sampling/libpy3library-python-reservoir_sampling.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/windows/libpy3library-python-windows.global.a >> __main__.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> tablet_scheme_tests.py::flake8 [GOOD] |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_lib/libpy3python-testing-yatest_lib.global.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/retry/libpy3library-python-retry.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/func/libpy3library-python-func.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/certifi/libpy3library-python-certifi.global.a |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 >> test.py::flake8 [GOOD] |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/filelock/libpy3library-python-filelock.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/resource/libpy3library-python-resource.global.a |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_tests/flake8 >> tablet_scheme_tests.py::flake8 [GOOD] |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/fs/libpy3library-python-fs.global.a |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/yatest_common/libpy3python-testing-yatest_common.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.global.a |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |51.7%| 
[LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/libpy3library-python-runtime_py3.a |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/ydb_serializable/replay/flake8 >> __main__.py::flake8 [GOOD] |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/module/libpy3python-symbols-module.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/strings/libpy3library-python-strings.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/registry/libpython-symbols-registry.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/libc/libpython-symbols-libc.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/symbols/python/libpy3cpython-symbols-python.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/svn_version/libpy3library-python-svn_version.global.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/action_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/ut_sequence/dsproxy_config_retrieval.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/helpers_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/counters_ut.cpp >> test_partitioning.py::flake8 [GOOD] |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/dlq_helpers_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/queue_attributes_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/base/ut/secure_protobuf_printer_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/pq/provider/ut/yql_pq_ut.cpp |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/testing/group_overseer/libblobstorage-testing-group_overseer.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/minikql_compile/yql_expr_minikql_compile_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_testshard/main.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/s3/provider/yql_s3_listing_strategy_ut.cpp |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/flake8 >> test_partitioning.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpointing/ut/checkpoint_coordinator_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/object_storage/inference/ut/arrow_inference_ut.cpp |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stability/tool/flake8 >> __main__.py::flake8 [GOOD] |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/libpy3core-config-protos.global.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/gateway/ut/metadata_conversion.cpp |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blockstore_config.{pb.h ... 
grpc.pb.h} >> simple_json_diff::import_test [GOOD] >> result_convert::import_test [GOOD] |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/tools/simple_json_diff/import_test >> simple_json_diff::import_test [GOOD] |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/tests/ydb-tests-stress-transfer-tests |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/libpy3libs-config-protos.global.a |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/result_convert/import_test >> result_convert::import_test [GOOD] |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/pgproxy/ut/ydb-core-pgproxy-ut |51.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/transfer |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/graph/shard/ut/shard_ut.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/quota_manager/ut_helpers/liblibs-quota_manager-ut_helpers.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_pdiskfit/ut/main.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/config/init/init_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/control_plane_proxy/ut/control_plane_proxy_ut.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/global_worker_manager/libproviders-dq-global_worker_manager.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/common/http_gateway/mock/libcommon-http_gateway-mock.a |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/metrics/libproviders-dq-metrics.a |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/storagepoolmon/ut/storagepoolmon_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/service_node/main.cpp |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/libpy3olap-docs-generator.global.a |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/data_integrity_trails.{pb.h ... grpc.pb.h} |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/dq/actors/yt/libdq-actors-yt.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/defrag/ut/ydb-core-blobstorage-vdisk-defrag-ut |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statestorage.{pb.h ... 
grpc.pb.h} |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/benchmark_base/ut/ydb-library-workload-benchmark_base-ut |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |51.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/format_handler_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ru_calculator/ut_ru_calculator.cpp |51.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_parser_ut.cpp |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/common/libformat_handler-ut-common.a |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/topic_filter_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/table_writer_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_host_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_engine_flat_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/util_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/rows_proto_splitter_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/mkql_proto_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/engine/kikimr_program_builder_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_fat/dsproxy_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/cache_ut.cpp |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/docs/generator/generator |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_collector_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/iceberg_processor_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/common/entity_id_ut.cpp |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/dml/ydb-tests-datashard-dml |51.4%| [BN] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/olap_workload |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_storage_read_request_ut.cpp |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/netclassifier.{pb.h ... grpc.pb.h} |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/resource_broker.{pb.h ... grpc.pb.h} |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/suite_tests/ydb-tests-functional-suite_tests |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/shared_cache.{pb.h ... grpc.pb.h} |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/ydb-tests-olap |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_service_config.{pb.h ... 
grpc.pb.h} |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/service_node/service_node |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/libpy3core-scheme-protos.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/libpy3columnshard-common-protos.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/libpy3core-protos-schemeshard.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/libpy3ydb-core-protos.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/protos/libpy3columnshard-engines-protos.global.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_execute_mkql_ut.cpp |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part3/ydb-tests-fq-yt-kqp_yt_file-part3 |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_change_schema_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/large_results/kqp_scriptexec_results_ut.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/federated_query/common/libut-federated_query-common.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/batch_slice.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/splitter/ut/ut_splitter.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/tablet/rpc_restart_tablet_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_followers.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_vdisk/lib/libblobstorage-ut_vdisk-lib.a |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/server/msgbus_server_pq_metarequest_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/hulldb/cache_block/cache_block_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/actors/ut/database_resolver_ut.cpp >> ydb-tests-functional-script_execution::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/query_stats/query_stats_ut.cpp |51.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/encryption/ydb-tests-functional-encryption |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/backup/impl/local_partition_reader_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_constraints_ut.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/import_test >> ydb-tests-functional-script_execution::import_test [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_acl_ut.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/engines/scheme/defaults/protos/libpy3scheme-defaults-protos.global.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memory_controller_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_reassign.cpp |51.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/state/ut/ydb-library-yql-dq-state-ut |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/locks/range_treap_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/memory_controller/memtable_collection_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/ymq_ut.cpp >> Mvp::OpenIdConnectRequestWithIamTokenYandex [GOOD] >> Mvp::OpenIdConnectRequestWithIamTokenNebius [GOOD] >> Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodYandex [GOOD] >> 
Mvp::OpenIdConnectNonAuthorizeRequestWithOptionMethodNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieYandex [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckValidCookieNebius [GOOD] >> Mvp::OpenIdConnectProxyOnHttpsHost [GOOD] >> Mvp::OpenIdConnectFixLocationHeader [GOOD] >> Mvp::OpenIdConnectExchangeNebius [GOOD] >> Mvp::OpenIdConnectSessionServiceCheckAuthorizationFail [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectFullAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlow [GOOD] >> Mvp::OpenIdConnectWrongStateAuthorizationFlowAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_pool.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/pq_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scheme/kqp_scheme_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_node_enumeration_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/tenant_ut_local.cpp >> Mvp::OpenIdConnectSessionServiceCreateAuthorizationFail [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalid [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateAccessTokenInvalidAjax [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateOpenIdScopeMissed [GOOD] >> Mvp::OpenIdConnectAllowedHostsList [GOOD] >> Mvp::OpenIdConnectHandleNullResponseFromProtectedResource [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateNotFoundCookie [GOOD] >> Mvp::OpenIdConnectSessionServiceCreateGetWrongStateAndWrongCookie [GOOD] >> Mvp::OidcImpersonationStartFlow [GOOD] >> Mvp::OidcImpersonationStartNeedServiceAccountId [GOOD] >> Mvp::OidcImpersonationStopFlow [GOOD] >> Mvp::OidcImpersonatedAccessToProtectedResource [GOOD] >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] |51.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/objcopy_484246668d943fbae3b476ec7d.o |51.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/range_helpers/ut/ydb-library-yql-providers-s3-range_helpers-ut |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/kinesis_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/node_broker_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqrb_describes_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/ut_helpers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/kesus_quoter_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/rm_service/kqp_rm_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_chooser_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/list_all_topics_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/cache_eviction_ut.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/rows/libtest-libs-rows.a |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pq_ut.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/test/libs/table/libtest-libs-table.a ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/mvp/oidc_proxy/ut/unittest >> Mvp::OidcImpersonatedAccessNotAuthorized [GOOD] Test command err: 2025-06-03T10:20:24.769311Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.769487Z :MVP DEBUG: 
oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.772795Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.772913Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.774739Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.774762Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-06-03T10:20:24.775662Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.775749Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-06-03T10:20:24.776625Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.776641Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-06-03T10:20:24.777411Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.777492Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 204 2025-06-03T10:20:24.782719Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.782743Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.782779Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 400 2025-06-03T10:20:24.782784Z :MVP DEBUG: oidc_protected_page.cpp:178: Try to send request to HTTPS port 2025-06-03T10:20:24.782787Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.782803Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.784223Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.784243Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.784275Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 400 2025-06-03T10:20:24.793130Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.793152Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.793188Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 307 2025-06-03T10:20:24.794557Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.794576Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.794603Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-06-03T10:20:24.795682Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.795697Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.795735Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-06-03T10:20:24.796823Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.796835Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.796876Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-06-03T10:20:24.798011Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.798024Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user 
request bypass OIDC 2025-06-03T10:20:24.798046Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 302 2025-06-03T10:20:24.802989Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-06-03T10:20:24.803237Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_79632E6F617574682E7964622D766965776572: c2Vz****aWU= (CE0CB168)) 2025-06-03T10:20:24.803244Z :MVP DEBUG: oidc_protected_page_nebius.cpp:93: Exchange session token 2025-06-03T10:20:24.803382Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-06-03T10:20:24.803399Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.803416Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.810368Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 401 2025-06-03T10:20:24.817085Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-03T10:20:24.817235Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.817325Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.818632Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-06-03T10:20:24.819887Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.819904Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.819934Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.826562Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-03T10:20:24.826699Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.826743Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.828000Z :MVP DEBUG: oidc_session_create_yandex.cpp:69: SessionService.Create(): OK 2025-06-03T10:20:24.829138Z :MVP DEBUG: oidc_protected_page_yandex.cpp:25: SessionService.Check(): OK 2025-06-03T10:20:24.829153Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.829179Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.834291Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.834335Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-03T10:20:24.839271Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.839315Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-03T10:20:24.844161Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.844327Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.845987Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 401 2025-06-03T10:20:24.850769Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.850903Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.852368Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-06-03T10:20:24.857127Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 
2025-06-03T10:20:24.857193Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.858736Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 400 2025-06-03T10:20:24.863504Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.863669Z :MVP DEBUG: oidc_session_create.cpp:71: Incoming response from authorization server: 200 2025-06-03T10:20:24.865278Z :MVP DEBUG: oidc_session_create_yandex.cpp:79: SessionService.Create(): 412 2025-06-03T10:20:24.871345Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-03T10:20:24.872727Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-03T10:20:24.873945Z :MVP DEBUG: oidc_protected_page_yandex.cpp:33: SessionService.Check(): 400 2025-06-03T10:20:24.876760Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.876886Z :MVP DEBUG: oidc_protected_page.cpp:51: Can not process request to protected resource: GET /counters HTTP/1.1 Host: ydb.viewer.page Accept: */* Accept-Encoding: deflate Authorization: 2025-06-03T10:20:24.880317Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.880353Z :MVP DEBUG: oidc_session_create.cpp:43: Restore oidc context failed: Cannot find cookie ydb_oidc_cookie 2025-06-03T10:20:24.884908Z :MVP DEBUG: oidc_session_create.cpp:21: Restore oidc session 2025-06-03T10:20:24.884944Z :MVP DEBUG: oidc_session_create.cpp:51: Check state failed: Calculated digest is not equal expected digest 2025-06-03T10:20:24.889774Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-06-03T10:20:24.889804Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-03T10:20:24.889810Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:49: Request impersonated token 2025-06-03T10:20:24.889902Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:100: Incoming response from authorization server: 200 2025-06-03T10:20:24.889920Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:89: Set impersonated cookie: (__Host_impersonated_cookie_636C69656E745F6964: aW1w****bg== (B126DD61)) 2025-06-03T10:20:24.894600Z :MVP DEBUG: oidc_impersonate_start_page_nebius.cpp:23: Start impersonation process 2025-06-03T10:20:24.894625Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-03T10:20:24.899308Z :MVP DEBUG: oidc_cleanup_page.cpp:20: Clear cookie: (__Host_impersonated_cookie_636C69656E745F6964) 2025-06-03T10:20:24.904201Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start OIDC process 2025-06-03T10:20:24.904225Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-03T10:20:24.904230Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-06-03T10:20:24.904234Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-06-03T10:20:24.904267Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 200 OK 2025-06-03T10:20:24.904283Z :MVP DEBUG: oidc_protected_page.cpp:83: Forward user request bypass OIDC 2025-06-03T10:20:24.904296Z :MVP DEBUG: oidc_protected_page.cpp:37: Incoming response for protected resource: 200 2025-06-03T10:20:24.908797Z :MVP DEBUG: oidc_protected_page_nebius.cpp:21: Start 
OIDC process 2025-06-03T10:20:24.908820Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_session_cookie_636C69656E745F6964: c2Vz****aWU= (CE0CB168)) 2025-06-03T10:20:24.908824Z :MVP DEBUG: openid_connect.cpp:256: Using cookie (__Host_impersonated_cookie_636C69656E745F6964: aW1w****ZQ== (1A20D8C0)) 2025-06-03T10:20:24.908828Z :MVP DEBUG: oidc_protected_page_nebius.cpp:104: Exchange impersonated token 2025-06-03T10:20:24.908915Z :MVP DEBUG: oidc_protected_page_nebius.cpp:50: Getting access token: 401 OK 2025-06-03T10:20:24.908919Z :MVP DEBUG: oidc_protected_page_nebius.cpp:62: Getting access token: {"error": "bad_token"} 2025-06-03T10:20:24.908922Z :MVP DEBUG: oidc_protected_page_nebius.cpp:118: Clear impersonated cookie (__Host_impersonated_cookie_636C69656E745F6964) and retry |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/internals_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/fetch_request_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/counters_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_compaction.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_volatile_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_rs.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/coordinator/coordinator_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_init.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_replication.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceproxy/sequenceproxy_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/idx_test/ydb_index_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_client_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/hive_metastore_fetcher_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_labeled.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/libcore-external_sources-hive_metastore.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_counters.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partitiongraph_ut.cpp |51.2%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/ut/objcopy_1d0482d354dc270d18e7123281.o |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/make_config.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/metering_sink_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/microseconds_sliding_window_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/public_http/http_router_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/hive_metastore/ut/common.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/rbo/kqp_rbo_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_kqp.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/dread_cache_service/ut/caching_proxy_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/stop_pdisk.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/sourceid_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/partition_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/pqtablet_mock.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/quota_tracker_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/utils_ut.cpp 
|51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/user_info_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/type_codecs_ut.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/workload_service/ut/common/libworkload_service-ut-common.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_blobstorage/lib/libblobstorage-ut_blobstorage-lib.a |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/inside_ydb_ut/inside_ydb_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/slow/autopartitioning_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_col_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/sysview/kqp_sys_view_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut_ftol/dsproxy_fault_tolerance_ut.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/external_sources/hive_metastore/hive_metastore_native/libexternal_sources-hive_metastore-hive_metastore_native.a |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_index_lookup_join_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_flip_join_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/join/kqp_join_order_ut.cpp |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/aclib/protos/aclib.pb.{h, cc} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_state_load_plan.pb.{h, cc} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_common.pb.{h, cc} |51.0%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_export.pb.{h, cc} |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |50.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/column_family/compression/ydb-tests-olap-column_family-compression |50.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/subdomains.{pb.h ... 
grpc.pb.h} |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/libpy3library-formats-arrow-protos.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/api/service/protos/libpy3api-service-protos.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/libpy3library-folder_service-proto.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/libpy3library-actors-protos.global.a |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/libpy3yql-dq-proto.global.a |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/libpy3dq-actors-protos.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/libpy3library-ydb_issue-proto.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/protos/libpy3library-mkql_proto-protos.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/login/protos/libpy3library-login-protos.global.a |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/services/libpy3ydb-library-services.global.a |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/export_s3_buffer_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/time_cast/time_cast_ut.cpp |51.1%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/s3_recipe_helper/liblibrary-testlib-s3_recipe_helper.a |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_stats.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/long_tx_service/long_tx_service_ut.cpp >> ydb-tests-functional-ydb_cli::import_test [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/ut_sequence/datashard_ut_sequence.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_sequenceshard.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sequenceshard/ut_helpers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_keys.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence_reboots/ut_sequence_reboots.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/mediator/mediator_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move_reboots/ut_move_reboots.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_table.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/read_attributes_utils_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_provider_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/populator_ut.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/import_test >> ydb-tests-functional-ydb_cli::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/provider/yql_kikimr_gateway_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ut_topic_splitmerge.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/proto/libpy3providers-s3-proto.global.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/generic/connector/libcpp/ut_helpers/libconnector-libcpp-ut_helpers.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/yc_search_ut/index_events_processor_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/generic/actors/ut/yql_generic_lookup_actor_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/ymq/actor/yc_search_ut/test_events_writer.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_http_request.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_column_statistics.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_basic_statistics.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/actor_client_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_column_stats.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_s3_plan_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minstep.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/s3_recipe_ut_helpers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ut_bsvolume_reboots.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut_helpers.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator_client/ut_helpers.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup/ut_backup.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_query_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/s3/kqp_federated_scheme_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_proxy_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/service/ut/ut_aggregation/ut_aggregate_statistics.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/proxy_service/kqp_script_executions_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_allocator/txallocator_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/external_sources/s3/ut/s3_aws_credentials_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_bsvolume/ut_bsvolume.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/monitoring_ut.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/replication/ut_helpers/libtx-replication-ut_helpers.a |51.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/common/ydb-tests-fq-common |50.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |50.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard >> ydb-tests-functional-wardens::import_test [GOOD] |50.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/wardens/import_test >> ydb-tests-functional-wardens::import_test [GOOD] |50.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |50.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |50.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/libpy3api-grpc.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/python/enable_v3_new_behavior/libpy3sdk-python-enable_v3_new_behavior.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/ydbd_slice/libpy3ydbd_slice.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/k8s_api/libpy3tools-cfg-k8s_api.global.a |50.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut 
|50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/libpy3essentials-public-types.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/libpy3public-issue-protos.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fq_runner/libpy3tests-tools-fq_runner.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/libpy3yql-essentials-protos.global.a >> ydb-tests-fq-yds::import_test [GOOD] |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/libpy3providers-common-proto.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/ydb_sdk_import/libpy3tests-oss-ydb_sdk_import.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/oss/canonical/libpy3tests-oss-canonical.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/wardens/libpy3tests-library-wardens.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/libpy3ydb-tests-library.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/grpc/draft/libpy3api-grpc-draft.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/libpy3api-protos.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/libpy3api-protos-annotations.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/clients/libpy3tests-library-clients.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/libpy3core-issue-protos.global.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/file_storage/proto/libpy3core-file_storage-proto.global.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/walle/libpy3tools-cfg-walle.global.a |50.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/libpy3ydb-tools-cfg.global.a |50.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yds/import_test >> ydb-tests-fq-yds::import_test [GOOD] |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ut_helpers/libpublic-lib-ut_helpers.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/kqprun/kqprun.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/proto/libkqprun-src-proto.a |50.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |50.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_read_actor_ut.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/runlib/libtools-kqprun-runlib.a |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/kqprun/src/libtools-kqprun-src.a >> DataGeneratorState::SaveLoad [GOOD] >> DataGeneratorState::PortionProcessing [GOOD] >> ydb-tests-functional-postgresql::import_test [GOOD] |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_write_actor_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/pq_async_io/ut/dq_pq_rd_read_actor_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/query_actor/query_actor_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/rate_limiter/rate_limiter_ut.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/pq_async_io/libtests-fq-pq_async_io.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/partition_end_watcher_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/ut_utils.cpp |50.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |50.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/postgresql/import_test >> ydb-tests-functional-postgresql::import_test [GOOD] |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_cluster_discovery/cluster_discovery_service_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/fq/ut_integration/fq_ut.cpp |50.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/usage/libtx-conveyor_composite-usage.a |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/config/bsconfig_ut.cpp |50.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/cache_ut.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/tests/liblibrary-persqueue-tests.a |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/conveyor_composite/ut/ut_simple.cpp |50.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/workload/benchmark_base/ut/unittest >> DataGeneratorState::PortionProcessing [GOOD] |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_performance_test/main.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/conveyor_composite/service/libtx-conveyor_composite-service.a |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login/ut_login.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/libpublic-lib-idx_test.a >> simple_queue::import_test [GOOD] |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_range_ops.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection/ut_backup_collection.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/ydb_proxy/ydb_proxy_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_data_cleanup.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_logging_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/storage_tenant_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/check_integrity.cpp |50.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/testlib/service_mocks/ldap_mock/libtestlib-service_mocks-ldap_mock.a |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_scan_spilling_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/stream_creator_ut.cpp |50.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/ut_transform/ydb-library-yaml_config-ut_transform |51.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/mkql_proto/ut/helpers/libmkql_proto-ut-helpers.a |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/encrypted_storage_ut.cpp |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table/ut_external_table.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/import_test >> simple_queue::import_test [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/attributes_md5_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/metering_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/infly_ut.cpp >> 
PathListReaderTest::ReadsFilesListFromTreeParams [GOOD] >> S3FileTreeBuilderTest::Simple [GOOD] >> S3FileTreeBuilderTest::DeserializesTrailingSlash [GOOD] >> S3FileTreeBuilderTest::DeserializesManySlashes [GOOD] >> S3FileTreeBuilderTest::DeserializesRootSlash [GOOD] >> S3FileTreeBuilderTest::PassesFileWithZeroSize [GOOD] >> S3FileTreeBuilderTest::Interesting [GOOD] >> S3FileTreeBuilderTest::DeserializesLeadingSlash [GOOD] |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/message_delay_stats_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/actor/ut/sha256_ut.cpp |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/runtime/kqp_re2_ut.cpp |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_erase_rows.cpp |50.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut_perf/ydb-core-erasure-ut_perf |50.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/pqconfig.{pb.h ... grpc.pb.h} |50.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |50.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/range_helpers/ut/unittest >> S3FileTreeBuilderTest::DeserializesLeadingSlash [GOOD] |50.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_svc/ydb-tests-functional-kqp-kqp_query_svc >> TContinueFromStreamingOffsetsPlanTest::MapSeveralReadingsToOneIsAllowedOnlyInForceMode [GOOD] >> TContinueFromStreamingOffsetsPlanTest::DifferentPartitioning [GOOD] >> TContinueFromStreamingOffsetsPlanTest::NotMappedAllPartitions [GOOD] >> TContinueFromStreamingOffsetsPlanTest::ReadPartitionInSeveralPlacesIsOk [GOOD] >> TContinueFromStreamingOffsetsPlanTest::OneToOneMapping [GOOD] >> TContinueFromStreamingOffsetsPlanTest::MultipleTopics [GOOD] >> TContinueFromStreamingOffsetsPlanTest::Empty [GOOD] >> TContinueFromStreamingOffsetsPlanTest::AllTopicsMustBeUsedInNonForceMode [GOOD] |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_olap_ut.cpp |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |50.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_coordination_ut.cpp |50.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> ydb-tests-functional-sqs-multinode::import_test [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_login_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_monitoring_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ldap_login_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_logstore_ut.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/import_test >> ydb-tests-functional-sqs-multinode::import_test [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_bulk_upsert_ut.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/state/ut/unittest >> TContinueFromStreamingOffsetsPlanTest::AllTopicsMustBeUsedInNonForceMode 
[GOOD] |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_import_ut.cpp |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/trace_ut.cpp |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |51.0%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/objcopy_1406195445f45d950dda89fcd8.o |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/encrypted_backup_ut.cpp |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/describe_topic_ut.cpp |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp |51.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |51.0%| PREPARE $(BLACK_LINTER-sbr:8415400280) |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_index_table_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/local_partition_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/list_objects_in_s3_export_ut.cpp >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] >> TPGTest::TestLogin [GOOD] |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/backup_path_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/topic/ut/basic_usage_ut.cpp |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/backup_ut/ydb_backup_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_sample_k.cpp >> ydb-tests-datashard-dump_restore::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_local_kmeans.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_secondary_index.cpp |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |51.1%| [ld] {default-linux-x86_64, relwithdebinfo} $(B)/tools/black_linter/black_linter |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/defrag/ut/unittest >> TVDiskDefrag::HugeHeapDefragmentationRequired [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_reshuffle_kmeans.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_scripting_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/build_index/ut/ut_prefix_kmeans.cpp ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/pgproxy/ut/unittest >> TPGTest::TestLogin [GOOD] Test command err: 2025-06-03T10:20:29.985514Z :PGWIRE INFO: sock_listener.cpp:66: Listening on [::]:11566 2025-06-03T10:20:29.986027Z :PGWIRE DEBUG: pg_connection.cpp:61: (#13,[::1]:41044) incoming connection opened 2025-06-03T10:20:29.986072Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:41044) -> [1] 'i' "Initial" Size(15) protocol(0x00000300) user=user 2025-06-03T10:20:29.986104Z :PGWIRE DEBUG: pg_connection.cpp:241: (#13,[::1]:41044) <- [1] 'R' "Auth" Size(4) OK |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/import_test >> ydb-tests-datashard-dump_restore::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_read_rows_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_olapstore_ut.cpp |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup >> TErasurePerfTest::Split [GOOD] >> TErasurePerfTest::Restore >> ydb-tests-datashard-vector_index::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_object_storage_ut.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_query_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_register_node_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_stats_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/import_test >> ydb-tests-datashard-vector_index::import_test [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write.cpp >> oltp_workload::import_test [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_read_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/lib/ydb_cli/topic/topic_write_ut.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_ut.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut >> ydb-tests-datashard-select::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/import_test >> oltp_workload::import_test [GOOD] >> TPathTests::NormalizeEmpty [GOOD] >> TPathTests::TestRegexFromWildcards [GOOD] >> TPathTests::NormalizeNoSlashes [GOOD] >> TPathTests::NormalizeWithSlashes [GOOD] >> TPathTests::NormalizeSlashes [GOOD] >> ydb-tests-datashard-ttl::import_test [GOOD] >> ydb-tests-stress-oltp_workload-tests::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/import_test >> ydb-tests-datashard-select::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut >> ydb-tests-functional-tpc-large::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/import_test >> ydb-tests-datashard-ttl::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/main.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/tests/import_test >> ydb-tests-stress-oltp_workload-tests::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |51.2%| 
[TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/large/import_test >> ydb-tests-functional-tpc-large::import_test [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/etcd_proxy/proxy.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/kqprun/kqprun |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/follower_group.{pb.h ... grpc.pb.h} |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/object_listers/ut/unittest >> TPathTests::NormalizeSlashes [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/basic_example/public-sdk-cpp-tests-integration-basic_example |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/topic_reader_ut.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/main.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pgwire.cpp >> Signer::Basic [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_proxy.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part4/ydb-tests-fq-yt-kqp_yt_file-part4 |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/apps/pgwire/pg_ydb_connection.cpp >> conftest.py::black [GOOD] >> test_join.py::black [GOOD] >> conftest.py::black [GOOD] >> test_clickhouse.py::black [GOOD] >> test_greenplum.py::black [GOOD] >> test_join.py::black [GOOD] >> test_mysql.py::black [GOOD] >> test_postgresql.py::black [GOOD] >> test_ydb.py::black [GOOD] |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/issue/protos/issue_severity.pb.{h, cc} |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/ydb/ydb_table_split_ut.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config_units.{pb.h ... 
grpc.pb.h} >> ydb-tests-functional-query_cache::import_test [GOOD] >> generator::import_test [GOOD] |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/config/ut/ydb-services-config-ut |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/streaming/black >> test_join.py::black [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/signer/ut/unittest >> Signer::Basic [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/docs/generator/import_test >> generator::import_test [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_info_types.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/analytics/black >> test_ydb.py::black [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_flowcontrol_ut.cpp |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_proxy.{pb.h ... grpc.pb.h} |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/query_cache/import_test >> ydb-tests-functional-query_cache::import_test [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_commit_redo_limit.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_split_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_decimal_types.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_backup.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_table_pg_types.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/scan/kqp_scan_ut.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base/ut_base.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.{pb.h ... 
grpc.pb.h} |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep >> ydb-tests-functional-sqs-large::import_test [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/import_test >> ydb-tests-functional-sqs-large::import_test [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/audit/ydb-tests-functional-audit |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/ut_user_attributes_reboots.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/dwarf_backtrace/registry/libcpp-dwarf_backtrace-registry.global.a |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/gateways.pb.{h, cc} >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_config.pb.{h, cc} |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut >> test_secondary_index.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/pinger.pb.{h, cc} |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat >> test_cms_erasure.py::flake8 [GOOD] >> test_cms_restart.py::flake8 [GOOD] >> test_cms_state_storage.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/load/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/flake8 >> test_secondary_index.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part13/ydb-tests-fq-yt-kqp_yt_file-part13 |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/flake8 >> utils.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/lib/cmds/ut/py3test >> test.py::test_kikimr_config_generator_generic_connector_config [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/local_ydb/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator >> ydb-tests-stress-transfer-tests::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 >> test.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |51.3%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/tests/functional/benchmarks_init/ydb-tests-functional-benchmarks_init >> test.py::flake8 [GOOD] >> test_quoting.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/tests/import_test >> ydb-tests-stress-transfer-tests::import_test [GOOD] >> test_serializable.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup >> __main__.py::flake8 [GOOD] >> Backpressure::MonteCarlo |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 >> test.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/bootstrap.{pb.h ... grpc.pb.h} >> test_workload.py::flake8 [GOOD] >> main.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/flake8 >> test_quoting.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/large_serializable/flake8 >> test_serializable.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/restarts/ydb-tests-fq-restarts |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/postgresql/flake8 >> test_postgres.py::flake8 [GOOD] >> ydb-tests-functional-encryption::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/log/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/node_broker/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tools/combiner_perf/bin/main.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> test_parametrized_queries.py::flake8 [GOOD] >> TErasurePerfTest::Restore [GOOD] >> test_pdisk_format_info.py::flake8 [GOOD] >> TErasureSmallBlobSizePerfTest::StringErasureMode >> test_replication.py::flake8 [GOOD] >> test_self_heal.py::flake8 [GOOD] >> test_tablet_channel_migration.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/node_broker/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/apps/dstool/flake8 >> main.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tools/combiner_perf/libkqp-tools-combiner_perf.a >> TErasureSmallBlobSizePerfTest::StringErasureMode [GOOD] >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |51.3%| [TS] {default-linux-x86_64, 
relwithdebinfo} ydb/tests/functional/encryption/import_test >> ydb-tests-functional-encryption::import_test [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_pdisk_config.{pb.h ... grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/flake8 >> test_parametrized_queries.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/flake8 >> test_tablet_channel_migration.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> ydb-tests-olap::import_test [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/tools/fqrun/fqrun.cpp |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut_perf/unittest >> TErasureSmallBlobSizePerfTest::ConvertToRopeMode [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut_util/ydb-core-tablet_flat-ut_util |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/metrics.{pb.h ... grpc.pb.h} |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/fqrun/src/libtools-fqrun-src.a >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/import_test >> ydb-tests-olap::import_test [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf >> __main__.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/common/ut/ydb-library-yql-providers-s3-common-ut |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sequence/ut_sequence.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_workload_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/perf/kqp_query_perf_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [PR] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/runtime_feature_flags.h |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/flake8 >> __main__.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> test_sql.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut >> test_ttl.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/storage.pb.{h, cc} >> kikimr_config.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/flake8 >> test_sql.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/kv/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/flake8 >> test_ttl.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/ut/flake8 >> kikimr_config.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut >> test.py::py2_flake8 [GOOD] >> ydb-tests-fq-common::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] >> ydb-tests-functional-suite_tests::import_test [GOOD] >> conftest.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_ctas.py::flake8 [GOOD] >> test_yt_reading.py::flake8 [GOOD] >> ydb-tests-olap-column_family-compression::import_test [GOOD] >> conftest.py::flake8 [GOOD] >> test_clickhouse.py::flake8 [GOOD] >> test_greenplum.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test_mysql.py::flake8 [GOOD] >> test_postgresql.py::flake8 [GOOD] >> test_ydb.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_split_merge.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/common/import_test >> ydb-tests-fq-common::import_test [GOOD] >> conftest.py::flake8 [GOOD] >> test_unknown_data_source.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/import_test >> ydb-tests-functional-suite_tests::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test_yt_reading.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/import_test >> ydb-tests-olap-column_family-compression::import_test [GOOD] >> test_config_migration.py::flake8 [GOOD] >> test_config_with_metadata.py::flake8 [GOOD] >> test_configuration_version.py::flake8 [GOOD] >> test_distconf.py::flake8 [GOOD] >> test_generate_dynamic_config.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_stats_mode.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/analytics/flake8 >> test_ydb.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> test_encryption.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut_pg/flat_database_pg_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/flake8 >> test_generate_dynamic_config.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/flake8 >> test_split_merge.py::flake8 [GOOD] >> TMemoryPoolTest::Transactions [GOOD] >> TMemoryPoolTest::LongRollback [GOOD] >> 
UtilString::ShrinkToFit [GOOD] >> TMemoryPoolTest::TransactionsWithAlignment [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/common/flake8 >> test_unknown_data_source.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/plans/flake8 >> test_stats_mode.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/pq_read/test/ydb-tests-tools-pq_read-test >> __main__.py::flake8 [GOOD] >> TMemoryPoolTest::AllocOneByte [GOOD] >> TMemoryPoolTest::AppendString [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/large_serializable/ydb-tests-functional-large_serializable |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/compressors/ut/ydb-library-yql-providers-s3-compressors-ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> TestUrlBuilder::BasicWithEncoding [GOOD] >> TestS3UrlEscape::EscapeUnescapeForceRet [GOOD] >> TestUrlBuilder::Basic [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_value.pb.{h, cc} |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/tools/simple_json_diff/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_table.pb.{h, cc} |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/encryption/flake8 >> test_encryption.py::flake8 [GOOD] >> TestS3UrlEscape::EscapeEscapedForce [GOOD] >> test.py::flake8 [GOOD] >> TestUrlBuilder::BasicWithAdditionalEncoding [GOOD] >> TestS3UrlEscape::EscapeAdditionalSymbols [GOOD] >> TestUrlBuilder::UriOnly [GOOD] >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test_workload.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/common.pb.{h, cc} |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_util/unittest >> TMemoryPoolTest::AppendString [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 >> test.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/blobstorage/ydb-tests-functional-blobstorage |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/driver/flake8 >> __main__.py::flake8 [GOOD] >> ydb-library-yaml_config-ut_transform::import_test [GOOD] >> ydb-tests-functional-scheme_tests::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_query_session/ydb-tests-functional-kqp-kqp_query_session |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/common/ut/unittest >> TestUrlBuilder::UriOnly [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/providers/common/proto/gateways_config.pb.{h, cc} >> kqprun_recipe::import_test [GOOD] >> conftest.py::flake8 [GOOD] >> test_alter_compression.py::flake8 [GOOD] >> test_alter_tiering.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_read_update_write_load.py::flake8 [GOOD] >> test_scheme_load.py::flake8 [GOOD] >> test_simple.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> data_correctness.py::flake8 [GOOD] >> data_migration_when_alter_ttl.py::flake8 [GOOD] >> tier_delete.py::flake8 [GOOD] >> ttl_delete_s3.py::flake8 [GOOD] >> ttl_unavailable_s3.py::flake8 [GOOD] >> unstable_connection.py::flake8 [GOOD] >> test_large_import.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/ut_transform/import_test >> ydb-library-yaml_config-ut_transform::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_tests/import_test >> ydb-tests-functional-scheme_tests::import_test [GOOD] >> allure_utils.py::flake8 [GOOD] >> remote_execution.py::flake8 [GOOD] >> results_processor.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_cli.py::flake8 [GOOD] >> ydb_cluster.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/dst_creator_ut.cpp >> tstool.py::flake8 [GOOD] >> ydb-tests-functional-sqs-common::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/flake8 >> test_simple.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/recipe/import_test >> kqprun_recipe::import_test [GOOD] >> ydb-tests-stress-olap_workload-tests::import_test [GOOD] >> test.py::py2_flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/lib/flake8 >> ydb_cluster.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/large/flake8 >> test_large_import.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/flake8 >> unstable_connection.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tools/tstool/flake8 >> tstool.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/import_test >> ydb-tests-functional-sqs-common::import_test [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part19/ydb-tests-fq-yt-kqp_yt_file-part19 |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/tests/import_test >> ydb-tests-stress-olap_workload-tests::import_test [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/ut_vector_index_build_reboots.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg >> test.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> TCompressorTests::ErrorLz4 [GOOD] >> TCompressorTests::SuccessLz4 [GOOD] >> TCompressorTests::WrongMagicLz4 [GOOD] >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |51.3%| [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test_account_actions.py::flake8 [GOOD] >> test_acl.py::flake8 [GOOD] >> test_counters.py::flake8 [GOOD] >> test_format_without_version.py::flake8 [GOOD] >> test_garbage_collection.py::flake8 [GOOD] >> test_multiplexing_tables_format.py::flake8 [GOOD] >> test_ping.py::flake8 [GOOD] >> test_queue_attributes_validation.py::flake8 [GOOD] >> test_queue_counters.py::flake8 [GOOD] >> test_queue_tags.py::flake8 [GOOD] >> test_queues_managing.py::flake8 [GOOD] >> test_throttling.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_scheme_op.{pb.h ... grpc.pb.h} |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_vdisk.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 >> test.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/olap/high_load/read_update_write.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/lfalloc/liblibrary-cpp-lfalloc.a |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part0/ydb-tests-fq-yt-kqp_yt_file-part0 >> conftest.py::flake8 [GOOD] >> docker_wrapper_test.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/backup/ut/ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/compressors/ut/unittest >> TCompressorTests::WrongMagicLz4 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/flake8 >> test_throttling.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base3.{pb.h ... grpc.pb.h} |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_storage.pb.{h, cc} >> test.py::flake8 [GOOD] >> ydb-tests-functional-minidumps::import_test [GOOD] >> test_schemeshard_limits.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_base.{pb.h ... grpc.pb.h} >> ydb-tests-example::import_test [GOOD] >> test.py::py2_flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_insert_restarts.py::flake8 [GOOD] |51.1%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage.{pb.h ... 
grpc.pb.h} |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/tools/fqrun/fqrun |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/flake8 >> docker_wrapper_test.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/flake8 >> test_schemeshard_limits.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> ydb-tests-fq-plans::import_test [GOOD] >> ydb-tests-stress-simple_queue-tests::import_test [GOOD] >> __main__.py::flake8 [GOOD] |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/minidumps/import_test >> ydb-tests-functional-minidumps::import_test [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/backup/ut/ydb-library-backup-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/restarts/flake8 >> test_insert_restarts.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/example/import_test >> ydb-tests-example::import_test [GOOD] |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/dynamic_prototype/libcpp-protobuf-dynamic_prototype.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/dq/provider/yql_dq_provider_ut.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/libs/sqlite3/libcontrib-libs-sqlite3.a |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/protobuf/yql/libcpp-protobuf-yql.a |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/plans/import_test >> ydb-tests-fq-plans::import_test [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_query.cpp |51.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/apps/ydb/ut/ydb-apps-ydb-ut |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare_scheme.cpp >> replay::import_test [GOOD] >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/simple_queue/tests/import_test >> ydb-tests-stress-simple_queue-tests::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/flake8 >> __main__.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_run_bench.cpp |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_drop.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/providers/yt/actors/ut/yql_yt_lookup_actor_ut.cpp >> ydb-tests-fq-mem_alloc::import_test [GOOD] >> test_mixed.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/cmd_prepare.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/ic_cache_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/commands.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/tests/tpch/main.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/describes_ut/describe_topic_ut.cpp >> ydb-tests-stress-log-tests::import_test [GOOD] >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/tpch/lib/libtests-tpch-lib.global.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_pragma_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/ydb_serializable/replay/import_test >> replay::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::flake8 [GOOD] >> ydb-tests-stress-mixedpy::import_test [GOOD] >> test_example.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/mem_alloc/import_test >> ydb-tests-fq-mem_alloc::import_test [GOOD] |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/codegen/no_llvm/libminikql-codegen-no_llvm.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/computation/no_llvm/libminikql-computation-no_llvm.a |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_scripting_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/yql/kqp_yql_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/mixedpy/flake8 >> test_mixed.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 >> test.py::py2_flake8 [GOOD] >> test.py::flake8 [GOOD] >> helpers.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_query.py::flake8 [GOOD] >> test_s3.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/log/tests/import_test >> ydb-tests-stress-log-tests::import_test [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_cms.pb.{h, cc} |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_config.{pb.h ... grpc.pb.h} |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/mixedpy/import_test >> ydb-tests-stress-mixedpy::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/example/flake8 >> test_example.py::flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/flake8 >> test.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/export.{pb.h ... 
grpc.pb.h} |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/lib/flake8 >> test_s3.py::flake8 [GOOD] |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/tools/python3/Modules/_sqlite/libpy3python3-Modules-_sqlite.a |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/library/ut/ydb-tests-postgres_integrations-library-ut >> ydb-public-tools-lib-cmds-ut::import_test [GOOD] >> test_dump_restore.py::flake8 [GOOD] >> column_table_helper.py::flake8 [GOOD] >> range_allocator.py::flake8 [GOOD] >> s3_client.py::flake8 [GOOD] >> thread_helper.py::flake8 [GOOD] >> time_histogram.py::flake8 [GOOD] >> utils.py::flake8 [GOOD] >> ydb_client.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/self_heal_actor_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tools/cfg/bin/flake8 >> __main__.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_activity_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_interconnect_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_tracker_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_replrecoverymachine_ut.cpp >> test.py::py2_flake8 [GOOD] >> test_commit.py::flake8 [GOOD] >> test_timeout.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/lib/libpy3python-import_tracing-lib.global.a |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/import_tracing/constructor/libpy3python-import_tracing-constructor.global.a |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/actor_bootstrapped_ut.cpp >> test_s3.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/lib/cmds/ut/import_test >> ydb-public-tools-lib-cmds-ut::import_test [GOOD] |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/runtime_py3/main/libpython-runtime_py3-main.a |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_incremental_restore_scan.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dump_restore/flake8 >> test_dump_restore.py::flake8 [GOOD] |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/import_test/libpy3python-testing-import_test.global.a |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/actorlib_impl/test_protocols_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_selfheal/main.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/common/flake8 >> ydb_client.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/pq_read/test/flake8 >> test_timeout.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_transform.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/solomon/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/protos/type_info.{pb.h ... 
grpc.pb.h} |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/flake8 >> test_s3.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/repl/blobstorage_hullreplwritesst_ut.cpp |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_stats.pb.{h, cc} >> test_stability.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/ut_transform/flake8 >> test_transform.py::flake8 [GOOD] >> test_crud.py::flake8 [GOOD] >> test_inserts.py::flake8 [GOOD] >> test_kv.py::flake8 [GOOD] >> test_generator.py::flake8 [GOOD] >> test_init.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/ydb_issue/proto/issue_id.{pb.h ... grpc.pb.h} |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/udf/service/terminate_policy/libudf-service-terminate_policy.global.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/codec/codegen/no_llvm/libcodec-codegen-no_llvm.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/invoke_builtins/no_llvm/libminikql-invoke_builtins-no_llvm.a >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.3%| [AR] {BAZEL_DOWNLOAD} $(B)/yt/yql/providers/yt/comp_nodes/no_llvm/libyt-comp_nodes-no_llvm.a |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_issue_message.pb.{h, cc} |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup >> test_actorsystem.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/flake8 >> test_kv.py::flake8 [GOOD] |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_broker.{pb.h ... 
grpc.pb.h} |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stability/ydb/flake8 >> test_stability.py::flake8 [GOOD] |51.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/objcopy_7406de026bf25e30e96a88517d.o |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/common/protos/snapshot.pb.{h, cc} |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/tool |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/solomon/flake8 >> test.py::flake8 [GOOD] >> ydb-tests-functional-sqs-with_quotas::import_test [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhuge_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ut.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/flake8 >> test_init.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/top_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/huge/blobstorage_hullhugeheap_ctx_ut.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/flake8 >> test_actorsystem.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut >> test_async_replication.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/tests/tpch/tpch |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/activation.pb.{h, cc} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/stream.{pb.h ... 
grpc.pb.h} |51.4%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/fq_config.pb.{h, cc} >> test_dml.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_schema/ut_columnshard_schema.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/flake8 >> test_async_replication.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/import_test >> ydb-tests-functional-sqs-with_quotas::import_test [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/flake8 >> test_dml.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/kqp_pg_ut.cpp >> test_base.py::flake8 [GOOD] >> test_postgres.py::flake8 [GOOD] >> test_sql_logic.py::flake8 [GOOD] >> test_stream_query.py::flake8 [GOOD] >> test_log_scenario.py::flake8 [GOOD] >> zip_bomb.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/scheme_shard/ydb-tests-functional-scheme_shard |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/meta/ut/ydb-mvp-meta-ut |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/pg/pg_catalog_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_oos_logic_ut.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_break.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 >> test.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/result_formatter/result_formatter_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/statistics/database/ut/ut_database.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/skeleton/skeleton_vpatch_actor_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_delete_ut.cpp >> test_clean.py::flake8 [GOOD] >> test_clickbench.py::flake8 [GOOD] >> test_diff_processing.py::flake8 [GOOD] >> test_external.py::flake8 [GOOD] >> test_tpch.py::flake8 [GOOD] >> test_workload_simple_queue.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/batch_operations/kqp_batch_update_ut.cpp |51.3%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompiler/rescompiler |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/flake8 >> test_stream_query.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_utils_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/certificate_check/cert_check_ut.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/apps/pgwire/pgwire |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/flake8 >> zip_bomb.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 >> test.py::flake8 [GOOD] |51.3%| [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 >> test.py::flake8 [GOOD] |51.3%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/workload_manager_config.{pb.h ... grpc.pb.h} |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/main.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/minidumps/flake8 >> test_break.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/server.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/medium/flake8 >> test_workload_simple_queue.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/quoter/quoter_service_bandwidth_test/quota_requester.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub.cpp >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/gc_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_checkpoint_storage_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/storage_service_ydb_ut.cpp |51.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence >> test_vector_index.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/checkpoint_storage/ut/ydb_state_storage_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/balance_coverage/balance_coverage_builder_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/ut_helpers.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/quoter_resource_tree_ut.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/tablet/tablet_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_collector.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 >> test.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/http/libtools-yqlrun-http.a |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/flake8 >> test_vector_index.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/tools/yqlrun/yqlrun.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication/ut_replication.cpp >> __main__.py::flake8 [GOOD] >> parser.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/tools/yqlrun/lib/libtools-yqlrun-lib.a |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ut_export_reboots_s3.cpp |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |51.2%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/health_check/health_check_ut.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 >> test.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/ydb_recipe/flake8 >> __main__.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/tools/yqlrun/yqlrun >> test.py::py2_flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> run_tests.py::flake8 [GOOD] >> integrations_test.py::test_read_jtest_results[o/OK] [GOOD] >> integrations_test.py::test_read_jtest_results[f/failed1] [GOOD] >> integrations_test.py::test_read_jtest_results[f/failed2] [GOOD] >> integrations_test.py::test_read_jtest_results[f/error1] [GOOD] >> integrations_test.py::test_read_jtest_results[s/skipped1] [GOOD] >> integrations_test.py::test_read_jtest_results[s/skipped2] [GOOD] >> integrations_test.py::test_read_jtest_with_one_result [GOOD] >> MetaCache::BasicForwarding [GOOD] >> MetaCache::TimeoutFallback [GOOD] |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/docs/generator/flake8 >> parser.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> test.py::flake8 [GOOD] |51.2%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/labeled_counters.{pb.h ... grpc.pb.h} |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> hive_matchers.py::flake8 [GOOD] >> test_create_tablets.py::flake8 [GOOD] >> test_drain.py::flake8 [GOOD] >> test_kill_tablets.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_background_cleaning/ut_background_cleaning.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_move/ut_move.cpp |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/yql_testlib/libydb-core-yql_testlib.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless/ut_serverless.cpp >> conftest.py::flake8 [GOOD] >> test_ydb_backup.py::flake8 [GOOD] >> test_ydb_flame_graph.py::flake8 [GOOD] >> test_ydb_impex.py::flake8 [GOOD] >> test_ydb_recursive_remove.py::flake8 [GOOD] >> test_ydb_scheme.py::flake8 [GOOD] >> test_ydb_scripting.py::flake8 [GOOD] >> test_ydb_sql.py::flake8 [GOOD] >> test_ydb_table.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> scenario.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test_case.py::flake8 [GOOD] >> ydb-tests-functional-audit::import_test [GOOD] >> test.py::py2_flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/schemereq_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_object_storage_listing.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/run_tests/flake8 >> run_tests.py::flake8 [GOOD] >> ydb-tests-datashard-dml::import_test [GOOD] >> __main__.py::flake8 [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/mvp/meta/ut/unittest >> MetaCache::TimeoutFallback [GOOD] Test command err: 2025-06-03T10:20:44.769450Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on 
http://[::]:29798 2025-06-03T10:20:44.769522Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:17978 2025-06-03T10:20:44.769573Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:14:2061] 2025-06-03T10:20:44.769579Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:29798 2025-06-03T10:20:44.769602Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:29798 2025-06-03T10:20:44.769733Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#11,127.0.0.1:29798) outgoing connection opened 2025-06-03T10:20:44.769738Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#11,127.0.0.1:29798) <- (GET /server) 2025-06-03T10:20:44.769793Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:46578) incoming connection opened 2025-06-03T10:20:44.769823Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#12,[::ffff:127.0.0.1]:46578) -> (GET /server) 2025-06-03T10:20:44.769840Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:17978 with deadline 2025-06-03T10:21:44.769835Z 2025-06-03T10:20:44.769845Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-06-03T10:21:44.769835Z (+1748946104.769835s) 2025-06-03T10:20:44.769850Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:17978 timeout 30.000000s 2025-06-03T10:20:44.769869Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [1:16:2063] 2025-06-03T10:20:44.769873Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:17978 2025-06-03T10:20:44.769879Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:17978 2025-06-03T10:20:44.769923Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#13,127.0.0.1:17978) outgoing connection opened 2025-06-03T10:20:44.769928Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#13,127.0.0.1:17978) <- (GET /server) 2025-06-03T10:20:44.769959Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:32846) incoming connection opened 2025-06-03T10:20:44.770237Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#14,[::ffff:127.0.0.1]:32846) -> (GET /server) 2025-06-03T10:20:44.770284Z :HTTP DEBUG: http_proxy_incoming.cpp:278: (#14,[::ffff:127.0.0.1]:32846) <- (200 Found, 6 bytes) 2025-06-03T10:20:44.770315Z :HTTP DEBUG: http_proxy_incoming.cpp:331: (#14,[::ffff:127.0.0.1]:32846) connection closed 2025-06-03T10:20:44.770485Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#13,127.0.0.1:17978) -> (200 Found, 6 bytes) 2025-06-03T10:20:44.770492Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#13,127.0.0.1:17978) connection closed 2025-06-03T10:20:44.770659Z :HTTP DEBUG: meta_cache.cpp:146: Cache received successfull (200) response for /server 2025-06-03T10:20:44.770703Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [1:16:2063] 2025-06-03T10:20:44.770717Z :HTTP DEBUG: http_proxy_incoming.cpp:278: (#12,[::ffff:127.0.0.1]:46578) <- (200 Found, 6 bytes) 2025-06-03T10:20:44.770783Z :HTTP DEBUG: http_proxy_incoming.cpp:331: (#12,[::ffff:127.0.0.1]:46578) connection closed 2025-06-03T10:20:44.770872Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:29798) -> (200 Found, 6 bytes) 2025-06-03T10:20:44.770881Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#11,127.0.0.1:29798) connection closed 2025-06-03T10:20:44.770986Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [1:14:2061] 2025-06-03T10:20:44.774485Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:7923 2025-06-03T10:20:44.774583Z :HTTP INFO: http_proxy_acceptor.cpp:89: Listening on http://[::]:29371 2025-06-03T10:20:44.774639Z :HTTP DEBUG: http_proxy.cpp:22: 
Connection created [2:14:2061] 2025-06-03T10:20:44.774648Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:7923 2025-06-03T10:20:44.774672Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:7923 2025-06-03T10:20:44.774861Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#11,127.0.0.1:7923) outgoing connection opened 2025-06-03T10:20:44.774874Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#11,127.0.0.1:7923) <- (GET /server) 2025-06-03T10:20:44.774948Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#12,[::ffff:127.0.0.1]:49102) incoming connection opened 2025-06-03T10:20:44.774978Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#12,[::ffff:127.0.0.1]:49102) -> (GET /server) 2025-06-03T10:20:44.775005Z :HTTP DEBUG: meta_cache.cpp:231: Updating ownership http://127.0.0.1:29371 with deadline 2025-06-03T10:30:44.774995Z 2025-06-03T10:20:44.775013Z :HTTP DEBUG: meta_cache.cpp:237: SetRefreshTime "/server" to 2025-06-03T10:30:44.774995Z (+1748946644.774995s) 2025-06-03T10:20:44.775023Z :HTTP DEBUG: meta_cache.cpp:198: IncomingForward /server to http://127.0.0.1:29371 timeout 30.000000s 2025-06-03T10:20:44.775045Z :HTTP DEBUG: http_proxy.cpp:22: Connection created [2:16:2063] 2025-06-03T10:20:44.775053Z :HTTP DEBUG: http_proxy_outgoing.cpp:188: resolving 127.0.0.1:29371 2025-06-03T10:20:44.775066Z :HTTP DEBUG: http_proxy_outgoing.cpp:155: connecting to 127.0.0.1:29371 2025-06-03T10:20:44.775171Z :HTTP DEBUG: http_proxy_outgoing.cpp:329: (#13,127.0.0.1:29371) outgoing connection opened 2025-06-03T10:20:44.775178Z :HTTP DEBUG: http_proxy_outgoing.cpp:331: (#13,127.0.0.1:29371) <- (GET /server) 2025-06-03T10:20:44.775198Z :HTTP DEBUG: http_proxy_incoming.cpp:83: (#14,[::ffff:127.0.0.1]:55820) incoming connection opened 2025-06-03T10:20:44.775238Z :HTTP DEBUG: http_proxy_incoming.cpp:156: (#14,[::ffff:127.0.0.1]:55820) -> (GET /server) 2025-06-03T10:20:44.785413Z :HTTP ERROR: http_proxy_outgoing.cpp:122: (#13,127.0.0.1:29371) connection closed with error: Connection timed out 2025-06-03T10:20:44.785626Z :HTTP DEBUG: http_proxy_incoming.cpp:189: (#14,[::ffff:127.0.0.1]:55820) connection closed 2025-06-03T10:20:44.785837Z :HTTP WARN: meta_cache.cpp:151: Cache received failed response with error "Connection timed out" for /server - retrying locally 2025-06-03T10:20:44.785879Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [2:16:2063] 2025-06-03T10:20:44.796155Z :HTTP DEBUG: http_proxy_incoming.cpp:278: (#12,[::ffff:127.0.0.1]:49102) <- (200 Found, 6 bytes) 2025-06-03T10:20:44.796281Z :HTTP DEBUG: http_proxy_incoming.cpp:331: (#12,[::ffff:127.0.0.1]:49102) connection closed 2025-06-03T10:20:44.796391Z :HTTP DEBUG: http_proxy_outgoing.cpp:101: (#11,127.0.0.1:7923) -> (200 Found, 6 bytes) 2025-06-03T10:20:44.796403Z :HTTP DEBUG: http_proxy_outgoing.cpp:109: (#11,127.0.0.1:7923) connection closed 2025-06-03T10:20:44.796714Z :HTTP DEBUG: http_proxy.cpp:146: Connection closed [2:14:2061] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/worker_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_trace.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/flake8 >> test_kill_tablets.py::flake8 [GOOD] |51.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut >> test.py::py2_flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/helper.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/flake8 >> 
test_ydb_table.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_logs_engine.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/import_test >> ydb-tests-datashard-dml::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/recipe/flake8 >> __main__.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/import_test >> ydb-tests-functional-audit::import_test [GOOD] >> test_sql_streaming.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 >> test.py::py2_flake8 [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/library/ut/py3test >> integrations_test.py::test_read_jtest_with_one_result [GOOD] Test command err: /home/runner/.ya/build/build_root/u93c/0015b7/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:26: PytestCollectionWarning: cannot collect test class 'TestCase' because it has a __init__ constructor (from: integrations_test.py) /home/runner/.ya/build/build_root/u93c/0015b7/ydb/tests/postgres_integrations/library/ut/test-results/py3test/ydb/tests/postgres_integrations/library/pytest_integration.py:20: PytestCollectionWarning: cannot collect test class 'TestState' because it has a __init__ constructor (from: integrations_test.py) |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_script.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test_case.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/statistics_workload/flake8 >> __main__.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_program.cpp >> test_restarts.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/engines/ut/ut_insert_table.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ut_split_merge_reboots.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_continuous_backup/ut_continuous_backup.cpp >> conftest.py::flake8 [GOOD] >> test_serverless.py::flake8 [GOOD] >> ydb-tests-functional-benchmarks_init::import_test [GOOD] >> tpc_tests.py::flake8 [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/service_account_service_ut.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/flake8 >> test_sql_streaming.py::flake8 [GOOD] |51.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/folder_service_ut.cpp |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::flake8 [GOOD] >> base.py::flake8 [GOOD] >> collection.py::flake8 [GOOD] >> test_tpch_import.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |50.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/flake8 >> test_restarts.py::flake8 
[GOOD] |50.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 >> test.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_explain_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/libpy3olap_workload.global.a |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp >> test_crud.py::flake8 [GOOD] >> test_discovery.py::flake8 [GOOD] >> test_execute_scheme.py::flake8 [GOOD] >> test_indexes.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_isolation.py::flake8 [GOOD] >> test_public_api.py::flake8 [GOOD] >> test_read_table.py::flake8 [GOOD] >> test_session_grace_shutdown.py::flake8 [GOOD] >> test_session_pool.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/import_test >> ydb-tests-functional-benchmarks_init::import_test [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/flake8 >> test_serverless.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/flake8 >> tpc_tests.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/recovery.cpp |51.1%| [BN] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/simple_queue |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ds_proxy_lwtrace.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr/ut_rtmr.cpp >> test_leader_start_inflight.py::flake8 [GOOD] >> test_kqprun_recipe.py::flake8 [GOOD] >> test_query_cache.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/discover.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 >> test.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/flake8 >> test_tpch_import.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/flake8 >> test_session_pool.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/race.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/acceleration.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain/ut_extsubdomain.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_arrow_in_channels_ut.cpp >> Metrics::OnlyOneItem [GOOD] >> Metrics::EmptyIssuesList [GOOD] >> Metrics::SeveralTopItems [GOOD] >> ydb-tests-fq-restarts::import_test [GOOD] >> SanitizeLable::Truncate200 [GOOD] >> Metrics::MoreThanFiveItems [GOOD] >> SanitizeLable::Empty [GOOD] >> SanitizeLable::SkipSingleBadSymbol [GOOD] >> SanitizeLable::SkipBadSymbols [GOOD] >> Metrics::SeveralSubItems [GOOD] >> Metrics::CombineSubItems [GOOD] |51.1%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/objcopy_caf222d14387d4810b5cb3e853.o |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/large/flake8 >> test_leader_start_inflight.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_compaction/ut_compaction.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_view/ut_view.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/tests/flake8 >> test_kqprun_recipe.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_scan_data_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/arrow/kqp_types_arrow_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/query_cache/flake8 >> test_query_cache.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/config_ut.cpp >> test.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/compute/common/ut/utils_ut.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/restarts/import_test >> ydb-tests-fq-restarts::import_test [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/metrics/ut/unittest >> Metrics::CombineSubItems [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 >> test.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/deadlines.cpp |51.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/assimilation.cpp >> runner.py::flake8 [GOOD] >> test_disk.py::flake8 [GOOD] >> test_alloc_default.py::flake8 [GOOD] >> test_tablet.py::flake8 [GOOD] >> test_dc_local.py::flake8 [GOOD] >> test_result_limits.py::flake8 [GOOD] >> test_scheduling.py::flake8 [GOOD] >> http_client.py::flake8 [GOOD] >> query_results.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/backpressure.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/counting_events.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/donor.cpp >> test.py::py2_flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/table_creator/table_creator_ut.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/runner/flake8 >> runner.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/decommit_3dc.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/block_race.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/mem_alloc/flake8 >> test_scheduling.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/flake8 >> test_tablet.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/http_api_client/flake8 >> query_results.py::flake8 [GOOD] >> test_quota_exhaustion.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/monitoring.cpp |51.1%| [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/incorrect_queries.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/extra_block_checks.cpp >> test_workload.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_split_merge.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_split_merge/ut_find_split_key.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_mvcc_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_locks_ut.cpp >> test_bulkupserts_tpch.py::flake8 [GOOD] >> test_insert_delete_duplicate_records.py::flake8 [GOOD] >> test_insertinto_selectfrom.py::flake8 [GOOD] >> test_tiering.py::flake8 [GOOD] >> test_workload_manager.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_ut.cpp |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure/ut_data_erasure.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/flake8 >> test_quota_exhaustion.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_sink_tx_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_tx_ut.cpp >> test_workload.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> test_copy_table.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/gc_quorum_3dc.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/encryption.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/oltp_workload/tests/flake8 >> test_workload.py::flake8 [GOOD] >> test_cte.py::flake8 [GOOD] >> overlapping_portions.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_snapshot_isolation_ut.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/sql/large/flake8 >> test_workload_manager.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_mvcc_ut.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/tx/kqp_locks_tricky_ut.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime.py::flake8 [GOOD] >> select_positive.py::flake8 [GOOD] >> select_positive_with_schema.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> test.py::py2_flake8 [GOOD] >> ydb-tests-datashard-parametrized_queries::import_test [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 >> test.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_kqp_scan.cpp |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/flake8 >> test_copy_table.py::flake8 [GOOD] |51.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_minikql.cpp >> collection.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> select_datetime_with_service_name.py::flake8 [GOOD] >> select_positive_with_service_name.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/tests/flake8 >> test_workload.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/group_reconfiguration.cpp 
|51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/kqp/plan2svg/flake8 >> test_cte.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get.cpp >> test.py::flake8 [GOOD] >> test_liveness_wardens.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 >> test.py::py2_flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/oom/flake8 >> overlapping_portions.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/get_block.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/parametrized_queries/import_test >> ydb-tests-datashard-parametrized_queries::import_test [GOOD] >> compare.py::flake8 [GOOD] >> test_update_script_tables.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/sequencer_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_auditsettings/ut_auditsettings.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/scale_recommender_policy_ut.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/storage_pool_info_ut.cpp >> conftest.py::flake8 [GOOD] >> test_join.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/hive_impl_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 >> test.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/hive/object_distribution_ut.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serializable/flake8 >> test.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/wardens/flake8 >> test_liveness_wardens.py::flake8 [GOOD] >> test_select.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> gen-report.py::flake8 [GOOD] >> test_tpcds.py::flake8 [GOOD] >> test_tpch_spilling.py::flake8 [GOOD] >> test_cp_ic.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/result_compare/flake8 >> compare.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/flake8 >> test_update_script_tables.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp >> test_dispatch.py::flake8 [GOOD] >> test_retry.py::flake8 [GOOD] >> test_retry_high_rate.py::flake8 [GOOD] >> alter_compression.py::flake8 [GOOD] >> base.py::flake8 [GOOD] |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/generic/streaming/flake8 >> test_join.py::flake8 [GOOD] |51.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3of4.cpp |51.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/lib/cmds/ut/flake8 >> test.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/patch.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/access_service_ut.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/main.cpp >> ydb-tests-datashard-s3::import_test [GOOD] >> MatchPredicate::EmptyWhere [GOOD] >> MatchPredicate::RightColumn [GOOD] >> MatchPredicate::EmptyMatch [GOOD] >> 
MatchPredicate::NotLess [GOOD] >> MatchPredicate::Less [GOOD] >> MatchPredicate::Between [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 >> test.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/mirror3dc.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/index_restore_get.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_params_ut.cpp >> test_ttl.py::flake8 [GOOD] >> ydb-tests-olap-ttl_tiering::import_test [GOOD] >> __main__.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/space_check.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/scrub_fast.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/flake8 >> test_select.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/result_convert/flake8 >> gen-report.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/shred.cpp |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/large/flake8 >> test_tpch_spilling.py::flake8 [GOOD] |51.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/flake8 >> test_retry_high_rate.py::flake8 [GOOD] |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/self_heal.cpp |51.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/runtime/kqp_compute_scheduler_ut.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/flake8 >> base.py::flake8 [GOOD] >> test.py::flake8 [GOOD] >> ydb-tests-functional-large_serializable::import_test [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/validation.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/sync.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/import_test >> ydb-tests-datashard-s3::import_test [GOOD] >> test_compatibility.py::flake8 [GOOD] >> test_data_type.py::flake8 [GOOD] >> test_example.py::flake8 [GOOD] >> test_export_s3.py::flake8 [GOOD] >> test_followers.py::flake8 [GOOD] >> test_rolling.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_stress.py::flake8 [GOOD] >> udf/test_datetime2.py::flake8 [GOOD] >> udf/test_digest.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/flake8 >> test_ttl.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/snapshots.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_malfunction.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> conftest.py::flake8 [GOOD] >> test_auditlog.py::flake8 [GOOD] >> ydb-tests-tools-pq_read-test::import_test [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/import_test >> ydb-tests-olap-ttl_tiering::import_test [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/multiget.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/tools/visualize_portions/flake8 >> __main__.py::flake8 [GOOD] |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/pushdown/ut/unittest >> MatchPredicate::Between [GOOD] >> 
ydb-tests-functional-blobstorage::import_test [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_prefixed_vector_ut.cpp |51.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part14/ydb-tests-fq-yt-kqp_yt_file-part14 |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/immediate_controls_configurator_ut.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 >> test.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_vector_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_cache_ut.cpp |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/large_serializable/import_test >> ydb-tests-functional-large_serializable::import_test [GOOD] >> test.py::flake8 [GOOD] >> __main__.py::flake8 [GOOD] |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_write.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/configs_dispatcher_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_multishard_ut.cpp |51.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source/ut_external_data_source.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/feature_flags_configurator_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/defrag.cpp |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/compatibility/flake8 >> udf/test_digest.py::flake8 [GOOD] >> test_alter_ops.py::flake8 [GOOD] >> test_copy_ops.py::flake8 [GOOD] >> test_scheme_shard_operations.py::flake8 [GOOD] |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_backup.cpp |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/flake8 >> test_auditlog.py::flake8 [GOOD] |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/pq_read/test/import_test >> ydb-tests-tools-pq_read-test::import_test [GOOD] |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_stats_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/jaeger_tracing_configurator_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl_utility.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_normalizer.cpp >> common.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_rename.py::flake8 [GOOD] >> test_common.py::flake8 [GOOD] >> test_yandex_cloud_mode.py::flake8 [GOOD] >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] >> integrations_test.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_http_api.py::flake8 [GOOD] >> test_multinode_cluster.py::flake8 [GOOD] >> test_recompiles_requests.py::flake8 [GOOD] |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/net_classifier_updater_ut.cpp |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/import_test >> ydb-tests-functional-blobstorage::import_test [GOOD] |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_tenants.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/log_settings_configurator_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_query_ut.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_ttl/ut_ttl.cpp |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ycloud/impl/user_account_service_ut.cpp |51.5%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/fq/yt/kqp_yt_file/part5/flake8 >> test.py::flake8 [GOOD] |51.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_types_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_analyze_ut.cpp |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/ydb_serializable/flake8 >> __main__.py::flake8 [GOOD] |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/modifications_validator_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/query/kqp_limits_ut.cpp |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/audit.pb.{h, cc} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tenant_slot_broker.{pb.h ... grpc.pb.h} |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/flake8 >> test_scheme_shard_operations.py::flake8 [GOOD] >> conftest.py::flake8 [GOOD] >> test_auth_system_views.py::flake8 [GOOD] >> test_create_users.py::flake8 [GOOD] >> test_create_users_strict_acl_checks.py::flake8 [GOOD] >> test_db_counters.py::flake8 [GOOD] >> test_dynamic_tenants.py::flake8 [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::flake8 [GOOD] >> test_storage_config.py::flake8 [GOOD] >> test_system_views.py::flake8 [GOOD] >> test_tenants.py::flake8 [GOOD] >> test_user_administration.py::flake8 [GOOD] >> test_users_groups_with_acl.py::flake8 [GOOD] |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/compute.pb.{h, cc} |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/flake8 >> test_rename.py::flake8 [GOOD] |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/console/console_ut_configs.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/columnshard/ut_rw/ut_columnshard_read_write.cpp |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/library/ut/flake8 >> integrations_test.py::flake8 [GOOD] |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/private_api.pb.{h, cc} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/memory_controller_config.{pb.h ... grpc.pb.h} |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/http_api/flake8 >> test_http_api.py::flake8 [GOOD] |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/flake8 >> test_yandex_cloud_queue_counters.py::flake8 [GOOD] |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp.{pb.h ... grpc.pb.h} |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/flake8 >> test_recompiles_requests.py::flake8 [GOOD] >> test.py::flake8 [GOOD] |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.{pb.h ... grpc.pb.h} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/schemeshard/operations.{pb.h ... grpc.pb.h} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/drivemodel.{pb.h ... grpc.pb.h} |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/flake8 >> test_users_groups_with_acl.py::flake8 [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/yql_translation_settings.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_base.{pb.h ... grpc.pb.h} >> test_fifo_messaging.py::flake8 [GOOD] >> test_generic_messaging.py::flake8 [GOOD] >> test_polling.py::flake8 [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/local.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/scheme_log.{pb.h ... 
grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/compile_service_config.{pb.h ... grpc.pb.h} |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/tests/flake8 >> test.py::flake8 [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_tx.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_database.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/flat_tx_scheme.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters.{pb.h ... grpc.pb.h} |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/flake8 >> test_polling.py::flake8 [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/config.{pb.h ... grpc.pb.h} |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ut/group_test_ut.cpp |51.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/backup/common/ut/ydb-core-backup-common-ut |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/whiteboard_flags.{pb.h ... grpc.pb.h} >> ydb-tests-tools-kqprun-tests::import_test [GOOD] >> Backpressure::MonteCarlo [GOOD] >> conftest.py::flake8 [GOOD] >> s3_helpers.py::flake8 [GOOD] >> test_bindings_0.py::flake8 [GOOD] >> test_bindings_1.py::flake8 [GOOD] >> test_compressions.py::flake8 [GOOD] >> test_early_finish.py::flake8 [GOOD] >> test_explicit_partitioning_0.py::flake8 [GOOD] >> test_explicit_partitioning_1.py::flake8 [GOOD] >> test_format_setting.py::flake8 [GOOD] >> test_formats.py::flake8 [GOOD] >> test_inflight.py::flake8 [GOOD] >> test_insert.py::flake8 [GOOD] >> test_public_metrics.py::flake8 [GOOD] >> test_push_down.py::flake8 [GOOD] >> test_s3_0.py::flake8 [GOOD] >> test_s3_1.py::flake8 [GOOD] >> test_size_limit.py::flake8 [GOOD] >> test_statistics.py::flake8 [GOOD] >> test_streaming_join.py::flake8 [GOOD] >> test_test_connection.py::flake8 [GOOD] >> test_validation.py::flake8 [GOOD] >> test_ydb_over_fq.py::flake8 [GOOD] >> test_yq_v2.py::flake8 [GOOD] |51.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |51.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/kqprun/tests/import_test >> ydb-tests-tools-kqprun-tests::import_test [GOOD] |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/folder_service/proto/config.pb.{h, cc}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut_client/unittest >> Backpressure::MonteCarlo [GOOD] Test command err:
Clock# 1970-01-01T00:00:00.000000Z elapsed# 0.000021s EventsProcessed# 0 clients.size# 0
[... ~6 simulated hours of Clock#/elapsed#/EventsProcessed#/clients.size# progress entries elided (trace already truncated mid-stream by the CI) ...]
Clock# 1970-01-01T05:59:56.717086Z elapsed# 16.309782s EventsProcessed# 9083361 clients.size# 3
|51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jmespath/py3/libpy3python-jmespath-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/boto3/py3/libpy3python-boto3-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/jedi/py3/libpy3python-jedi-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Werkzeug/py3/libpy3python-Werkzeug-py3.global.a |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/flake8 >> test_yq_v2.py::flake8 [GOOD] |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/Pygments/py3/libpy3python-Pygments-py3.global.a |51.4%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/decorator/py3/libpy3python-decorator-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipython/py3/libpy3python-ipython-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ipdb/py3/libpy3python-ipdb-py3.global.a >> EncryptedFileSerializerTest::AddByte [GOOD] >> EncryptedFileSerializerTest::EmptyFile [GOOD] >> EncryptedFileSerializerTest::SplitOnBlocks [GOOD] >> EncryptedFileSerializerTest::DeleteLastByte [GOOD] >> EncryptedFileSerializerTest::WrongParametersForDeserializer [GOOD] >> EncryptedFileSerializerTest::RemoveLastBlock [GOOD] >> EncryptedFileSerializerTest::BigHeaderSize [GOOD] >> EncryptedFileSerializerTest::BigBlockSize [GOOD] >> PathsNormalizationTest::NormalizeExportPrefix [GOOD] >> PathsNormalizationTest::NormalizeItemPath [GOOD] >> EncryptedFileSerializerTest::ChangeAnyByte [GOOD] >> EncryptedFileSerializerTest::ReadPartial [GOOD] >> EncryptedFileSerializerTest::WrongParametersForSerializer [GOOD] >> EncryptedFileSerializerTest::SerializeWholeFileAtATime [GOOD] >> PathsNormalizationTest::NormalizeItemPrefix [GOOD] >> EncryptedFileSerializerTest::IVSerialization [GOOD] >> EncryptedFileSerializerTest::RestoreFromState [GOOD] >> ydb-tests-postgres_integrations-library-ut::import_test [GOOD] |51.5%| [AR] {BAZEL_DOWNLOAD}
$(B)/contrib/python/parso/py3/libpy3python-parso-py3.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/xmltodict/py3/libpy3python-xmltodict-py3.global.a |51.5%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/s3transfer/py3/libpy3python-s3transfer-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/wcwidth/py3/libpy3python-wcwidth-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/stack-data/libpy3contrib-python-stack-data.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/py3/libpy3python-moto-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/traitlets/py3/libpy3python-traitlets-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pexpect/py3/libpy3python-pexpect-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/asttokens/libpy3contrib-python-asttokens.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/py/py3/libpy3python-py-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/matplotlib-inline/libpy3contrib-python-matplotlib-inline.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/executing/libpy3contrib-python-executing.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/responses/py3/libpy3python-responses-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pure-eval/libpy3contrib-python-pure-eval.global.a |51.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/library/ut/import_test >> ydb-tests-postgres_integrations-library-ut::import_test [GOOD] >> conftest.py::flake8 [GOOD] >> test_2_selects_limit.py::flake8 [GOOD] >> test_3_selects.py::flake8 [GOOD] >> test_bad_syntax.py::flake8 [GOOD] >> test_base.py::flake8 [GOOD] >> test_big_state.py::flake8 [GOOD] >> test_continue_mode.py::flake8 [GOOD] >> test_cpu_quota.py::flake8 [GOOD] >> test_delete_read_rules_after_abort_by_system.py::flake8 [GOOD] >> test_disposition.py::flake8 [GOOD] >> test_eval.py::flake8 [GOOD] >> test_invalid_consumer.py::flake8 [GOOD] >> test_kill_pq_bill.py::flake8 [GOOD] >> test_mem_alloc.py::flake8 [GOOD] >> test_metrics_cleanup.py::flake8 [GOOD] >> test_pq_read_write.py::flake8 [GOOD] |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/pytz/py3/libpy3python-pytz-py3.global.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/psutil/py3/libpy3python-psutil-py3.global.a |51.6%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/ptyprocess/py3/libpy3python-ptyprocess-py3.global.a |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/coordinator_ut.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/prompt-toolkit/py3/libpy3python-prompt-toolkit-py3.global.a >> test_public_metrics.py::flake8 [GOOD] >> test_read_rules_deletion.py::flake8 [GOOD] >> test_recovery.py::flake8 [GOOD] >> test_recovery_match_recognize.py::flake8 [GOOD] >> test_recovery_mz.py::flake8 [GOOD] >> test_restart_query.py::flake8 [GOOD] >> test_row_dispatcher.py::flake8 [GOOD] >> test_select_1.py::flake8 [GOOD] >> test_select_limit.py::flake8 [GOOD] >> test_select_limit_db_id.py::flake8 [GOOD] >> test_select_timings.py::flake8 
[GOOD] >> test_stop.py::flake8 [GOOD] >> test_watermarks.py::flake8 [GOOD] >> test_yds_bindings.py::flake8 [GOOD] >> test_yq_streaming.py::flake8 [GOOD] |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_replication_reboots/ut_replication_reboots.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/leader_election_ut.cpp |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/common/ut/unittest >> EncryptedFileSerializerTest::RestoreFromState [GOOD] |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/query_replay.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/main.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/data/kqp_read_null_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay_yt/query_compiler.cpp |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yds/flake8 >> test_yq_streaming.py::flake8 [GOOD] |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_transfer/ut_transfer.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/row_dispatcher_ut.cpp |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_agg_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_extract_predicate_unpack_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/ydb_convert_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_merge_ut.cpp |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/table_description_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_returning_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sqlin_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_kv_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ydb_convert/compression_ut.cpp |51.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/olap_workload/olap_workload |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_named_expressions_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_sort_ut.cpp |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/quotas_manager.pb.{h, cc} |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ranges_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/row_dispatcher/ut/topic_session_ut.cpp |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/s3_settings.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/actors/protos/interconnect.pb.{h, cc} >> ydb-tests-functional-scheme_shard::import_test [GOOD] |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_not_null_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/opt/kqp_ne_ut.cpp |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/control_plane_proxy.pb.{h, cc} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/table_stats.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/filestore_config.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/proto/dq_tasks.pb.{h, cc} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet_counters_aggregator.{pb.h ... 
grpc.pb.h} |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/import_test >> ydb-tests-functional-scheme_shard::import_test [GOOD] |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_formats.pb.{h, cc} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console.{pb.h ... grpc.pb.h} |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/python/botocore/py3/libpy3python-botocore-py3.global.a |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/console_tenant.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tablet.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/datashard_backup.{pb.h ... grpc.pb.h} |51.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part5/ydb-tests-fq-yt-kqp_yt_file-part5 |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_index_build.cpp |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_physical.{pb.h ... grpc.pb.h} |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/protos/common.pb.{h, cc} |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/protobuf_udf/libessentials-minikql-protobuf_udf.a |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_config.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/ssa.pb.{h, cc} |51.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/fifo/functional-sqs-merge_split_common_table-fifo |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/formats/arrow/protos/fields.pb.{h, cc} |51.8%| [CC] {BAZEL_DOWNLOAD, FAILED} $(S)/ydb/core/mind/hive/hive_ut.cpp |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication >> ydb-tests-functional-sqs-messaging::import_test [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/index_builder.{pb.h ... 
grpc.pb.h} |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/keyvalue/keyvalue_ut_trace.cpp |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/import_test >> ydb-tests-functional-sqs-messaging::import_test [GOOD] |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/monlib/encode/unistat/libmonlib-encode-unistat.a |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/find_root/libpy3library-python-find_root.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/plugins/libpy3python-pytest-plugins.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/recipes/common/libpy3library-recipes-common.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/pytest/libpy3library-python-pytest.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/filter/libpy3python-testing-filter.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/testing/recipe/libpy3python-testing-recipe.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/library/python/monlib/libpy3library-python-monlib.a |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/driver_lib/run/auto_config_initializer_ut.cpp |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/statistics.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/core/issue/protos/issue_id.pb.{h, cc} |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/yql/essentials/tools/sql2yql/sql2yql |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/lib/libpy3tests-datashard-lib.global.a |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/test_meta/libpy3tests-library-test_meta.global.a |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/node_whiteboard.{pb.h ... 
grpc.pb.h} |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/yql/essentials/tools/sql2yql/sql2yql.cpp |51.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/ttl/ydb-tests-functional-ttl |51.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_34b0f266abff6cf7a95311821b.o |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/lib/libpy3tests-sql-lib.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/simple_queue/workload/libpy3stress-simple_queue-workload.global.a |51.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_9beede1c5ddb1a5202bb8125bf.o |51.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_65ac58c27d43a55d0ea4eda626.o |51.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/ut/ydb-tests-tools-nemesis-ut |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/compatibility/libpy3tests-library-compatibility.global.a |51.8%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/tests/compatibility/objcopy_bacd88684c3eece7794e086ee2.o |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/validation.pb.{h, cc} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/long_tx_service.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tracing.{pb.h ... grpc.pb.h} |51.8%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/protos/marker.pb.{h, cc} |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut >> ydb-tests-functional-api::import_test [GOOD] |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_scheme.{pb.h ... grpc.pb.h} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/yql/essentials/public/types/yql_types.pb.{h, cc} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/task_controller.pb.{h, cc} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/config/protos/nodes_manager.pb.{h, cc} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blobstorage_vdisk_config.{pb.h ... grpc.pb.h} |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/alloc.{pb.h ... grpc.pb.h} |51.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serializable/ydb-tests-functional-serializable |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters.{pb.h ... 
grpc.pb.h} |51.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |51.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/import_test >> ydb-tests-functional-api::import_test [GOOD] |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_async_input_helper_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_ext_blobs_multiple_channels.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_actor_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap/ut_olap.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.global.a |51.8%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.global.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_compute_issues_buffer_ut.cpp |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/dq/actors/compute/ut/dq_source_watermark_tracker_ut.cpp |51.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/split_merge/ydb-tests-datashard-split_merge |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/contrib/libs/opentelemetry-proto/opentelemetry/proto/common/v1/common.{pb.h ... grpc.pb.h} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/blob_depot_config.{pb.h ... grpc.pb.h} |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |51.7%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/rescompressor/rescompressor |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/dq/actors/protos/dq_status_codes.pb.{h, cc} |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/http_config.{pb.h ... 
grpc.pb.h} |51.7%| [BN] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/stability/tool/ydb_cli |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/initializer/ut/ut_init.cpp |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/contrib/restricted/google/benchmark/librestricted-google-benchmark.a |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/wrappers/s3_wrapper_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/ut_backup_collection_reboots.cpp |51.7%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_query.pb.{h, cc} |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/base/board_subscriber_ut.cpp |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut >> ydbd_slice::import_test [GOOD] |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ut_subdomain_reboots.cpp >> olap_workload::import_test [GOOD] |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tools/ydbd_slice/bin/import_test >> ydbd_slice::import_test [GOOD] |51.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/olap_workload/import_test >> olap_workload::import_test [GOOD] |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/ydb_status_codes.pb.{h, cc} |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/metadata/secret/ut/ut_secret.cpp |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/kqp_stats.{pb.h ... grpc.pb.h} |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |51.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/counters_schemeshard.{pb.h ... grpc.pb.h} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/annotations/sensitive.pb.{h, cc} |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/protos/draft/ydb_dynamic_config.pb.{h, cc} |51.6%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_datashard.{pb.h ... grpc.pb.h} |51.5%| [PB] {BAZEL_DOWNLOAD} $(B)/ydb/core/protos/tx_columnshard.{pb.h ... 
grpc.pb.h} |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/control/immediate_control_board_actor_ut.cpp |51.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/blobstorage_node_warden_ut.cpp |51.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdsk_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/codecs_ut.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/library/cpp/testing/gbenchmark/libcpp-testing-gbenchmark.a |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgwriter_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogdata_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmsgimpl_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogmem_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/synclog/blobstorage_synclogkeeper_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/nodewarden/bind_queue_ut.cpp |51.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/assign_tx_id_ut.cpp |51.7%| [AR] {BAZEL_DOWNLOAD} $(B)/yql/essentials/minikql/comp_nodes/no_llvm/libminikql-comp_nodes-no_llvm.a |51.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/vdisk_restart.cpp |51.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/build_info/liblibrary-cpp-build_info.a |51.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure >> ydb-tests-functional-sqs-cloud::import_test [GOOD] |52.0%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/library/cpp/svnversion/liblibrary-cpp-svnversion.a |52.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/import_test >> ydb-tests-functional-sqs-cloud::import_test [GOOD] |53.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |54.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |55.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings |55.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/log_backend/ut/ydb-core-log_backend-ut |55.7%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/yt/yt/client/libyt-yt-client.a |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_event_managers.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_unique_index.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_test_functions.cpp |55.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_vector_index.cpp |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index/ut_async_index.cpp |55.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |55.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/sentinel_ut_unstable.cpp |56.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/blob_depot_fat.cpp |56.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/keyvalue/grpc_service_ut.cpp |56.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp |58.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan >> JsonEnvelopeTest::ArrayItem [GOOD] |59.1%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_counters_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_quorum_tracker_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_request_reporting_ut.cpp |59.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_patch_ut.cpp |59.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_sequence_ut.cpp |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_get_ut.cpp |59.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |59.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/dsproxy/ut/dsproxy_put_ut.cpp >> JsonEnvelopeTest::Escape [GOOD] >> JsonEnvelopeTest::Simple [GOOD] >> JsonEnvelopeTest::BinaryData [GOOD] >> JsonEnvelopeTest::NoReplace [GOOD] |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/ut_helpers.cpp |59.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ut_extsubdomain_reboots.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication_huge.cpp |59.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/replication.cpp |60.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/log_backend/ut/unittest >> JsonEnvelopeTest::NoReplace [GOOD] |61.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |62.2%| [BN] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/tool/cfg |62.8%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/yt/yt/client/libyt-yt-client.a |64.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |66.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/s3_recipe/s3_recipe |66.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |66.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |66.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/gen_restarts.cpp |66.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/huge_migration_ut.cpp |66.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_serverless_reboots/ut_serverless_reboots.cpp |66.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/mon_reregister_ut.cpp |66.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/grpc_streaming/ut/grpc/libgrpc_streaming-ut-grpc.a |66.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_vdisk/vdisk_test.cpp |67.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_streaming/grpc_streaming_ut.cpp |67.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor |67.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_stats/ut_stats.cpp |67.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/fq/libs/ydb/ut/ydb_ut.cpp |67.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |68.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/grouper_ut.cpp |68.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/mv_object_map_ut.cpp |68.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/group_mapper_ut.cpp |68.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/osiris.cpp |69.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut |69.6%| [LD] 
{BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow |70.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/address_classification/net_classifier_ut.cpp |70.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots |70.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/cancel_tx_ut.cpp |70.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |70.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/flat_ut.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/locks_ut.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/client/object_storage_listing_ut.cpp |70.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/table_writer_ut.cpp >> DqSpillingFileTests::FdCounterSingleFile [GOOD] >> DqSpillingFileTests::FdCounterMultiFile [GOOD] >> DqSpillingFileTests::Simple [GOOD] >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] |70.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/params_ut.cpp |70.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_actors_ut.cpp |70.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/read_only_pdisk.cpp >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] >> DqSpillingFileTests::ReadError [GOOD] |70.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/operation_helpers_ut.cpp |70.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_cxx_database_ut.cpp |70.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |70.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::FdCounterMultiFile [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_TotalSizeLimitExceeded [GOOD] Test command err: 2025-06-03T10:20:58.281972Z :KQP_COMPUTE ERROR: spilling_file.cpp:425: [Write] Total size limit exceeded. 
From: [1:5:2052], blobId: 2, bytes: 50 |70.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Simple [GOOD] >> DqSpillingFileTests::SingleFilePart [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ReadError [GOOD] Test command err: 2025-06-03T10:20:58.336118Z :KQP_COMPUTE ERROR: spilling_file.cpp:968: [Read async] file: /home/runner/.ya/build/build_root/u93c/001a2c/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_8555/node_1_36095c4a-59ed6717-6728239-8732edc8/1_test_0, blobId: 0, offset: 0, error: (Error 2: No such file or directory) util/system/file.cpp:936: can't open "/home/runner/.ya/build/build_root/u93c/001a2c/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/testing_out_stuff/chunk3/dq_spilling_8555/node_1_36095c4a-59ed6717-6728239-8732edc8/1_test_0" with mode RdOnly (0x00000008) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::ThreadPoolQueueOverflow [GOOD] Test command err: 2025-06-03T10:20:58.377424Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-03T10:20:58.377485Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-03T10:20:58.377499Z :KQP_COMPUTE ERROR: spilling_file.cpp:476: [Write] Can not run operation [Write] Can not run operation 2025-06-03T10:20:58.378752Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-03T10:20:58.378773Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation 2025-06-03T10:20:58.378798Z :KQP_COMPUTE ERROR: spilling_file.cpp:357: [CloseFile] Can not run operation |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_s3fifo_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice.cpp >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_versions.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_redo.cpp |70.3%| [LD]
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |70.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part_multi.cpp |70.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/grpc_services/rpc_calls_ut.cpp |70.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |70.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::SingleFilePart [GOOD] |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/ut/queue_id_ut.cpp |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/ydb/ydb |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_tables_ut.cpp >> DqSpillingFileTests::NoSpillingService [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::Write_FileSizeLimitExceeded [GOOD] Test command err: 2025-06-03T10:20:58.797389Z :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:5:2052], blobId: 2, bytes: 50 |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_gclogic_ut.cpp |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |70.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_bloom.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_part.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_database_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_executor_leases_ut.cpp >> functional-sqs-merge_split_common_table-fifo::import_test [GOOD] |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_localwriter_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncquorum_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_data_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/vdisk/syncer/blobstorage_syncer_broker_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/workload_service/ut/kqp_workload_service_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_table_part_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_row_versions_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/flat_range_cache_ut.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/flat_test_db.cpp |70.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::NoSpillingService [GOOD] |70.3%| [BN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stability/tool/ydb_cli |70.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |70.3%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut/objcopy_9f29b589555ed64086e5eadccf.o |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_handle_ut.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_switchable_ut.cpp |70.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/merge_split_common_table/fifo/import_test >> functional-sqs-merge_split_common_table-fifo::import_test [GOOD] |70.3%| [CC] 
{BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_nodes.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_btree_index_iter_charge.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_charge.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/shared_cache_clock_pro_ut.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_utils_ut.cpp >> DqSpillingFileTests::MultipleFileParts [GOOD] >> ydb-tests-functional-ttl::import_test [GOOD] |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_datetime.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_data_cleanup.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction_multi.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_comp_gen.cpp |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_compaction.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_other.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_forward.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_proto.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_memtable.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_iface.cpp |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ldap_auth_provider/ldap_auth_provider_ut.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_sausage.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_self.cpp |70.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::MultipleFileParts [GOOD] >> ydb-tests-tools-nemesis-ut::import_test [GOOD] >> TFlatDatabasePgTest::BasicTypes [GOOD] |70.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_iterator.cpp |70.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/import_test >> ydb-tests-functional-ttl::import_test [GOOD] |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_rename_table_column.cpp |70.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_db_scheme.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_pages.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_slice_loader.cpp |70.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_screen.cpp |70.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yql/tools/dq/worker_node/main.cpp |70.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/import_test >> ydb-tests-tools-nemesis-ut::import_test [GOOD] |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_document_api_ut.cpp |70.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache.cpp |70.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_service_ut.cpp |70.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_scripts_ut.cpp |70.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_decimal.cpp |70.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |70.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/ut/ut_shared_sausagecache_actor.cpp |70.9%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/core/tablet_flat/ut/ut_stat.cpp |70.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/restart_pdisk.cpp |70.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydb/ydb |71.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut >> ydb-tests-functional-serializable::import_test [GOOD] |71.0%| [BN] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/ydb_cli |71.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest >> TFlatDatabasePgTest::BasicTypes [GOOD] |71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |71.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |71.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest |71.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serializable/import_test >> ydb-tests-functional-serializable::import_test [GOOD] |71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |71.6%| [LD] {BAZEL_DOWNLOAD} $(B)/contrib/python/moto/bin/moto_server >> DqOutputWideChannelWithStorageTests::Overflow [GOOD] >> DqOutputWideChannelTests::SingleRead [GOOD] >> DqOutputWideChannelTests::PartialRead [GOOD] >> DqOutputWideChannelTests::PopAll [GOOD] |72.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/service/kqp_qs_queries_ut.cpp >> TBlobStorageHullHugeHeap::AllocateAllReleaseAll [GOOD] >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] |72.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/yaml_config/tools/dump/main.cpp >> TIncrHugeBasicTest::Defrag |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllSerializeDeserializeReleaseAll [GOOD] |72.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelTests::PopAll [GOOD] |72.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/iceberg_ut_data.cpp >> TIncrHugeBasicTest::Recovery [GOOD] >> ConvertUnboxedValueToArrowAndBack::Struct [GOOD] >> ConvertUnboxedValueToArrowAndBack::OptionalOfOptional [GOOD] >> ConvertUnboxedValueToArrowAndBack::Tuple [GOOD] >> ConvertUnboxedValueToArrowAndBack::VariantOverStruct [GOOD] |72.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/federated_query/generic_ut/kqp_generic_provider_ut.cpp |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Recovery [GOOD] >> Config::ExcludeScope [GOOD] |72.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::VariantOverStruct [GOOD] |72.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest ------- [LD] {default-linux-x86_64, relwithdebinfo} $(B)/yql/tools/yqlrun/yqlrun ld.lld: warning: version script assignment of 'global' to symbol '__after_morecore_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'daylight' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'environ' failed: symbol not defined ld.lld: 
warning: version script assignment of 'global' to symbol '_environ' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__malloc_initialize_hook' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'program_invocation_short_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'timezone' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tzname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__libc_start_main' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensAfter' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateHappensBefore' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesBegin' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreWritesEnd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsBegin' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'AnnotateIgnoreReadsEnd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'abort' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'accept' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'accept4' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'asctime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'asctime_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'asprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'backtrace' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'backtrace_symbols' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'bind' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'canonicalize_file_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'capget' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'capset' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'cfree' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'clock_getres' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'clock_gettime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'clock_settime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'close' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__close' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'closedir' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'confstr' failed: symbol not defined 
ld.lld: warning: version script assignment of 'global' to symbol 'connect' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'creat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'creat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ctermid' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ctime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ctime_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__cxa_atexit' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dlclose' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dl_iterate_phdr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dlopen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'drand48_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup2' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'dup3' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'endgrent' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'endpwent' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_create1' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_ctl' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_pwait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'epoll_wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_aton' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_aton_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_hostton' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_line' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_ntoa' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_ntoa_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ether_ntohost' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'eventfd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'eventfd_read' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'eventfd_write' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '_exit' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fclose' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fdopen' failed: symbol not defined ld.lld: warning: version 
script assignment of 'global' to symbol 'fflush' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fgetxattr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'flistxattr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fmemopen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fopen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fopen64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fopencookie' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fork' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fread' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'freopen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'freopen64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'frexp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'frexpf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'frexpl' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fscanf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fstatfs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fstatfs64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fstatvfs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fstatvfs64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ftime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'fwrite' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__fxstat64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getaddrinfo' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'get_current_dir_name' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getcwd' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getdelim' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__getdelim' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getgroups' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostbyaddr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostbyaddr_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostbyname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostbyname2' failed: symbol not defined ld.lld: warning: version script assignment of 'global' 
to symbol 'gethostbyname2_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostbyname_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostent' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gethostent_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getifaddrs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getitimer' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getline' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getmntent' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getmntent_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getnameinfo' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getpass' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getpeername' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getresgid' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getresuid' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getsockname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getsockopt' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gettimeofday' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'getxattr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'glob' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'glob64' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gmtime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'gmtime_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'iconv' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'if_indextoname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'if_nametoindex' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inet_aton' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inet_ntop' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inet_pton' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'initgroups' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'inotify_init1' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'ioctl' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__isoc99_fprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__isoc99_fscanf' failed: symbol not defined ld.lld: warning: version script 
assignment of 'global' to symbol '__isoc99_printf' failed: symbol not defined
ld.lld: warning: version script assignment of 'global' to symbol '<symbol>' failed: symbol not defined
        [the identical warning is emitted once per symbol; in this link step it was also reported for:]
        __isoc99_scanf, __isoc99_snprintf, __isoc99_sprintf, __isoc99_sscanf, __isoc99_vfprintf,
        __isoc99_vfscanf, __isoc99_vprintf, __isoc99_vscanf, __isoc99_vsnprintf, __isoc99_vsprintf,
        __isoc99_vsscanf, kill, lgamma, lgammaf, lgammaf_r, lgammal, lgammal_r, lgamma_r, lgetxattr,
        listen, listxattr, llistxattr, localtime, localtime_r, longjmp, lrand48_r, __lxstat, __lxstat64,
        mbsnrtowcs, mbsrtowcs, mbstowcs, memchr, memcmp, memcpy, memmem, memmove, memrchr, memset,
        mincore, mktime, mlock, mlockall, mmap, mmap64, modf, modff, modfl, munlock, munlockall, munmap,
        nanosleep, _obstack_begin, _obstack_begin_1, _obstack_newchunk, on_exit, open, open64, opendir,
        open_memstream, open_wmemstream, __overflow, pipe, pipe2, poll, ppoll, prctl, pread, pread64,
        preadv, preadv64, printf, process_vm_readv, process_vm_writev, pthread_attr_getaffinity_np,
        pthread_attr_getdetachstate, pthread_attr_getguardsize, pthread_attr_getinheritsched,
        pthread_attr_getschedparam, pthread_attr_getschedpolicy, pthread_attr_getscope,
        pthread_attr_getstack, pthread_attr_getstacksize, pthread_barrierattr_getpshared,
        pthread_barrier_destroy, pthread_barrier_init, pthread_barrier_wait, pthread_condattr_getclock,
        pthread_condattr_getpshared, pthread_cond_broadcast, pthread_cond_destroy, pthread_cond_init,
        pthread_cond_signal, pthread_cond_timedwait, pthread_cond_wait, pthread_create, pthread_detach,
        pthread_getschedparam, pthread_join, pthread_kill, pthread_mutexattr_getprioceiling,
        pthread_mutexattr_getprotocol, pthread_mutexattr_getpshared, pthread_mutexattr_getrobust,
        pthread_mutexattr_getrobust_np, pthread_mutexattr_gettype, pthread_mutex_destroy,
        pthread_mutex_init, pthread_mutex_lock, pthread_mutex_timedlock, pthread_mutex_trylock,
        pthread_mutex_unlock, pthread_once, pthread_rwlockattr_getkind_np, pthread_rwlockattr_getpshared,
        pthread_rwlock_destroy, pthread_rwlock_init, pthread_rwlock_rdlock, pthread_rwlock_timedrdlock,
        pthread_rwlock_timedwrlock, pthread_rwlock_tryrdlock, pthread_rwlock_trywrlock,
        pthread_rwlock_unlock, pthread_rwlock_wrlock, pthread_setcancelstate, pthread_setcanceltype,
        pthread_setname_np, pthread_spin_destroy, pthread_spin_init, pthread_spin_lock,
        pthread_spin_trylock, pthread_spin_unlock, ptrace, puts, pvalloc, pwrite, pwrite64, pwritev,
        pwritev64, raise, random_r, rand_r, read, readdir, readdir64, readdir64_r, readdir_r, readv,
        realpath, recv, recvfrom, recvmsg, remquo, remquof, remquol, __res_iclose, rmdir, scandir,
        scandir64, scanf, sched_getaffinity, sched_getparam, sem_destroy, sem_getvalue, sem_init,
        sem_post, sem_timedwait, sem_trywait, sem_wait, send, sendmsg, sendto, setgrent, setitimer,
        setjmp, _setjmp, setlocale, setpwent, shmctl, sigaction, sigemptyset, sigfillset, siglongjmp,
        signal, signalfd, sigpending, sigprocmask, sigsetjmp, __sigsetjmp, sigsuspend, sigtimedwait,
        sigwait, sigwaitinfo, sincos, sincosf, sincosl, sleep, snprintf, socket, socketpair, sprintf,
        sscanf, statfs, statfs64, statvfs, statvfs64, strcasecmp, strcasestr, strchr, strchrnul, strcmp,
        strcpy, strcspn, strdup, strerror, strerror_r, strlen, strncasecmp, strncmp, strncpy, strnlen,
        strpbrk, strptime, strrchr, strspn, strstr, strtoimax, strtoumax, sysinfo, tcgetattr, tempnam,
        textdomain, time, timerfd_gettime, timerfd_settime, times, __tls_get_addr, tmpfile, tmpfile64,
        tmpnam, tmpnam_r, tsearch, __uflow, __underflow, unlink, usleep, vasprintf, vfork, vfprintf,
        vfscanf, vprintf, vscanf, vsnprintf, vsprintf, vsscanf, wait, wait3, wait4, waitid, waitpid,
        wcrtomb, wcsnrtombs, wcsrtombs, wcstombs, wordexp, __woverflow, write, writev, __wuflow,
        __wunderflow, xdr_bool, xdr_bytes, xdr_char, xdr_double, xdr_enum, xdr_float, xdr_hyper, xdr_int,
        xdr_int16_t, xdr_int32_t, xdr_int64_t, xdr_int8_t, xdr_long, xdr_longlong_t, xdrmem_create,
        xdr_quad_t, xdr_short, xdrstdio_create, xdr_string, xdr_u_char, xdr_u_hyper, xdr_u_int,
        xdr_uint16_t, xdr_uint32_t, xdr_uint64_t, xdr_uint8_t, xdr_u_long, xdr_u_longlong_t,
        xdr_u_quad_t, xdr_u_short, __xpg_strerror_r, __xstat, __xstat64
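        [editor's note: these warnings mean the link was driven by a version script whose global:
        list names symbols that this particular output does not define. Given that the affected
        names are the classic libc/pthread/xdr interception set, the script is most likely an
        exported-symbols or interposition list used by the instrumented allocator/sanitizer
        machinery in this build; the assignment is simply skipped for absent symbols, so the
        warnings are benign. A minimal sketch of the mechanism, with hypothetical file names
        (demo.c, demo.map), assuming clang and lld are available:

            # create a tiny library and a version script that names one symbol
            # the link does NOT define (memcpy is never referenced here)
            cat > demo.c <<'EOF'
            int defined_here(void) { return 42; }   /* defined: assignment succeeds */
            EOF
            cat > demo.map <<'EOF'
            {
              global:
                defined_here;   /* present in demo.o: no warning            */
                memcpy;         /* named in the script but not defined here */
              local:
                *;
            };
            EOF
            # -Wl,--undefined-version keeps this a warning on newer lld releases,
            # which otherwise treat an undefined versioned symbol as an error
            clang -shared -fPIC -fuse-ld=lld \
                  -Wl,--version-script=demo.map -Wl,--undefined-version \
                  demo.c -o libdemo.so
            # expected output:
            # ld.lld: warning: version script assignment of 'global' to symbol 'memcpy' failed: symbol not defined

        If memory serves, lld 16 flipped the default to --no-undefined-version, turning this
        condition into a hard error; the warning form seen throughout this log indicates the
        build links in the permissive mode, so these lines do not affect the build result.]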
ld.lld: warning: version script assignment of 'global' to symbol '<symbol>' failed: symbol not defined
        [an equivalent block of per-symbol warnings follows for the next link target, this time for:]
        accept, accept4, asctime, asctime_r, asprintf, backtrace, backtrace_symbols, bcopy,
        canonicalize_file_name, capget, capset, cfree, clock_getres, clock_gettime, clock_settime,
        confstr, ctermid, ctime, ctime_r, __cxa_atexit, dladdr, dlclose, dlerror, dl_iterate_phdr,
        dlopen, drand48_r, endgrent, endpwent, epoll_pwait, epoll_wait, ether_aton, ether_aton_r,
        ether_hostton, ether_line, ether_ntoa, ether_ntoa_r, ether_ntohost, eventfd_read, eventfd_write,
        _exit, fclose, fcvt, fdopen, fflush, fgetgrent, fgetgrent_r, fgetpwent, fgetpwent_r, fgets,
        fgets_unlocked, fgetxattr, flistxattr, fmemopen, fopen, fopen64, fopencookie, fork, forkpty,
        fprintf, fread, fread_unlocked, freopen, freopen64, frexp, frexpf, frexpl, fscanf, fstatfs,
        fstatfs64, fstatvfs, fstatvfs64, ftime, __fxstat, __fxstat64, __fxstatat, __fxstatat64, gcvt,
        getaddrinfo, get_current_dir_name, getcwd, getdelim, __getdelim, getenv, getgrent, getgrent_r,
        getgrgid, getgrgid_r, getgrnam, getgrnam_r, getgroups, gethostbyaddr, gethostbyaddr_r,
        gethostbyname, gethostbyname2, gethostbyname2_r, gethostbyname_r, gethostent, gethostent_r,
        gethostname, getifaddrs, getitimer, getline, getmntent, getmntent_r, getnameinfo, getpass,
        getpeername, getpwent, getpwent_r, getpwnam, getpwnam_r, getpwuid, getpwuid_r, getresgid,
        getresuid, getrlimit, getrlimit64, getrusage, getsockname, getsockopt, gettimeofday, getxattr,
        glob, glob64, gmtime, gmtime_r, iconv, if_indextoname, if_nametoindex, inet_aton, inet_ntop,
        inet_pton, initgroups, ioctl, __isoc99_fprintf, __isoc99_fscanf, __isoc99_printf, __isoc99_scanf,
        __isoc99_snprintf, __isoc99_sprintf, __isoc99_sscanf, __isoc99_vfprintf, __isoc99_vfscanf,
        __isoc99_vprintf, __isoc99_vscanf, __isoc99_vsnprintf, __isoc99_vsprintf, __isoc99_vsscanf,
        lgamma, lgammaf, lgammaf_r, lgammal, lgammal_r, lgamma_r, lgetxattr, listxattr, llistxattr,
        localtime, localtime_r, lrand48_r, __lxstat, __lxstat64, mallinfo, malloc_stats, mallopt,
        mbrtowc, mbsnrtowcs, mbsrtowcs, mbstowcs, mbtowc, memccpy, memchr, memcmp, memcpy, memmem,
        memmove, mempcpy, memrchr, memset, mincore, mktime, mlock, mlockall, mmap, mmap64, modf, modff,
        modfl, munlock, munlockall, _obstack_begin, _obstack_begin_1, _obstack_newchunk, opendir,
        open_memstream, openpty, open_wmemstream, __overflow, pipe, pipe2, poll, ppoll, prctl, pread,
        pread64, preadv, preadv64, printf, prlimit, prlimit64, process_vm_readv, process_vm_writev,
        pthread_attr_getaffinity_np, pthread_attr_getdetachstate, pthread_attr_getguardsize,
        pthread_attr_getinheritsched, pthread_attr_getschedparam, pthread_attr_getschedpolicy,
        pthread_attr_getscope, pthread_attr_getstack, pthread_attr_getstacksize,
        pthread_barrierattr_getpshared, pthread_condattr_getclock, pthread_condattr_getpshared,
        pthread_create, pthread_getschedparam, pthread_join, pthread_key_create,
        pthread_mutexattr_getprioceiling, pthread_mutexattr_getprotocol, pthread_mutexattr_getpshared,
        pthread_mutexattr_getrobust, pthread_mutexattr_getrobust_np, pthread_mutexattr_gettype,
        pthread_mutex_lock, pthread_mutex_unlock, pthread_rwlockattr_getkind_np,
        pthread_rwlockattr_getpshared, pthread_setcancelstate, pthread_setcanceltype,
        pthread_setname_np, ptrace, putenv, pvalloc, pwrite, pwrite64, pwritev, pwritev64, random_r,
        rand_r, read, readdir, readdir64, readdir64_r, readdir_r, readlink, readv, realpath, recv,
        recvfrom, recvmsg, remquo, remquof, remquol, scandir, scandir64, scanf, sched_getaffinity,
        sched_getparam, sem_destroy, sem_getvalue, sem_init, sem_post, sem_timedwait, sem_trywait,
        sem_wait, send, sendmsg, sendto, setenv, setgrent, setitimer, setlocale, setpwent, shmat,
        shmctl, sigaction, sigemptyset, sigfillset, signal, sigpending, sigprocmask, sigtimedwait,
        sigwait, sigwaitinfo, sincos, sincosf, sincosl, snprintf, socketpair, sprintf, sscanf, statfs,
        statfs64, statvfs, statvfs64, stpcpy, strcasecmp, strcasestr, strcat, strchr, strchrnul, strcmp,
        strcpy, strcspn, strdup, __strdup, strerror, strerror_r, strftime, __strftime_l, strftime_l,
        strlen, strncasecmp, strncat, strncmp, strncpy, strndup, __strndup, strnlen, strpbrk, strptime,
        strrchr, strspn, strstr, strtod, __strtod_internal, __strtod_l, strtod_l, strtof,
        __strtof_internal, __strtof_l, strtof_l, strtoimax, strtol, strtold, __strtold_internal,
        __strtold_l, strtold_l, __strtol_internal, strtoll, __strtol_l, strtol_l, __strtoll_internal,
        __strtoll_l, strtoll_l, strtoul, __strtoul_internal, strtoull, __strtoul_l, strtoul_l,
        __strtoull_internal, __strtoull_l, strtoull_l, strtoumax, strxfrm, strxfrm_l, swprintf, sysinfo
ld.lld: warning: version script assignment of 'global' to 
symbol 'tcgetattr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tempnam' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'textdomain' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'time' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'timerfd_gettime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'timerfd_settime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'times' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__tls_get_addr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tmpnam' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tmpnam_r' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tsearch' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'tzset' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__uflow' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'uname' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__underflow' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vasprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vfprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vfscanf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vscanf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vsnprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vsprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vsscanf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'vswprintf' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wait' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wait3' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wait4' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'waitid' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'waitpid' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcrtomb' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcschr' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcscmp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcscpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsftime' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcsftime_l' failed: symbol not defined ld.lld: warning: 
version script assignment of 'global' to symbol 'wcsftime_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcslen' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsnrtombs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcsrtombs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstod_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstof_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstold_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstol_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoll_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstombs' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoul_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_internal' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wcstoull_l' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemcpy' failed: 
symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemmove' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmempcpy' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wmemset' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'wordexp' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__woverflow' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'write' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'writev' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wuflow' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol '__wunderflow' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_bool' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_bytes' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_char' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_double' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_enum' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_float' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_hyper' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_int' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_int16_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_int32_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_int64_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_int8_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_long' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_longlong_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdrmem_create' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_quad_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_short' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdrstdio_create' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_string' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_u_char' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_u_hyper' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_u_int' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_uint16_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_uint32_t' failed: symbol not defined ld.lld: warning: version script assignment of 'global' to symbol 'xdr_uint64_t' 
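These warnings are usually benign noise rather than link errors: something in the link (most likely an allocator or libc-interposition layer in this build; the log does not show which component) supplies a linker version script that lists essentially the whole libc/libm surface under global:, and ld.lld emits one "symbol not defined" warning for every listed name that the final binary does not actually define. A minimal sketch of the pattern (hypothetical file name and contents, not the script this build actually uses):

    /* alloc.vers, a hypothetical version script passed to the linker
       via -Wl,--version-script=alloc.vers */
    {
      global:
        malloc;             /* defined by the interposer: exported, no warning   */
        pthread_mutex_lock; /* listed but not defined in this link: ld.lld warns */
      local:
        *;                  /* everything else keeps internal linkage           */
    };

With a script like this, ld.lld prints exactly the warning shape seen above for every global: entry it cannot match against a defined symbol.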
>> StatsFormat::FullStat [GOOD]
|72.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/huge.cpp
|72.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet_flat/benchmark/b_part.cpp
|72.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/tools/yqlrun/yqlrun
|72.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> Config::ExcludeScope [GOOD]
>> FormatTimes::DurationMs [GOOD]
|72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs
|72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt
>> StatsFormat::AggregateStat [GOOD]
>> Config::IncludeScope [GOOD]
|72.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap
|72.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::FullStat [GOOD]
|72.9%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx
>> TBlobStorageHullHugeHeap::RecoveryMode [GOOD]
>> TBlobStorageHullHugeHeap::BorderValues [GOOD]
|73.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut
|73.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationMs [GOOD]
|73.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> Config::IncludeScope [GOOD]
|73.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> StatsFormat::AggregateStat [GOOD]
|73.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump
>> DqOutputChannelWithStorageTests::Spill [GOOD]
>> DqOutputWideChannelTests::Overflow [GOOD]
>> DqOutputWideChannelTests::BigRow
>> DqOutputWideChannelTests::BigRow [GOOD]
>> DqOutputWideChannelTests::ChunkSizeLimit [GOOD]
>> FormatTimes::ParseDuration [GOOD]
|73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::BorderValues [GOOD]
|73.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/blobstorage/ut_blobstorage/balancing.cpp
|73.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/blobsan/main.cpp
|73.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut
|73.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputWideChannelTests::ChunkSizeLimit [GOOD]
>> FormatTimes::DurationUs [GOOD]
>> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD]
|73.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::ParseDuration [GOOD]
|73.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest
>> ConvertUnboxedValueToArrowAndBack::VariantOverTupleWithOptionals [GOOD]
>> DqOutputChannelTests::BigRow
>> DqOutputChannelTests::Overflow [GOOD]
|73.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/compute/common/ut/unittest >> FormatTimes::DurationUs [GOOD]
|73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/controller/target_discoverer_ut.cpp
>> TIncrHugeBlobIdDict::Basic [GOOD]
>> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD]
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/proxy_actor_ut.cpp
|73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnumRecover [GOOD]
|73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes
|73.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kesus/proxy/ut_helpers.cpp
|73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest
|73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest
|73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBlobIdDict::Basic [GOOD]
|73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::WriteReadDeleteEnum [GOOD]
|73.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/downtime_ut.cpp
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_tenants_ut.cpp
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cluster_info_ut.cpp
|73.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut
|73.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/blobsan/blobsan
|73.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk
|73.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/ut_helpers.cpp
>> DqOutputChannelTests::BigRow [GOOD]
>> DqOutputChannelTests::ChunkSizeLimit [GOOD]
|73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_maintenance_api_ut.cpp
>> TBlobStorageHullHugeHeap::WriteRestore [GOOD]
>> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD]
>> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple
>> DqOutputWideChannelWithStorageTests::Spill [GOOD]
|73.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ut_data_erasure_reboots.cpp
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_column_build/ut_column_build.cpp
|73.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|73.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelTests::ChunkSizeLimit [GOOD]
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut_common.cpp
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/blobs_sharing_ut.cpp
>> DqUnboxedValueToNativeArrowConversion::Tuple [GOOD]
>> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval
>> DqUnboxedValueToNativeArrowConversion::Struct [GOOD]
>> DqUnboxedValueDoNotFitToArrow::DictOptionalToTuple [GOOD]
>> DqUnboxedValueDoNotFitToArrow::OptionalOfOptional [GOOD]
>> DqUnboxedValueDoNotFitToArrow::LargeVariant
>> TestArrowBlockSplitter::CheckLargeScalarRows [GOOD]
>> TestArrowBlockSplitter::CheckLargeRows [GOOD]
>> DqUnboxedValueToNativeArrowConversion::VariantOverTupleWithOptionals [GOOD]
>> DqUnboxedValueToNativeArrowConversion::VariantOverStruct [GOOD]
|73.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut
|73.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/aggregations_ut.cpp
>> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/cms/cms_ut.cpp
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/clickbench_ut.cpp
|73.4%| [PY] {BAZEL_DOWNLOAD} $(B)/ydb/core/http_proxy/ut/objcopy_5fddfa8f171a3216cad65e02ab.o
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/decimal_ut.cpp
|73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeKeeperPersState::SerializeParse [GOOD]
>> THugeHeapCtxTests::Basic [GOOD]
|73.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data
|73.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut
|73.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/locks_ut.cpp
|73.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut
|73.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node
>> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD]
>> DqUnboxedValueToNativeArrowConversion::DictUtf8ToInterval [GOOD]
>> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD]
|73.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_order.cpp
|73.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueToNativeArrowConversion::VariantOverStruct [GOOD]
|73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> THugeHeapCtxTests::Basic [GOOD]
|73.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/write_ut.cpp
>> DqOutputChannelWithStorageTests::Overflow [GOOD]
>> DqOutputChannelTests::SingleRead [GOOD]
>> DqOutputChannelTests::PartialRead [GOOD]
>> DqOutputChannelTests::PopAll [GOOD]
>> TestArrowBlockSplitter::SplitWithScalars [GOOD]
>> TestArrowBlockSplitter::PassSmallBlock [GOOD]
>> TestArrowBlockSplitter::SplitLargeBlock [GOOD]
|73.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/tools/dqrun/dqrun
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/http_proxy/ut/json_proto_conversion_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/dictionary_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/sharding/ut/ut_sharding.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sparsed_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/compression_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_olap_reboots/ut_olap_reboots.cpp
>> ydb-tests-datashard-split_merge::import_test [GOOD]
|73.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueDoNotFitToArrow::LargeVariant [GOOD]
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/load_test/ut_ycsb.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/delete_ut.cpp
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqUnboxedValueToNativeArrowConversion::ListOfJsons [GOOD]
|73.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/combinatory/libut-olap-combinatory.a
|73.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/topic_ut.cpp
|73.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/ut/olap/helpers/libut-olap-helpers.a
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/commitoffset_ut.cpp
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> DqOutputChannelTests::PopAll [GOOD]
>> DqSpillingFileTests::StartError [GOOD]
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_common.cpp
|73.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> TestArrowBlockSplitter::SplitLargeBlock [GOOD]
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/balancing_ut.cpp
|73.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_common_pq.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/sys_view/ut_large.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_write_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/mirrorer_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_pq_reboots/ut_pq_reboots.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_effects_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_base_reboots/ut_base_reboots.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_inplace_update_ut.cpp
|73.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/ymq/http/ut/xml_builder_ut.cpp
|73.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dqrun/dqrun
|73.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/import_test >> ydb-tests-datashard-split_merge::import_test [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/spilling/ut/unittest >> DqSpillingFileTests::StartError [GOOD]
Test command err:
2025-06-03T10:21:05.786203Z :KQP_COMPUTE ERROR: spilling_file.cpp:239: (TIoSystemError) (Error 13: Permission denied) util/folder/path.cpp:424: could not create directory /nonexistent
2025-06-03T10:21:05.786228Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052]
2025-06-03T10:21:05.786253Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052]
2025-06-03T10:21:05.786260Z :KQP_COMPUTE ERROR: spilling_file.cpp:278: Service is broken, send error to client [1:5:2052]
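Judging by its name, the StartError case exercises this failure path on purpose: the spilling root is pointed at /nonexistent, and creating a directory directly under / as an unprivileged user fails with errno 13. A standalone sketch of just that error code (plain C, not YDB code; only the path is taken from the log above):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    int main(void) {
        /* mkdir under "/" without privileges fails with EACCES (13),
           the "Error 13: Permission denied" reported by spilling_file.cpp. */
        if (mkdir("/nonexistent", 0755) != 0)
            fprintf(stderr, "mkdir: %s (errno %d)\n", strerror(errno), errno);
        return 0;
    }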
|73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/effects/kqp_immediate_effects_ut.cpp
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/indexes_ut.cpp
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/persqueue/ut/ut_with_sdk/autoscaling_ut.cpp
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/datatime64_ut.cpp
|73.3%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/json_ut.cpp
|73.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut
|73.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/statistics_ut.cpp
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_stats_ut.cpp
|73.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/view/view_ut.cpp
>> ConvertUnboxedValueToArrowAndBack::DictUtf8ToInterval [GOOD]
>> ConvertUnboxedValueToArrowAndBack::ListOfJsons [GOOD]
>> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/optimizer_ut.cpp
|73.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_upload_rows.cpp
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/sys_view_ut.cpp
|73.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace
|73.5%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/tiering_ut.cpp
|73.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_snapshot.cpp
>> ConvertUnboxedValueToArrowAndBack::DictOptionalToTuple [GOOD]
>> ConvertUnboxedValueToArrowAndBack::LargeVariant
>> TopTest::Test1 [GOOD]
|73.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_volatile.cpp
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator_ext_blobs.cpp
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/kqp/ut/olap/kqp_olap_ut.cpp
|73.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/ut_helpers.cpp
|73.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|73.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/scheme_board/subscriber_ut.cpp
|73.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_login_large/ut_login_large.cpp
|73.8%| [TA] $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|73.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump
|73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test1 [GOOD]
>> TBlobStorageHullHugeChain::HeapAllocSmall [GOOD]
>> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD]
|73.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ut_rtmr_reboots.cpp
>> TestFileCache::Add [GOOD]
>> TestFileCache::ContainsReleased [GOOD]
>> TestFileCache::AcquireSingleFile2Times [GOOD]
>> TestFileCache::AddAfterRemoveAcquired [GOOD]
>> TestFileCache::AcquireRelease [GOOD]
>> TestFileCache::Acquire [GOOD]
>> TestFileCache::Evict [GOOD]
>> TestFileCache::Find [GOOD]
>> TBlobStorageHullHugeChain::AllocFreeAllocTest [GOOD]
>> TBlobStorageHullHugeChain::HeapAllocLargeStandard [GOOD]
>> TestFileCache::Create [GOOD]
>> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD]
>> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD]
|73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeHeap::AllocateAllFromOneChunk [GOOD]
>> TestCommon::Empty [GOOD]
>> TopTest::Test2 [GOOD]
>> TChainLayoutBuilder::TestProdConf [GOOD]
>> TestCommon::ParseCounterName [GOOD]
>> TestCommon::CollectTaskRunnerStatisticsByStage [GOOD]
>> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD]
>> TChainLayoutBuilder::TestMilestoneId [GOOD]
>> MdbEndpoingGenerator::Legacy [GOOD]
>> MdbEndpoingGenerator::Generic_NoTransformHost [GOOD]
>> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD]
|73.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|73.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut
|74.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_read_iterator.cpp
|74.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|74.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut
>> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD]
>> TSchedulerTest::Use75PercentForLargeInNonOverload [GOOD]
>> TSchedulerTest::ReserveForSmall [GOOD]
>> TSchedulerTest::DoNotReserveForSmall [GOOD]
>> TSchedulerTest::UseOnlyHalfForLargeInOverload [GOOD]
>> TSchedulerTest::SimpleFifo [GOOD]
>> TSchedulerTest::OneUserForCluster [GOOD]
>> TSchedulerTest::NewbieFirst [GOOD]
>> TSchedulerTest::HalfWorkersForSmall [GOOD]
>> TSchedulerTest::FifoAfterOneHour [GOOD]
|74.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_pg/unittest
|74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::HeapAllocLargeNonStandard [GOOD]
|74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TBlobStorageHullHugeChain::AllocFreeRestartAllocTest [GOOD]
>> TestFederatedQueryHelpers::TestCheckNestingDepth [GOOD]
|74.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots
|74.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut
|74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TopTest::Test2 [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/dq/provider/ut/unittest >> TestCommon::CollectTaskRunnerStatisticsByTask [GOOD]
>> TestFederatedQueryHelpers::TestTruncateIssues [GOOD]
>> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD]
>> FormatCSV::Instants [GOOD]
>> FormatCSV::EmptyData [GOOD]
>> FormatCSV::Common [GOOD]
>> FormatCSV::Strings [GOOD]
>> FormatCSV::Nulls [GOOD]
>> ColumnShardConfigValidation::NotCorrectZSTDCompression [GOOD]
>> ColumnShardConfigValidation::CorrectZSTDCompression [GOOD]
>> ColumnShardConfigValidation::NotCorrectLZ4Compression [GOOD]
>> ColumnShardConfigValidation::CorrectPlainCompression [GOOD]
>> ColumnShardConfigValidation::AcceptDefaultCompression [GOOD]
>> ColumnShardConfigValidation::CorrectLZ4Compression [GOOD]
>> ColumnShardConfigValidation::NotAcceptDefaultCompression [GOOD]
>> ColumnShardConfigValidation::NotCorrectPlainCompression [GOOD]
>> ConsoleDumper::Basic [GOOD]
>> ConsoleDumper::CoupleMerge [GOOD]
>> Mirror3of4::ReplicationSmall
>> ConsoleDumper::CoupleOverwrite [GOOD]
>> Init::TWithDefaultParser [GOOD]
>> ConsoleDumper::CoupleMergeOverwriteRepeated [GOOD]
>> ConsoleDumper::ReverseMerge [GOOD]
>> ConsoleDumper::ReverseOverwrite [GOOD]
>> ConsoleDumper::ReverseMergeOverwriteRepeated [GOOD]
>> ConsoleDumper::Different [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorForActorSystem [GOOD]
>> ConsoleDumper::SimpleNode [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorWithAnotherLabel [GOOD]
>> ConsoleDumper::JoinSimilar [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorInheritance
>> ConsoleDumper::DontJoinDifferent [GOOD]
>> ConsoleDumper::SimpleTenant [GOOD]
>> ConsoleDumper::SimpleNodeTenant [GOOD]
>> ConsoleDumper::SimpleHostId [GOOD]
>> ConsoleDumper::SimpleNodeId [GOOD]
>> ConsoleDumper::DontJoinNodeTenant [GOOD]
>> ConsoleDumper::JoinMultipleSimple [GOOD]
>> ConsoleDumper::MergeNode [GOOD]
>> ConsoleDumper::MergeOverwriteRepeatedNode [GOOD]
>> ConsoleDumper::Ordering [GOOD]
>> ConsoleDumper::IgnoreUnmanagedItems [GOOD]
>> YamlConfig::CollectLabels [GOOD]
>> YamlConfig::MaterializeSpecificConfig [GOOD]
>> YamlConfig::MaterializeAllConfigSimple [GOOD]
>> YamlConfig::MaterializeAllConfigs
|74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/huge/ut/unittest >> TChainLayoutBuilder::TestMilestoneId [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/dq/runtime/ut/unittest >> TestFileCache::Create [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest >> MdbEndpoingGenerator::Generic_WithTransformHost [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/runtime/ut/unittest >> ConvertUnboxedValueToArrowAndBack::LargeVariant [GOOD]
>> ParseStats::ParseWithSources [GOOD]
>> YamlConfig::MaterializeAllConfigs [GOOD]
>> ParseStats::ParseJustOutput [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorInheritance [GOOD]
>> YamlConfig::AppendVolatileConfig [GOOD]
>> ParseStats::ParseMultipleGraphsV1 [GOOD]
>> YamlConfig::AppendAndResolve [GOOD]
>> ParseStats::ParseMultipleGraphsV2 [GOOD]
>> YamlConfig::GetMetadata [GOOD]
>> YamlConfig::ReplaceMetadata [GOOD]
>> YamlConfigParser::Iterate [GOOD]
>> YamlConfigParser::ProtoBytesFieldDoesNotDecodeBase64 [GOOD]
>> YamlConfigParser::PdiskCategoryFromString [GOOD]
>> YamlConfigParser::AllowDefaultHostConfigId [GOOD]
>> YamlConfigParser::IncorrectHostConfigIdFails [GOOD]
>> YamlConfigParser::NoMixedHostConfigIds [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeId [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeHost [GOOD]
>> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind [GOOD]
>> YamlConfigProto2Yaml::StorageConfig [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/dq/scheduler/ut/unittest >> TSchedulerTest::FifoAfterOneHour [GOOD]
|74.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ut_helpers.cpp
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/federated_query/ut/unittest >> TestFederatedQueryHelpers::TestValidateResultSetColumns [GOOD]
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/io_formats/arrow/scheme/ut/unittest >> FormatCSV::Nulls [GOOD]
Test command err:
12000000
Cannot read CSV: no columns specified
Cannot read CSV: Invalid: Empty CSV file
d'Artagnan
'"'
Jeanne d'Arc
"'"
'd'Artagnan'
''"''
'Jeanne d'Arc'
'"'"'
d'Artagnan
'"'
Jeanne d'Arc
"'"
src:
,"",""
,"",""
,,
parsed:
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ
src:
,"",""
,"",""
,,
parsed:
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ
src:
\N,"",""
\N,"\N","\N"
\N,\N,\N
parsed:
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,\N,\N
ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ
src:
NULL,"",""
NULL,"NULL","NULL"
NULL,NULL,NULL
parsed:
ᴺᵁᴸᴸ,,
ᴺᵁᴸᴸ,NULL,NULL
ᴺᵁᴸᴸ,ᴺᵁᴸᴸ,ᴺᵁᴸᴸ
|74.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tx_proxy/proxy_ext_tenant_ut.cpp
|74.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> ColumnShardConfigValidation::NotCorrectPlainCompression [GOOD]
|74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest >> ParseStats::ParseMultipleGraphsV2 [GOOD]
|74.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/init/ut/unittest >> StaticNodeSelectorsInit::TestStaticNodeSelectorByNodeKind [GOOD]
|74.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/compatibility/ydb-tests-compatibility
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/ut/unittest >> YamlConfigProto2Yaml::StorageConfig [GOOD]
Test command err:
host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]"
"\/dev\/disk\/by-partlabel\/kikimr_nvme_02"
host_config: "[{\"drive\":[{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"},{\"type\":\"NVME\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_02\"}],\"host_config_id\":1},{\"drive\":[{\"type\":\"SSD\",\"path\":\"\\/dev\\/disk\\/by-partlabel\\/kikimr_nvme_01\"}],\"host_config_id\":2}]"
host_configs:
- host_config_id: 1
  drive:
  - path: /dev/disk/by-partlabel/kikimr_nvme_01
    type: NVME
    expected_slot_count: 9
  - path: /dev/disk/by-partlabel/kikimr_nvme_02
    type: NVME
    expected_slot_count: 9
- host_config_id: 2
  drive:
  - path: /dev/disk/by-partlabel/kikimr_nvme_01
    type: SSD
    expected_slot_count: 9
hosts:
- host: sas8-6954.search.yandex.net
  port: 19000
  host_config_id: 1
- host: sas8-6955.search.yandex.net
  port: 19000
  host_config_id: 2
item_config_generation: 0
|74.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_export/ut_export.cpp
>> YtLookupActor::Lookup [GOOD]
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/library/ncloud/impl/access_service_ut.cpp
>> test_init.py::TestTpcdsInit::test_s1_column
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/yt/actors/ut/unittest >> YtLookupActor::Lookup [GOOD]
Test command err:
2025-06-03 10:21:08.189 INFO ydb-library-yql-providers-yt-actors-ut(pid=9911, tid=0x00007F7CC83C5BC0) [default] storage.cpp:178: FileStorage initialized in "/home/runner/.ya/build/build_root/u93c/000a36/r3tmp/tmpTJuppJ/", temporary dir: "/home/runner/.ya/build/build_root/u93c/000a36/r3tmp/tmpTJuppJ/9911", files: 0, total size: 0
2025-06-03 10:21:08.214 INFO ydb-library-yql-providers-yt-actors-ut(pid=9911, tid=0x00007F7CC83C5BC0) [YT] yql_yt_lookup_actor.cpp:103: New Yt provider lookup source actor(ActorId=[1:4:2051]) for cluster=Plato, table=Lookup
2025-06-03 10:21:08.215 DEBUG ydb-library-yql-providers-yt-actors-ut(pid=9911, tid=0x00007F7CC83C5BC0) [YT] yql_yt_lookup_actor.cpp:172: ActorId=[1:4:2051] Got LookupRequest for 4 keys
>> test_init.py::TestTpcdsInit::test_s1_column [GOOD]
>> test_init.py::TestTpcdsInit::test_s1_column_decimal
>> test_init.py::TestTpcdsInit::test_s1_s3
>> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts
>> test_init.py::TestTpcdsInit::test_s1_column_decimal [GOOD]
>> test_init.py::TestTpcdsInit::test_s1_s3 [GOOD]
>> test_init.py::TestTpchInit::test_s100_column
>> test_init.py::TestClickbenchInit::test_s1_s3
>> test_init.py::TestTpchInit::test_s100_column [GOOD]
>> test_generator.py::TestTpchGenerator::test_s1_state
>> test_init.py::TestClickbenchInit::test_s1_s3 [GOOD]
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/security/ticket_parser_ut.cpp
>> test_init.py::TestTpcdsInit::test_s100_column
>> test_generator.py::TestTpchGenerator::test_s1_parts
>> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb
>> test_generator.py::TestTpcdsGenerator::test_s1_parts
>> test_init.py::TestTpcdsInit::test_s100_column [GOOD]
>> test_init.py::TestTpcdsInit::test_s1_column_decimal_ydb [GOOD]
>> test_generator.py::TestTpchGenerator::test_s1
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_index_build_reboots/ut_index_build_reboots.cpp
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test
>> test_init.py::TestTpcdsInit::test_s1_row
>> test_init.py::TestTpchInit::test_s1_column
>> test_generator.py::TestTpcdsGenerator::test_s1_state
|74.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut
|74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_column_decimal [GOOD]
|74.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/bootstrapper_ut.cpp
>> test_init.py::TestTpcdsInit::test_s1_row [GOOD]
>> test_init.py::TestTpchInit::test_s1_s3
>> test_init.py::TestTpchInit::test_s1_column [GOOD]
>> test_generator.py::TestTpchGenerator::test_s1_state_and_parts
>> test_init.py::TestTpchInit::test_s1_s3 [GOOD]
>> test_init.py::TestTpchInit::test_s1_row
|74.2%| [TA] $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/resource_broker_ut.cpp
>> test_init.py::TestTpchInit::test_s1_column_decimal_ydb
>> test_init.py::TestTpchInit::test_s1_row [GOOD]
>> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s100_column [GOOD]
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_req_blockbs_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipe_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_metrics_ut.cpp
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s100_column [GOOD]
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/pipe_tracker_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_counters_aggregator_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_pipecache_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/ut/ut_utils.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tablet/tablet_resolver_ut.cpp
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/topic_data_ut.cpp
|74.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/lib/libydb_device_test.a
|74.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/stress_tool/proto/libtools-stress_tool-proto.a
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool_ut.cpp
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column [GOOD]
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/mind/bscontroller/ut_bscontroller/main.cpp
|74.2%| [TA] $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log}
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpcdsInit::test_s1_row [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_s3 [GOOD]
|74.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/viewer/viewer_ut.cpp
>> test_init.py::TestTpchInit::test_s1_column_decimal
|74.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_table_reboots/ut_external_table_reboots.cpp
|74.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/control/ut/ydb-core-control-ut
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal_ydb [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_row [GOOD]
>> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD]
|74.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_filestore_reboots/ut_filestore_reboots.cpp
|74.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/stress_tool/device_test_tool.cpp
|74.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication
|74.1%| [TA] $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|74.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut
|74.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer
|74.2%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview/ut_sysview.cpp
|74.2%| [TA] $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|74.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/autoconfig/ydb-tests-functional-autoconfig
|74.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init
|74.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut
|74.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut
|74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestTpchInit::test_s1_column_decimal [GOOD]
|74.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/replication/service/json_change_record_ut.cpp
>> test_generator.py::TestTpcdsGenerator::test_s1
|74.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats
|74.4%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_restore/ut_restore.cpp
>> test_init.py::TestClickbenchInit::test_s1_column
|74.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut
>> test_init.py::TestClickbenchInit::test_s1_column [GOOD]
>> test_init.py::TestClickbenchInit::test_s1_row
|74.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber
|74.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots
|74.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown
|74.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown
>> test_init.py::TestClickbenchInit::test_s1_row [GOOD]
|74.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut
>> AuthConfigValidation::AcceptValidPasswordComplexity [GOOD]
>> AuthConfigValidation::CannotAcceptInvalidPasswordComplexity [GOOD]
>> AuthConfigValidation::AcceptValidAccountLockoutConfig [GOOD]
>> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD]
|74.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut
|74.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/validation/auth_config_validator_ut/unittest >> AuthConfigValidation::CannotAcceptInvalidAccountLockoutConfig [GOOD]
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_locks.cpp
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_incremental_restore/ut_incremental_restore.cpp
|74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_init.py::TestClickbenchInit::test_s1_row [GOOD]
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/ut_external_data_source_reboots.cpp
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_new_schemecache_ut.cpp
|74.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_sysview_reboots/ut_sysview_reboots.cpp
|74.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/stress_tool/ydb_stress_tool
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_new_schemecache_ut.cpp
|74.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/backup/ut/ydb-library-backup-ut
|74.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/backup/ut/ydb-library-backup-ut
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_proccessor.cpp
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/main.cpp
|74.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/test_connection/ut/unittest
|74.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/protobuf_printer/ut/ydb-library-protobuf_printer-ut
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_replay.cpp
|74.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tools/query_replay/query_compiler.cpp
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/common_ut.cpp
|74.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut
>> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump]
|74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut
|74.8%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compression_ut.cpp
|74.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut
>> PushdownTest::NoFilter [GOOD]
>> PushdownTest::Equal [GOOD]
>> PushdownTest::NotEqualInt32Int64 [GOOD]
>> PushdownTest::TrueCoalesce [GOOD]
|74.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut
|74.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/compress_executor_ut.cpp
>> PushdownTest::CmpInt16AndInt32 [GOOD]
>> PushdownTest::PartialAnd [GOOD]
>> PushdownTest::PartialAndOneBranchPushdownable [GOOD]
>> PushdownTest::NotNull [GOOD]
>> PushdownTest::NotNullForDatetime [GOOD]
>> PushdownTest::IsNull [GOOD]
|74.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/retry_policy_ut.cpp
>> PushdownTest::StringFieldsNotSupported
|74.9%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp
>> ConfigValidation::SameStaticGroup [GOOD]
>> ConfigValidation::StaticGroupSizesGrow [GOOD]
>> ConfigValidation::StaticGroupSizesShrink [GOOD]
>> ConfigValidation::VDiskChanged [GOOD]
>> ConfigValidation::TooManyVDiskChanged [GOOD]
>> DatabaseConfigValidation::AllowedFields
>> DatabaseConfigValidation::AllowedFields [GOOD]
>> DatabaseConfigValidation::NotAllowedFields [GOOD]
>> PushdownTest::StringFieldsNotSupported [GOOD]
>> PushdownTest::StringFieldsNotSupported2 [GOOD]
>> PushdownTest::RegexpPushdown [GOOD]
|74.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/libpy3statistics_workload.global.a
>> UtilTest::SizeFromStringParsingWithBinaryPrefix [GOOD]
>> UtilTest::PathParseTest [GOOD]
>> BackupToolValuePrintParse::ResultSetIntarvalsPrintTest [GOOD]
>> BackupToolValuePrintParse::ResultSetFloatPrintTest [GOOD]
>> BackupToolValuePrintParse::ResultSetVoidPrintTest [GOOD]
>> BackupToolValuePrintParse::ResultSetDecimalPrintTest [GOOD]
>> BackupToolValuePrintParse::ParseValuesFromFile [GOOD]
>> BackupToolValuePrintParse::ResultSetJsonDocumentPrintTest [GOOD]
>> BackupToolValuePrintParse::ParseValuesFromString [GOOD]
>> BackupToolValuePrintParse::ResultSetDyNumberPrintTest [GOOD]
>> BackupToolValuePrintParse::ResultSetInt8PrintTest [GOOD]
>> BackupToolValuePrintParse::ResultSetInt32PrintTest [GOOD]
>> UtilTest::SizeFromStringParsingErrors [GOOD]
UtilTest::SizeFromStringParsingErrors [GOOD] >> UtilTest::SizeFromStringParsing [GOOD] >> UtilTest::SizeFromStringParsingWithDecimalPrefix [GOOD] >> BackupToolValuePrintParse::ResultSetUtf8PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetStringPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetBoolPrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt16PrintTest [GOOD] >> BackupToolValuePrintParse::ResultSetInt64PrintTest [GOOD] |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/datashard/datashard_ut_change_exchange.cpp |75.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/validation/ut/unittest >> DatabaseConfigValidation::NotAllowedFields [GOOD] |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/read_session_ut.cpp |75.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_object.cpp ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest >> PushdownTest::RegexpPushdown [GOOD] Test command err: Initial program: ( (let $data_source (DataSource '"generic" '"test_cluster")) (let $empty_lambda (lambda '($arg) (Bool '"true"))) (let $table (MrTableConcat (Key '('table (String '"test_table")))) ) (let $read (Read! world $data_source $table)) (let $map_lambda (lambda '($row) (OptionalIf (Bool '"true") $row ) )) (let $filtered_data (FlatMap (Right! $read) $map_lambda)) (let $resulte_data_sink (DataSink '"result")) (let $result (ResWrite! (Left! $read) $resulte_data_sink (Key) $filtered_data '('('type)))) (return (Commit! $result $resulte_data_sink)) ) 2025-06-03 10:21:14.727 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.728 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (Read! world (DataSource '"generic" '"test_cluster") (MrTableConcat (Key '('table (String '"test_table")))))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($4) (OptionalIf (Bool '"true") $4))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.728 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_generic_io_discovery.cpp:55: discovered cluster name: test_cluster 2025-06-03 10:21:14.728 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_generic_load_meta.cpp:91: Loading table meta for: `test_cluster`.`test_table` 2025-06-03 10:21:14.733 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! 
$3 $2)) ) 2025-06-03 10:21:14.735 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (OptionalIf (Bool '"true") $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (Bool '"true")) (let $2 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($5) $1))) (let $3 (DataSink '"result")) (let $4 (ResWrite! (Left! $2) $3 (Key) (FlatMap (Right! $2) (lambda '($6) (OptionalIf $1 $6))) '('('type)))) (return (Commit! $4 $3)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_co_simple1.cpp:986: OptionalIf over Bool 'true 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (Just $5))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_co_simple1.cpp:2040: FlatMap with Just 2025-06-03 10:21:14.736 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! 
$3 $2)) ) 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (Right! $1) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:14.737 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResWrite! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)))) (return (Commit! $2 $1)) ) 2025-06-03 10:21:14.737 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [RESULT] yql_result_provider.cpp:773: ResPull 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! $2 $1)) ) 2025-06-03 10:21:14.737 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (ResPull! world $1 (Key) (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($3) (Bool '"true")))) '('('type)) '"generic")) (return (Commit! 
$2 $1)) ) 2025-06-03 10:21:14.738 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_generic_dq_integration.cpp:191: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-03 10:21:14.739 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-03 10:21:14.740 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($32) (Bool '"true")))) (let $4 (DataType 'Bool)) (let $5 (DataType 'Date)) (let $6 (DataType 'Datetime)) (let $7 (DataType 'Double)) (let $8 (DataType 'DyNumber)) (let $9 (DataType 'Float)) (let $10 (DataType 'Int16)) (let $11 (DataType 'Int32)) (let $12 (DataType 'Int64)) (let $13 (DataType 'Int8)) (let $14 (DataType 'Interval)) (let $15 (DataType 'Json)) (let $16 (DataType 'JsonDocument)) (let $17 (DataType 'String)) (let $18 (DataType 'Timestamp)) (let $19 (DataType 'TzDate)) (let $20 (DataType 'TzDatetime)) (let $21 (DataType 'TzTimestamp)) (let $22 (DataType 'Uint16)) (let $23 (DataType 'Uint32)) (let $24 (DataType 'Uint64)) (let $25 (DataType 'Uint8)) (let $26 (DataType 'Utf8)) (let $27 (DataType 'Uuid)) (let $28 (DataType 'Yson)) (let $29 (StructType '('"col_bool" $4) '('"col_date" $5) '('"col_datetime" $6) '('"col_double" $7) '('"col_dynumber" $8) '('"col_float" $9) '('"col_int16" $10) '('"col_int32" $11) '('"col_int64" $12) '('"col_int8" $13) '('"col_interval" $14) '('"col_json" $15) '('"col_json_document" $16) '('"col_optional_bool" (OptionalType $4)) '('"col_optional_date" (OptionalType $5)) '('"col_optional_datetime" (OptionalType $6)) '('"col_optional_double" (OptionalType $7)) '('"col_optional_dynumber" (OptionalType $8)) '('"col_optional_float" (OptionalType $9)) '('"col_optional_int16" (OptionalType $10)) '('"col_optional_int32" (OptionalType $11)) '('"col_optional_int64" (OptionalType $12)) '('"col_optional_int8" (OptionalType $13)) '('"col_optional_interval" (OptionalType $14)) '('"col_optional_json" (OptionalType $15)) '('"col_optional_json_document" (OptionalType $16)) '('"col_optional_string" (OptionalType $17)) '('"col_optional_timestamp" (OptionalType $18)) '('"col_optional_tz_date" (OptionalT ... 
sitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) '"" '())) (return (OptionalIf (Apply $9 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:15.126 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($4) (Bool '"true")))) (let $2 (DataSink '"result")) (let $3 (ResWrite! (Left! $1) $2 (Key) (FlatMap (Right! $1) (lambda '($5) (block '( (let $6 (DataType 'Bool)) (let $7 (OptionalType (StructType '('"CaseSensitive" $6) '('"DotNl" $6) '('"Literal" $6) '('"LogErrors" $6) '('"LongestMatch" $6) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $6) '('"NeverNl" $6) '('"OneLine" $6) '('"PerlClasses" $6) '('"PosixSyntax" $6) '('"Utf8" $6) '('"WordBoundary" $6)))) (let $8 (DataType 'String)) (let $9 (CallableType '() '($6) '((OptionalType $8)))) (let $10 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $7)) (VoidType) '"" $9 (TupleType $8 $7) '"" '())) (return (OptionalIf (Apply $10 (Just (Member $5 '"col_string"))) $5)) )))) '('('type)))) (return (Commit! $3 $2)) ) 2025-06-03 10:21:15.127 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_optimize.cpp:135: PhysicalOptimizer-TrimReadWorld 2025-06-03 10:21:15.127 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [default] physical_opt.cpp:76: Push filter lambda: ( (return (lambda '($1) (block '( (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (return (Apply $6 (Just (Member $1 '"col_string")))) )))) ) 2025-06-03 10:21:15.127 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_optimize.cpp:135: PhysicalOptimizer-PushFilterToReadTable 2025-06-03 10:21:15.128 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! 
$7 $1)) ) 2025-06-03 10:21:15.128 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-03 10:21:15.128 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Expr to optimize: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! $7 $1)) ) 2025-06-03 10:21:15.129 TRACE yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_generic_physical_opt.cpp:140: Push filter. Lambda is already not empty 2025-06-03 10:21:15.129 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Optimized expr: ( (let $1 (DataSink '"result")) (let $2 (DataType 'Bool)) (let $3 (OptionalType (StructType '('"CaseSensitive" $2) '('"DotNl" $2) '('"Literal" $2) '('"LogErrors" $2) '('"LongestMatch" $2) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $2) '('"NeverNl" $2) '('"OneLine" $2) '('"PerlClasses" $2) '('"PosixSyntax" $2) '('"Utf8" $2) '('"WordBoundary" $2)))) (let $4 (DataType 'String)) (let $5 (CallableType '() '($2) '((OptionalType $4)))) (let $6 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $3)) (VoidType) '"" $5 (TupleType $4 $3) '"" '())) (let $7 (ResWrite! world $1 (Key) (FlatMap (Right! (GenReadTable! world (DataSource '"generic" '"test_cluster") (GenTable '"test_table") (Void) (lambda '($8) (Apply $6 (Just (Member $8 '"col_string")))))) (lambda '($9) (OptionalIf (Apply $6 (Just (Member $9 '"col_string"))) $9))) '('('type)))) (return (Commit! 
$7 $1)) ) 2025-06-03 10:21:15.129 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_generic_dq_integration.cpp:191: Filling source settings: cluster: test_cluster, table: test_table, endpoint: host: "host" port: 42 2025-06-03 10:21:15.130 INFO yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [generic] yql_optimize.cpp:135: BuildGenericDqSourceSettings 2025-06-03 10:21:15.131 DEBUG yql-providers-generic-provider-ut-pushdown(pid=25788, tid=0x00007EFED7519AC0) [core] yql_out_transformers.cpp:62: Built settings: ( (let $1 (DataSink '"result")) (let $2 '('"col_bool" '"col_date" '"col_datetime" '"col_double" '"col_dynumber" '"col_float" '"col_int16" '"col_int32" '"col_int64" '"col_int8" '"col_interval" '"col_json" '"col_json_document" '"col_optional_bool" '"col_optional_date" '"col_optional_datetime" '"col_optional_double" '"col_optional_dynumber" '"col_optional_float" '"col_optional_int16" '"col_optional_int32" '"col_optional_int64" '"col_optional_int8" '"col_optional_interval" '"col_optional_json" '"col_optional_json_document" '"col_optional_string" '"col_optional_timestamp" '"col_optional_tz_date" '"col_optional_tz_datetime" '"col_optional_tz_timestamp" '"col_optional_uint16" '"col_optional_uint32" '"col_optional_uint64" '"col_optional_uint8" '"col_optional_utf8" '"col_optional_uuid" '"col_optional_yson" '"col_string" '"col_timestamp" '"col_tz_date" '"col_tz_datetime" '"col_tz_timestamp" '"col_uint16" '"col_uint32" '"col_uint64" '"col_uint8" '"col_utf8" '"col_uuid" '"col_yson")) (let $3 (DataType 'Bool)) (let $4 (OptionalType (StructType '('"CaseSensitive" $3) '('"DotNl" $3) '('"Literal" $3) '('"LogErrors" $3) '('"LongestMatch" $3) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $3) '('"NeverNl" $3) '('"OneLine" $3) '('"PerlClasses" $3) '('"PosixSyntax" $3) '('"Utf8" $3) '('"WordBoundary" $3)))) (let $5 (DataType 'String)) (let $6 (OptionalType $5)) (let $7 (CallableType '() '($3) '($6))) (let $8 (Udf '"Re2.Grep" '((String '"\\\\d+") (Nothing $4)) (VoidType) '"" $7 (TupleType $5 $4) '"" '())) (let $9 (GenSourceSettings world '"test_cluster" '"test_table" (SecureParam '"cluster:default_test_cluster") $2 (lambda '($37) (Apply $8 (Just (Member $37 '"col_string")))))) (let $10 (DataType 'Bool)) (let $11 (DataType 'Date)) (let $12 (DataType 'Datetime)) (let $13 (DataType 'Double)) (let $14 (DataType 'DyNumber)) (let $15 (DataType 'Float)) (let $16 (DataType 'Int16)) (let $17 (DataType 'Int32)) (let $18 (DataType 'Int64)) (let $19 (DataType 'Int8)) (let $20 (DataType 'Interval)) (let $21 (DataType 'Json)) (let $22 (DataType 'JsonDocument)) (let $23 (DataType 'Timestamp)) (let $24 (DataType 'TzDate)) (let $25 (DataType 'TzDatetime)) (let $26 (DataType 'TzTimestamp)) (let $27 (DataType 'Uint16)) (let $28 (DataType 'Uint32)) (let $29 (DataType 'Uint64)) (let $30 (DataType 'Uint8)) (let $31 (DataType 'Utf8)) (let $32 (DataType 'Uuid)) (let $33 (DataType 'Yson)) (let $34 (StructType '('"col_bool" $10) '('"col_date" $11) '('"col_datetime" $12) '('"col_double" $13) '('"col_dynumber" $14) '('"col_float" $15) '('"col_int16" $16) '('"col_int32" $17) '('"col_int64" $18) '('"col_int8" $19) '('"col_interval" $20) '('"col_json" $21) '('"col_json_document" $22) '('"col_optional_bool" (OptionalType $10)) '('"col_optional_date" (OptionalType $11)) '('"col_optional_datetime" (OptionalType $12)) '('"col_optional_double" (OptionalType $13)) '('"col_optional_dynumber" (OptionalType $14)) '('"col_optional_float" (OptionalType $15)) 
'('"col_optional_int16" (OptionalType $16)) '('"col_optional_int32" (OptionalType $17)) '('"col_optional_int64" (OptionalType $18)) '('"col_optional_int8" (OptionalType $19)) '('"col_optional_interval" (OptionalType $20)) '('"col_optional_json" (OptionalType $21)) '('"col_optional_json_document" (OptionalType $22)) '('"col_optional_string" $6) '('"col_optional_timestamp" (OptionalType $23)) '('"col_optional_tz_date" (OptionalType $24)) '('"col_optional_tz_datetime" (OptionalType $25)) '('"col_optional_tz_timestamp" (OptionalType $26)) '('"col_optional_uint16" (OptionalType $27)) '('"col_optional_uint32" (OptionalType $28)) '('"col_optional_uint64" (OptionalType $29)) '('"col_optional_uint8" (OptionalType $30)) '('"col_optional_utf8" (OptionalType $31)) '('"col_optional_uuid" (OptionalType $32)) '('"col_optional_yson" (OptionalType $33)) '('"col_string" $5) '('"col_timestamp" $23) '('"col_tz_date" $24) '('"col_tz_datetime" $25) '('"col_tz_timestamp" $26) '('"col_uint16" $27) '('"col_uint32" $28) '('"col_uint64" $29) '('"col_uint8" $30) '('"col_utf8" $31) '('"col_uuid" $32) '('"col_yson" $33))) (let $35 (DqSourceWrap $9 (DataSource '"generic" '"test_cluster") $34)) (let $36 (ResWrite! world $1 (Key) (FlatMap $35 (lambda '($38) (OptionalIf (Apply $8 (Just (Member $38 '"col_string"))) $38))) '('('type)))) (return (Commit! $36 $1)) ) Dq source filter settings: filter_typed { regexp { value { column: "col_string" } pattern { typed_value { type { type_id: STRING } value { bytes_value: "\\\\d+" } } } } } >> FieldSizePrinterTest::PrintSuccess [GOOD] >> FieldSizePrinterTest::PrintRecursiveType [GOOD] >> HideFieldPrinterTest::PrintNoValue [GOOD] >> TokenPrinterTest::PrintToken [GOOD] >> PrinterWrapperTest::PrintsToStream [GOOD] >> PrinterWrapperTest::PrintsToString [GOOD] >> SecurityPrinterTest::PrintSensitive [GOOD] >> SecurityPrinterTest::PrintRecursiveType [GOOD] |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/tiering/ut/ut_tiers.cpp |75.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/backup/ut/unittest >> BackupToolValuePrintParse::ResultSetInt64PrintTest [GOOD] |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |75.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/bulk_upsert/ydb-public-sdk-cpp-tests-integration-bulk_upsert >> ArrowInferenceTest::csv_simple >> ExternalDataSourceTest::ValidateName [GOOD] >> ExternalDataSourceTest::ValidatePack [GOOD] >> ExternalDataSourceTest::ValidateAuth [GOOD] >> ExternalDataSourceTest::ValidateParameters [GOOD] >> ExternalDataSourceTest::ValidateHasExternalTable [GOOD] >> ExternalDataSourceTest::ValidateProperties [GOOD] >> ExternalDataSourceTest::ValidateLocation [GOOD] >> ExternalSourceBuilderTest::ValidateName [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithoutCondition [GOOD] >> ExternalSourceBuilderTest::ValidateAuthWithCondition [GOOD] >> ExternalSourceBuilderTest::ValidateUnsupportedField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredField [GOOD] >> ExternalSourceBuilderTest::ValidateNonRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldValues [GOOD] >> ExternalSourceBuilderTest::ValidateRequiredFieldOnCondition [GOOD] >> IcebergDdlTest::HiveCatalogWithS3Test [GOOD] >> IcebergDdlTest::HadoopCatalogWithS3Test [GOOD] 
>> ObjectStorageTest::SuccessValidation [GOOD] >> ObjectStorageTest::FailedCreate [GOOD] >> ObjectStorageTest::FailedValidation [GOOD] >> ObjectStorageTest::FailedJsonListValidation [GOOD] >> ObjectStorageTest::FailedOptionalTypeValidation [GOOD] >> ObjectStorageTest::WildcardsValidation [GOOD] >> ArrowInferenceTest::csv_simple [GOOD] >> ArrowInferenceTest::tsv_simple [GOOD] >> ArrowInferenceTest::tsv_empty [GOOD] >> ArrowInferenceTest::broken_json [GOOD] >> ArrowInferenceTest::empty_json_each_row [GOOD] >> ArrowInferenceTest::empty_json_list [GOOD] >> ArrowInferenceTest::broken_json_list [GOOD] |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/core/tx/schemeshard/ut_cdc_stream/ut_cdc_stream.cpp |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/in_memory_control_plane_storage_ut.cpp >> Mirror3of4::ReplicationSmall [GOOD] >> Mirror3of4::ReplicationHuge >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] |75.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/protobuf_printer/ut/unittest >> SecurityPrinterTest::PrintRecursiveType [GOOD] |75.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/external_sources/ut/unittest >> ObjectStorageTest::WildcardsValidation [GOOD] |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_ut.cpp |75.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator/ydb-library-yaml_config-validator-ut-validator ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/external_sources/object_storage/inference/ut/gtest >> ArrowInferenceTest::broken_json_list [GOOD] Test command err: {
: Error: couldn't open csv/tsv file, check format and compression parameters: empty file, code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: empty file, code: 1001 } 2025-06-03T10:21:17.084960Z 1 00h00m00.000000s :OBJECT_STORAGE_INFERENCINATOR DEBUG: TArrowInferencinator: [1:6:6]. HandleFileError: {
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
: Error: couldn't run arrow json chunker for /path/is/neither/real: Invalid: straddling object straddles two block boundaries (try to increase block size?), code: 1001 } {
: Error: couldn't open json file, check format and compression parameters: Invalid: JSON parse error: Invalid value. in row 0, code: 1001 } |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_quotas_ut.cpp >> TArrowPushDown::SimplePushDown [GOOD] >> TArrowPushDown::FilterEverything [GOOD] >> TArrowPushDown::MatchSeveralRowGroups [GOOD] |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |75.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |75.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_permissions_ut.cpp |75.0%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/dynamic_config/dynamic_config_ut.cpp |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/public/sdk/cpp/src/client/federated_topic/ut/basic_usage_ut.cpp |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_permissions_ut.cpp |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_connections_ut.cpp |75.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/actors/ut/unittest >> TArrowPushDown::MatchSeveralRowGroups [GOOD] |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_permissions_ut.cpp |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_bindings_ut.cpp |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |75.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_queries_ut.cpp |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |75.1%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/tests/fq/control_plane_storage/ydb_control_plane_storage_internal_ut.cpp >> test_transform.py::TestYamlConfigTransformations::test_basic[args0-dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] |75.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut |75.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut >> Validator::StringValidation [GOOD] >> Validator::IntValidation [GOOD] >> Validator::Enums [GOOD] >> Validator::OpaqueMaps [GOOD] >> Validator::BoolValidation [GOOD] >> Validator::MultitypeNodeValidation [GOOD] >> Validator::IntArrayValidation [GOOD] >> Validator::MapValidation [GOOD] |75.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |75.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |75.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut |75.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |75.3%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |75.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer >> ActionParsingTest::ToAndFromStringAreConsistent [GOOD] >> ActionParsingTest::ActionsForQueueTest [GOOD] >> ActionParsingTest::BatchActionTest [GOOD] >> ActionParsingTest::ActionsForMessageTest [GOOD] >> ActionParsingTest::FastActionsTest [GOOD] >> HttpCountersTest::CountersAggregationTest [GOOD] >> LazyCounterTest::LazyCounterTest [GOOD] >> LazyCounterTest::AggregationLazyTest [GOOD] >> LazyCounterTest::AggregationNonLazyTest [GOOD] >> LazyCounterTest::HistogramAggregationTest [GOOD] >> MessageAttributeValidationTest::MessageAttributeValidationTest [GOOD] >> MessageBodyValidationTest::MessageBodyValidationTest [GOOD] >> MeteringCountersTest::CountersAggregationTest [GOOD] >> NameValidationTest::NameValidationTest [GOOD] >> QueueAttributes::BasicStdTest [GOOD] >> QueueAttributes::BasicFifoTest [GOOD] >> QueueAttributes::BasicClampTest [GOOD] >> QueueCountersTest::InsertCountersTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithoutFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersNonLeaderWithFolderTest [GOOD] >> QueueCountersTest::RemoveQueueCountersLeaderWithFolderTest [GOOD] >> QueueCountersTest::CountersAggregationTest [GOOD] >> QueueCountersTest::CountersAggregationCloudTest [GOOD] >> RedrivePolicy::RedrivePolicyValidationTest [GOOD] |75.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/api/client/yc_private/oauth/libclient-yc_private-oauth.a |75.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut |75.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/validator/ut/validator/unittest >> Validator::MapValidation [GOOD] >> RedrivePolicy::RedrivePolicyToJsonTest [GOOD] >> RedrivePolicy::RedrivePolicyArnValidationTest [GOOD] >> SecureProtobufPrinterTest::MessageBody [GOOD] >> SecureProtobufPrinterTest::Tokens [GOOD] >> StringValidationTest::IsAlphaNumAndPunctuationTest [GOOD] >> UserCountersTest::DisableCountersTest [GOOD] >> UserCountersTest::RemoveUserCountersTest [GOOD] >> UserCountersTest::CountersAggregationTest [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark3 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement2 [GOOD] >> TIssuesBufferTest::TestSimplePush [GOOD] >> TDqSourceWatermarkTrackerTest::IdleFirstShouldReturnStartWatermark [GOOD] >> TComputeActorAsyncInputHelperTest::PollAsyncInput [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark1 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement1 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement3 [GOOD] >> TIssuesBufferTest::TestEmpty [GOOD] >> TDqSourceWatermarkTrackerTest::StartWatermark2 [GOOD] >> TDqSourceWatermarkTrackerTest::WatermarkMovement4 [GOOD] >> TComputeActorTest::ReceiveData [GOOD] >> TDqSourceWatermarkTrackerTest::Idle1 [GOOD] >> TIssuesBufferTest::TestPushWithOverflow [GOOD] >> TIssuesBufferTest::TestUseAfterDump [GOOD] >> TIssuesBufferTest::TestSmallBuffer [GOOD] >> TComputeActorTest::Empty [GOOD] >> TDqSourceWatermarkTrackerTest::IdleNextCheckAt [GOOD] |75.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ymq/base/ut/unittest >> UserCountersTest::CountersAggregationTest [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump] [GOOD] >> 
test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] >> ArrowTest::BatchBuilder [GOOD] >> ArrowTest::ArrowToYdbConverter >> ArrowTest::ArrowToYdbConverter [GOOD] >> ArrowTest::SortWithCompositeKey [GOOD] >> ArrowTest::MergingSortedInputStream [GOOD] >> ArrowTest::MergingSortedInputStreamReversed [GOOD] >> ArrowTest::MergingSortedInputStreamReplace [GOOD] >> ArrowTest::MaxVersionFilter [GOOD] >> ArrowTest::EqualKeysVersionFilter [GOOD] >> ColumnFilter::MergeFilters [GOOD] >> ColumnFilter::CombineFilters [GOOD] >> YqlPqSimpleTests::SelectWithNoSchema |75.5%| [PY] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o >> ColumnFilter::ApplyFilterToFilter [GOOD] >> ColumnFilter::FilterSlice [GOOD] >> ColumnFilter::FilterCheckSlice [GOOD] >> ColumnFilter::FilterSlice1 [GOOD] >> ColumnFilter::CutFilter1 [GOOD] >> ColumnFilter::CutFilter2 [GOOD] >> Dictionary::Simple |75.5%| [PY] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/objcopy_533f06087e794c7af638ea75dc.o ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/dq/actors/compute/ut/unittest >> TDqSourceWatermarkTrackerTest::IdleNextCheckAt [GOOD] Test command err: 2025-06-03T10:21:19.948451Z :Unused ERROR: dq_compute_actor_channels.cpp:133: TxId: TxId, task: 0. Unexpected input channelId: 0 seqNo: 0, expected: 1 |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots >> TestIssuesGrouping::ShouldCountEveryIssue [GOOD] >> TestIssuesGrouping::ShouldRemoveOldIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldRemoveTheOldestIfMoreThanMaxIssues [GOOD] >> TestIssuesGrouping::ShouldSaveSubIssues [GOOD] >> ResultReceiver::ReceiveStatus [GOOD] >> ResultReceiver::ReceiveError [GOOD] >> ResultReceiver::WriteQueue [GOOD] |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |75.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |75.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |75.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/dq/actors/ut/unittest >> ResultReceiver::WriteQueue [GOOD] |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> YqlPqSimpleTests::SelectWithNoSchema [GOOD] >> YqlPqSimpleTests::SelectWithSchema >> test_transform.py::TestYamlConfigTransformations::test_basic[args1-dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] |75.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tools/sql2yql/sql2yql |75.6%| [CC] {BAZEL_DOWNLOAD} 
$(S)/ydb/services/persqueue_v1/ut/functions_executor_wrapper.cpp >> TCollectingS3ListingStrategyTests::IfNoIssuesOccursShouldReturnCollectedPaths [GOOD] >> TCollectingS3ListingStrategyTests::IfThereAreMoreRecordsThanSpecifiedByLimitShouldReturnError [GOOD] >> TCollectingS3ListingStrategyTests::IfAnyIterationReturnIssueThanWholeStrategyShouldReturnIt [GOOD] >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |75.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/yql/essentials/tools/sql2yql/sql2yql |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/first_class_src_ids_ut.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_ut.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/pqtablet_mock.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/kqp_mock.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/partition_writer_cache_actor_fixture.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_compat_ut.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/rate_limiter_test_setup.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_common_ut.cpp >> YqlPqSimpleTests::SelectWithSchema [GOOD] >> YqlPqSimpleTests::SelectStarWithSchema |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/demo_tx.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/ut/topic_service_ut.cpp |75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/topic_yql_ut.cpp |75.6%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |75.7%| [AR] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |75.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/provider/ut/unittest >> TCollectingS3ListingStrategyTests::IfExceptionIsReturnedFromIteratorThanItShouldCovertItToIssue [GOOD] |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tools/query_replay/ydb_query_replay |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |75.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut >> YqlPqSimpleTests::SelectStarWithSchema [GOOD] |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut |75.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |75.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber >> Dictionary::Simple [GOOD] >> 
Dictionary::ComparePayloadAndFull |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/pq/provider/ut/unittest >> YqlPqSimpleTests::SelectStarWithSchema [GOOD] Test command err: 2025-06-03 10:21:20.392 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [DQ] interconnect_helpers.cpp:215: Start listener ::1:31337 socket: 7 2025-06-03 10:21:20.419 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [DQ] service_node.cpp:126: Starting GRPC on 31338 GRpc memory quota was set but disabled due to issues with grpc quoter, to enable it use EnableGRpcMemoryQuota option 2025-06-03 10:21:20.427 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [default] storage.cpp:178: FileStorage initialized in "/home/runner/.ya/build/build_root/u93c/00091f/r3tmp/tmpRfsBy1/", temporary dir: "/home/runner/.ya/build/build_root/u93c/00091f/r3tmp/tmpRfsBy1/30333", files: 0, total size: 0 Parse SQL... ( (import aggregate_module '"/lib/yql/aggregate.yqls") (import window_module '"/lib/yql/window.yqls") (import core_module '"/lib/yql/core.yqls") (let world (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let world (Configure! world (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let world (block '( (let x (Read! world (DataSource '"pq" '"lb") (MrTableConcat (Key '('table (String '"my_in_topic")))) (Void) '())) (let world (Left! x)) (let table1 (Right! x)) (let values (block '( (let select (block '( (let core table1) (let core (Filter core (lambda '(row) (Coalesce ("<" (Member row '"Data") (String '"100")) (Bool 'false))))) (let core (PersistableRepr (block '( (let projectCoreType (TypeOf core)) (let core (SqlProject core '((SqlProjectItem projectCoreType '"Data" (lambda '(row) (block '( (let res (Member row '"Data")) (return res) ))))))) (return core) )))) (return core) ))) (return select) ))) (let world (block '( (let sink (DataSink '"pq" '"lb")) (let world (Write! world sink (Key '('table (String '"my_out_topic"))) values '('('mode 'append)))) (return world) ))) (return world) ))) (let world (block '( (let world (CommitAll! world)) (return world) ))) (return world) ) Compile... Optimize... 2025-06-03 10:21:20.773 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [DQ] yql_dq_gateway.cpp:605: {85abaf5f-4cc0009-d2184d37-9c2288af} OpenSession 2025-06-03 10:21:20.775 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_load_meta.cpp:35: {85abaf5f-4cc0009-d2184d37-9c2288af} Load topic meta for: `lb`.`my_out_topic` 2025-06-03 10:21:20.775 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_load_meta.cpp:35: {85abaf5f-4cc0009-d2184d37-9c2288af} Load topic meta for: `lb`.`my_in_topic` 2025-06-03 10:21:20.775 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_datasource.cpp:88: {85abaf5f-4cc0009-d2184d37-9c2288af} RewriteIO 2025-06-03 10:21:20.775 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_datasource.cpp:88: {85abaf5f-4cc0009-d2184d37-9c2288af} RewriteIO 2025-06-03 10:21:20.775 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_datasink.cpp:102: {85abaf5f-4cc0009-d2184d37-9c2288af} Rewrite Write! ( (let $1 (Configure! 
world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 '('"Data" $5)) (let $7 (StructType $6)) (let $8 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $7)) (let $9 '"_yql_sys_create_time") (let $10 '"_yql_sys_tsp_write_time") (let $11 '"_yql_sys_partition_id") (let $12 '"_yql_sys_message_group_id") (let $13 '('('"system" $9) '('"system" $10) '('"system" $11) '('"system" '"_yql_sys_offset") '('"system" $12) '('"system" '"_yql_sys_seq_no"))) (let $14 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $13 $7)) (let $15 (DataType 'Timestamp)) (let $16 (DataType 'Uint64)) (let $17 (ListType (StructType $6 '($9 $15) '($12 $5) '('"_yql_sys_offset" $16) '($11 $16) '('"_yql_sys_seq_no" $16) '($10 $15)))) (let $18 (SqlProjectItem $17 '"Data" (lambda '($22) (Member $22 '"Data")))) (let $19 (SqlProjectItem $17 $10 (lambda '($23) (Member $23 $10)))) (let $20 (PqWriteTopic! $2 $3 $8 (RemovePrefixMembers (SqlProject (Filter (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $14 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($21) (< (Member $21 '"Data") (String '"100")))) '($18 $19)) '('_yql_)) 'append '())) (return (Commit! $20 $3)) ) 2025-06-03 10:21:20.778 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:4041: {85abaf5f-4cc0009-d2184d37-9c2288af} Canonize Filter ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 '('"Data" $5)) (let $7 (StructType $6)) (let $8 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $7)) (let $9 '"_yql_sys_create_time") (let $10 '"_yql_sys_tsp_write_time") (let $11 '"_yql_sys_partition_id") (let $12 '"_yql_sys_message_group_id") (let $13 '('('"system" $9) '('"system" $10) '('"system" $11) '('"system" '"_yql_sys_offset") '('"system" $12) '('"system" '"_yql_sys_seq_no"))) (let $14 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $13 $7)) (let $15 (DataType 'Timestamp)) (let $16 (DataType 'Uint64)) (let $17 (ListType (StructType $6 '($9 $15) '($12 $5) '('"_yql_sys_offset" $16) '($11 $16) '('"_yql_sys_seq_no" $16) '($10 $15)))) (let $18 (SqlProjectItem $17 '"Data" (lambda '($22) (Member $22 '"Data")))) (let $19 (SqlProjectItem $17 $10 (lambda '($23) (Member $23 $10)))) (let $20 (PqWriteTopic! $2 $3 $8 (RemovePrefixMembers (SqlProject (FlatMap (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $14 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($21) (OptionalIf (< (Member $21 '"Data") (String '"100")) $21))) '($18 $19)) '('_yql_)) 'append '())) (return (Commit! $20 $3)) ) 2025-06-03 10:21:20.779 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:6509: {85abaf5f-4cc0009-d2184d37-9c2288af} Expand SqlProject 2025-06-03 10:21:20.779 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A5937F640) [DQ] yql_dq_gateway.cpp:633: {85abaf5f-4cc0009-d2184d37-9c2288af} OpenSession OK ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! 
$1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (StructType '('"Data" (DataType 'String)))) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $5)) (let $7 '"_yql_sys_tsp_write_time") (let $8 '('('"system" '"_yql_sys_create_time") '('"system" $7) '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $9 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $8 $5)) (let $10 (PqWriteTopic! $2 $3 $6 (RemovePrefixMembers (FlatMap (FlatMap (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $9 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($11) (OptionalIf (< (Member $11 '"Data") (String '"100")) $11))) (lambda '($12) (AsList (FlattenMembers '('"" (AsStruct '('"Data" (Member $12 '"Data")))) '('"" (AsStruct '($7 (Member $12 $7)))))))) '('_yql_)) 'append '())) (return (Commit! $10 $3)) ) 2025-06-03 10:21:20.780 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_opt_utils.cpp:770: {85abaf5f-4cc0009-d2184d37-9c2288af} Enumerate struct literal for FlattenMembers 2025-06-03 10:21:20.780 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_opt_utils.cpp:770: {85abaf5f-4cc0009-d2184d37-9c2288af} Enumerate struct literal for FlattenMembers ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (StructType '('"Data" (DataType 'String)))) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $5)) (let $7 '"_yql_sys_tsp_write_time") (let $8 '('('"system" '"_yql_sys_create_time") '('"system" $7) '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $9 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $8 $5)) (let $10 (PqWriteTopic! $2 $3 $6 (RemovePrefixMembers (FlatMap (FlatMap (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $9 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($11) (OptionalIf (< (Member $11 '"Data") (String '"100")) $11))) (lambda '($12) (AsList (AsStruct '('"Data" (Member $12 '"Data")) '($7 (Member $12 $7)))))) '('_yql_)) 'append '())) (return (Commit! $10 $3)) ) 2025-06-03 10:21:20.781 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:2028: {85abaf5f-4cc0009-d2184d37-9c2288af} FlatMap with single arg AsList ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (StructType '('"Data" (DataType 'String)))) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $5)) (let $7 '"_yql_sys_tsp_write_time") (let $8 '('('"system" '"_yql_sys_create_time") '('"system" $7) '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $9 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $8 $5)) (let $10 (PqWriteTopic! $2 $3 $6 (RemovePrefixMembers (FlatMap (FlatMap (Right! (PqReadTopic! 
$2 (DataSource '"pq" '"lb") $9 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($11) (OptionalIf (< (Member $11 '"Data") (String '"100")) $11))) (lambda '($12) (Just (AsStruct '('"Data" (Member $12 '"Data")) '($7 (Member $12 $7)))))) '('_yql_)) 'append '())) (return (Commit! $10 $3)) ) 2025-06-03 10:21:20.781 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:2083: {85abaf5f-4cc0009-d2184d37-9c2288af} FlatMap to ExtractMembers ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (StructType '('"Data" (DataType 'String)))) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() $5)) (let $7 '"_yql_sys_tsp_write_time") (let $8 '('('"system" '"_yql_sys_create_time") '('"system" $7) '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $9 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $8 $5)) (let $10 (PqWriteTopic! $2 $3 $6 (RemovePrefixMembers (ExtractMembers (FlatMap (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $9 (Void) '"raw" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) (lambda '($11) (OptionalIf (< (Member $11 '"Data") (String '"100")) $11))) '('"Data" $7)) '('_yql_)) 'append '())) (return (Commit! $10 ... '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '('('"system" '"_yql_sys_create_time") '('"system" '"_yql_sys_tsp_write_time") '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $7 (StructType '('"x" $5) '('"y" (DataType 'Int32))))) (let $9 (PqWriteTopic! $2 $3 $6 (FlatMap (ExtractMembers (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $8 '('"y" '"x") '"json" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) '('"x" '"y")) (lambda '($10) (AsList (FlattenMembers '('"" (AsStruct '('"x" (Member $10 '"x")))))))) 'append '())) (return (Commit! $9 $3)) ) 2025-06-03 10:21:21.993 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_opt_utils.cpp:770: {c7304e77-f93cbf9-ad02a906-7883b238} Enumerate struct literal for FlattenMembers ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '('('"system" '"_yql_sys_create_time") '('"system" '"_yql_sys_tsp_write_time") '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $7 (StructType '('"x" $5) '('"y" (DataType 'Int32))))) (let $9 (PqWriteTopic! 
$2 $3 $6 (FlatMap (ExtractMembers (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $8 '('"y" '"x") '"json" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) '('"x" '"y")) (lambda '($10) (AsList (AsStruct '('"x" (Member $10 '"x")))))) 'append '())) (return (Commit! $9 $3)) ) 2025-06-03 10:21:21.993 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:2028: {c7304e77-f93cbf9-ad02a906-7883b238} FlatMap with single arg AsList ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '('('"system" '"_yql_sys_create_time") '('"system" '"_yql_sys_tsp_write_time") '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $7 (StructType '('"x" $5) '('"y" (DataType 'Int32))))) (let $9 (PqWriteTopic! $2 $3 $6 (FlatMap (ExtractMembers (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $8 '('"y" '"x") '"json" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) '('"x" '"y")) (lambda '($10) (Just (AsStruct '('"x" (Member $10 '"x")))))) 'append '())) (return (Commit! $9 $3)) ) 2025-06-03 10:21:21.994 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:2083: {c7304e77-f93cbf9-ad02a906-7883b238} FlatMap to ExtractMembers ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '('('"system" '"_yql_sys_create_time") '('"system" '"_yql_sys_tsp_write_time") '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $7 (StructType '('"x" $5) '('"y" (DataType 'Int32))))) (let $9 (PqWriteTopic! $2 $3 $6 (ExtractMembers (ExtractMembers (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $8 '('"y" '"x") '"json" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) '('"x" '"y")) '('"x")) 'append '())) (return (Commit! $9 $3)) ) 2025-06-03 10:21:21.995 DEBUG ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [core] yql_co_simple1.cpp:4082: {c7304e77-f93cbf9-ad02a906-7883b238} ExtractMembers over ExtractMembers ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! 
$1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '('('"system" '"_yql_sys_create_time") '('"system" '"_yql_sys_tsp_write_time") '('"system" '"_yql_sys_partition_id") '('"system" '"_yql_sys_offset") '('"system" '"_yql_sys_message_group_id") '('"system" '"_yql_sys_seq_no"))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $7 (StructType '('"x" $5) '('"y" (DataType 'Int32))))) (let $9 (PqWriteTopic! $2 $3 $6 (ExtractMembers (Right! (PqReadTopic! $2 (DataSource '"pq" '"lb") $8 '('"y" '"x") '"json" '"" (Void) '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX")))) '('"x")) 'append '())) (return (Commit! $9 $3)) ) 2025-06-03 10:21:21.996 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [DQ] yql_dq_recapture.cpp:110: {c7304e77-f93cbf9-ad02a906-7883b238} DqsRecapture ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 '"_yql_sys_create_time") (let $8 '"_yql_sys_tsp_write_time") (let $9 '"_yql_sys_partition_id") (let $10 '"_yql_sys_message_group_id") (let $11 '('('"system" $7) '('"system" $8) '('"system" $9) '('"system" '"_yql_sys_offset") '('"system" $10) '('"system" '"_yql_sys_seq_no"))) (let $12 '('"x" $5)) (let $13 '('"y" (DataType 'Int32))) (let $14 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 $11 (StructType $12 $13))) (let $15 '('"SharedReading" '"0")) (let $16 '('('"Consumer" '"my_test_consumer") '('"Endpoint" '"lb.ru") $15 '('"ReconnectPeriod" '"") '('"Format" '"json") '('"ReadGroup" '""))) (let $17 (DataType 'Timestamp)) (let $18 (DataType 'Uint64)) (let $19 (StructType '($7 $17) '($10 $5) '('"_yql_sys_offset" $18) '($9 $18) '('"_yql_sys_seq_no" $18) '($8 $17) $12 $13)) (let $20 (DqPqTopicSource $2 $14 '('"x" '"y") $16 (SecureParam '"cluster:default_lb") '"" $19)) (let $21 '($7 $8 $9 '"_yql_sys_offset" $10 '"_yql_sys_seq_no")) (let $22 '('('"format" '"json") '('"metadataColumns" $21) '('"formatSettings" '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX"))) '('"settings" '($15)))) (let $23 (DqSourceWrap $20 (DataSource '"pq" '"lb") $19 $22)) (let $24 (PqWriteTopic! $2 $3 $6 (ExtractMembers $23 '('"x")) 'append '())) (return (Commit! $24 $3)) ) 2025-06-03 10:21:21.997 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_optimize.cpp:135: {c7304e77-f93cbf9-ad02a906-7883b238} LogicalOptimizer-ExtractMembersOverDqWrap ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! 
$1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 (StructType '('"x" $5))) (let $8 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 '() $7)) (let $9 '('"SharedReading" '"0")) (let $10 '('('"Consumer" '"my_test_consumer") '('"Endpoint" '"lb.ru") $9 '('"ReconnectPeriod" '"") '('"Format" '"json") '('"ReadGroup" '""))) (let $11 (DqPqTopicSource $2 $8 '('"x") $10 (SecureParam '"cluster:default_lb") '"" $7)) (let $12 '('('"format" '"json") '('"formatSettings" '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX"))) '('"settings" '($9)))) (let $13 (DqSourceWrap $11 (DataSource '"pq" '"lb") $7 $12)) (let $14 (PqWriteTopic! $2 $3 $6 $13 'append '())) (return (Commit! $14 $3)) ) 2025-06-03 10:21:21.998 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [DQ] yql_optimize.cpp:135: {c7304e77-f93cbf9-ad02a906-7883b238} DqsPhy-BuildStageWithSourceWrap ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! $1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSink '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $7 (DataSource '"pq" '"lb")) (let $8 (StructType '('"x" $5))) (let $9 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 '() $8)) (let $10 '('"SharedReading" '"0")) (let $11 '('('"Consumer" '"my_test_consumer") '('"Endpoint" '"lb.ru") $10 '('"ReconnectPeriod" '"") '('"Format" '"json") '('"ReadGroup" '""))) (let $12 (DqPqTopicSource $2 $9 '('"x") $11 (SecureParam '"cluster:default_lb") '"" $8)) (let $13 (DqStage '((DqSource $7 $12)) (lambda '($15) (block '( (let $16 '('('"format" '"json") '('"formatSettings" '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX"))) '('"settings" '($10)))) (let $17 (DqSourceWideWrap $15 $7 $8 $16)) (return (NarrowMap $17 (lambda '($18) (AsStruct '('"x" $18))))) ))) '('('"_logical_id" '200296)))) (let $14 (PqWriteTopic! $2 $3 $6 (DqCnUnionAll (TDqOutput $13 '"0")) 'append '())) (return (Commit! $14 $3)) ) 2025-06-03 10:21:21.999 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_pq_physical_optimize.cpp:91: {c7304e77-f93cbf9-ad02a906-7883b238} Optimize PqWriteTopic `lb`.`my_out_topic` 2025-06-03 10:21:21.999 INFO ydb-library-yql-providers-pq-provider-ut(pid=30333, tid=0x00007F4A7B524AC0) [PQ] yql_optimize.cpp:95: {c7304e77-f93cbf9-ad02a906-7883b238} PhysicalOptimizer-PqWriteTopic ( (let $1 (Configure! world (DataSource '"config") '"DqEngine" '"force")) (let $2 (Configure! 
$1 (DataSource '"pq" '"$all") '"Attr" '"consumer" '"my_test_consumer")) (let $3 (DataSource '"pq" '"lb")) (let $4 '('('"PartitionsCount" '"1"))) (let $5 (DataType 'String)) (let $6 (StructType '('"x" $5))) (let $7 (PqTopic '"lb" '"/Root" '"my_in_topic" $4 '() $6)) (let $8 '('"Endpoint" '"lb.ru")) (let $9 '('"SharedReading" '"0")) (let $10 '('('"Consumer" '"my_test_consumer") $8 $9 '('"ReconnectPeriod" '"") '('"Format" '"json") '('"ReadGroup" '""))) (let $11 (SecureParam '"cluster:default_lb")) (let $12 (DqPqTopicSource $2 $7 '('"x") $10 $11 '"" $6)) (let $13 (DataSink '"pq" '"lb")) (let $14 (PqTopic '"lb" '"/Root" '"my_out_topic" $4 '() (StructType '('"Data" $5)))) (let $15 (DqPqTopicSink $14 '($8) $11)) (return (Commit! (DqQuery! $2 '((DqStage '((DqSource $3 $12)) (lambda '($16) (block '( (let $17 '('('"format" '"json") '('"formatSettings" '('('"data.datetime.formatname" '"POSIX") '('"data.timestamp.formatname" '"POSIX"))) '('"settings" '($9)))) (let $18 (DqSourceWideWrap $16 $3 $6 $17)) (return (NarrowMap $18 (lambda '($19) (AsStruct '('"x" $19))))) ))) '('('"_logical_id" '200296)) '((DqSink '"0" $13 $15))))) $13)) ) Done. |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |75.7%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/persqueue_v1/persqueue_ut.cpp >> Mirror3of4::ReplicationHuge [GOOD] >> Dictionary::ComparePayloadAndFull [GOOD] >> Hash::ScalarBinaryHash [GOOD] >> Hash::ScalarCTypeHash [GOOD] >> Hash::ScalarCompositeHash [GOOD] >> ProgramStep::Round0 [GOOD] >> ProgramStep::Round1 [GOOD] >> ProgramStep::Filter [GOOD] >> ProgramStep::Add [GOOD] >> ProgramStep::Substract [GOOD] >> ProgramStep::Multiply [GOOD] >> ProgramStep::Divide [GOOD] >> ProgramStep::Gcd [GOOD] >> ProgramStep::Lcm [GOOD] >> ProgramStep::Mod [GOOD] >> ProgramStep::ModOrZero [GOOD] >> ProgramStep::Abs [GOOD] >> ProgramStep::Negate [GOOD] >> ProgramStep::Compares [GOOD] >> ProgramStep::Logic0 [GOOD] >> ProgramStep::Logic1 [GOOD] >> ProgramStep::StartsWith [GOOD] >> ProgramStep::EndsWith [GOOD] >> ProgramStep::MatchSubstring [GOOD] >> ProgramStep::StartsWithIgnoreCase [GOOD] >> ProgramStep::EndsWithIgnoreCase [GOOD] >> ProgramStep::MatchSubstringIgnoreCase [GOOD] >> ProgramStep::ScalarTest [GOOD] >> ProgramStep::TestValueFromNull [GOOD] >> ProgramStep::MergeFilterSimple [GOOD] >> ProgramStep::Projection [GOOD] >> ProgramStep::MinMax [GOOD] >> ProgramStep::Sum [GOOD] >> ProgramStep::SumGroupBy [GOOD] >> ProgramStep::SumGroupByNotNull [GOOD] >> ProgramStep::MinMaxSomeGroupBy >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] |75.7%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut >> ProgramStep::MinMaxSomeGroupBy [GOOD] >> ProgramStep::MinMaxSomeGroupByNotNull [GOOD] >> SortableBatchPosition::FindPosition [GOOD] |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant 
|75.6%| [CC] {BAZEL_DOWNLOAD} $(S)/ydb/services/cms/cms_ut.cpp
|75.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut
|75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots
>> ydb-tests-functional-autoconfig::import_test [GOOD]
|75.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/sequenceshard/public/ut/ydb-core-tx-sequenceshard-public-ut
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/formats/arrow/ut/unittest >> SortableBatchPosition::FindPosition [GOOD]
Test command err: Process: 100000d;/100000; 10000d;/10000;
NO_CODEC(poolsize=1024;keylen=1) 0.2021203448 0.2210911404
NO_CODEC(poolsize=1024;keylen=10) 0.1534132783 0.2482180533
NO_CODEC(poolsize=1024;keylen=16) 0.1104676508 0.2045372848
NO_CODEC(poolsize=1024;keylen=32) 0.06592569055 0.1591802296
NO_CODEC(poolsize=1024;keylen=64) 0.03972180035 0.1324717476
NO_CODEC(poolsize=128;keylen=1) 0.2016566193 0.2164784476
NO_CODEC(poolsize=128;keylen=10) 0.07304169975 0.08752922393
NO_CODEC(poolsize=128;keylen=16) 0.05151637558 0.06514358749
NO_CODEC(poolsize=128;keylen=32) 0.02919093319 0.04189888314
NO_CODEC(poolsize=128;keylen=64) 0.01605694811 0.02821124922
NO_CODEC(poolsize=16;keylen=1) 0.2010010074 0.2099570542
NO_CODEC(poolsize=16;keylen=10) 0.0719219365 0.07635285397
NO_CODEC(poolsize=16;keylen=16) 0.05039654131 0.05396013899
NO_CODEC(poolsize=16;keylen=32) 0.02807102527 0.03070808446
NO_CODEC(poolsize=16;keylen=64) 0.01493699686 0.01701612239
NO_CODEC(poolsize=1;keylen=1) 0.2008730831 0.2086845872
NO_CODEC(poolsize=1;keylen=10) 0.07177339648 0.07487027428
NO_CODEC(poolsize=1;keylen=16) 0.0502445638 0.05244238527
NO_CODEC(poolsize=1;keylen=32) 0.02791992658 0.0291982148
NO_CODEC(poolsize=1;keylen=64) 0.01478641518 0.01551089526
NO_CODEC(poolsize=512;keylen=1) 0.2021203448 0.2210911404
NO_CODEC(poolsize=512;keylen=10) 0.1482943606 0.1971260763
NO_CODEC(poolsize=512;keylen=16) 0.1053484084 0.1534129488
NO_CODEC(poolsize=512;keylen=32) 0.0608061115 0.1080222928
NO_CODEC(poolsize=512;keylen=64) 0.03460202321 0.08129402495
NO_CODEC(poolsize=64;keylen=1) 0.2013687897 0.2136153969
NO_CODEC(poolsize=64;keylen=10) 0.07240183504 0.08114272681
NO_CODEC(poolsize=64;keylen=16) 0.05087647028 0.05875304549
NO_CODEC(poolsize=64;keylen=32) 0.02855098581 0.03550414104
NO_CODEC(poolsize=64;keylen=64) 0.01541697597 0.02181403389
lz4(poolsize=1024;keylen=1) 0.006629768257 0.05541610349
lz4(poolsize=1024;keylen=10) 0.04233951498 0.3344832994
lz4(poolsize=1024;keylen=16) 0.05657489465 0.404264214
lz4(poolsize=1024;keylen=32) 0.09037137941 0.5318074361
lz4(poolsize=1024;keylen=64) 0.01074936154 0.1063492063
lz4(poolsize=128;keylen=1) 0.003831111821 0.02881389382
lz4(poolsize=128;keylen=10) 0.00718182175 0.06087121933
lz4(poolsize=128;keylen=16) 0.008735936466 0.07523964551
lz4(poolsize=128;keylen=32) 0.01375268158 0.117441454
lz4(poolsize=128;keylen=64) 0.02262360212 0.1850289108
lz4(poolsize=16;keylen=1) 0.00273442178 0.01820340324
lz4(poolsize=16;keylen=10) 0.003078137332 0.02169239789
lz4(poolsize=16;keylen=16) 0.003266503667 0.02356577168
lz4(poolsize=16;keylen=32) 0.003742685614 0.02844311377
lz4(poolsize=16;keylen=64) 0.004937163375 0.03979647465
lz4(poolsize=1;keylen=1) 0.00251497006 0.01603325416
lz4(poolsize=1;keylen=10) 0.002531395234 0.01628089447
lz4(poolsize=1;keylen=16) 0.002515970516 0.01617933723
lz4(poolsize=1;keylen=32) 0.00251450677 0.01630226314
lz4(poolsize=1;keylen=64) 0.002511620933 0.01653353149
lz4(poolsize=512;keylen=1) 0.005362411291 0.04359726295
lz4(poolsize=512;keylen=10) 0.02347472854 0.1933066062
lz4(poolsize=512;keylen=16) 0.03056053336 0.2426853056
lz4(poolsize=512;keylen=32) 0.04856356058 0.3467897492
lz4(poolsize=512;keylen=64) 0.04102771881 0.3228658321
lz4(poolsize=64;keylen=1) 0.003312844256 0.02372010279
lz4(poolsize=64;keylen=10) 0.004839661617 0.03863241259
lz4(poolsize=64;keylen=16) 0.005715507689 0.04687204687
lz4(poolsize=64;keylen=32) 0.007821957352 0.06669044223
lz4(poolsize=64;keylen=64) 0.01258912656 0.1073551894
zstd(poolsize=1024;keylen=1) 0.007324840764 0.0754840827
zstd(poolsize=1024;keylen=10) 0.04506846012 0.3776978417
zstd(poolsize=1024;keylen=16) 0.0655640205 0.4694540288
zstd(poolsize=1024;keylen=32) 0.1110720087 0.6098141264
zstd(poolsize=1024;keylen=64) 0.1914108287 0.7447345433
zstd(poolsize=128;keylen=1) 0.003769847609 0.04002713704
zstd(poolsize=128;keylen=10) 0.007456731695 0.07809798271
zstd(poolsize=128;keylen=16) 0.0102539786 0.1029455519
zstd(poolsize=128;keylen=32) 0.01677217062 0.1578947368
zstd(poolsize=128;keylen=64) 0.03005940945 0.2517949988
zstd(poolsize=16;keylen=1) 0.002620896858 0.02794819359
zstd(poolsize=16;keylen=10) 0.002816201441 0.03048416019
zstd(poolsize=16;keylen=16) 0.003368308096 0.03570300158
zstd(poolsize=16;keylen=32) 0.004159808469 0.0434375
zstd(poolsize=16;keylen=64) 0.005779996974 0.05875115349
zstd(poolsize=1;keylen=1) 0.002461243407 0.02626193724
zstd(poolsize=1;keylen=10) 0.002154636612 0.0234375
zstd(poolsize=1;keylen=16) 0.002356872222 0.02519132653
zstd(poolsize=1;keylen=32) 0.002427911996 0.02573879886
zstd(poolsize=1;keylen=64) 0.00258021431 0.02699269609
zstd(poolsize=512;keylen=1) 0.005583027596 0.05848930481
zstd(poolsize=512;keylen=10) 0.0236929438 0.2237078941
zstd(poolsize=512;keylen=16) 0.03443366072 0.2936507937
zstd(poolsize=512;keylen=32) 0.05917328099 0.4212765957
zstd(poolsize=512;keylen=64) 0.1058929843 0.5749553837
zstd(poolsize=64;keylen=1) 0.00319560285 0.03401360544
zstd(poolsize=64;keylen=10) 0.004852093844 0.05176470588
zstd(poolsize=64;keylen=16) 0.00633344236 0.06557881773
zstd(poolsize=64;keylen=32) 0.009647738439 0.09619952494
zstd(poolsize=64;keylen=64) 0.01626771323 0.1514644351
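In the NO_CODEC serialization blocks that follow, every `--------payload / total = percent` line shows a total exactly 128 bytes above the payload, so the printed percentage is just the fixed framing cost 128/total amortized over the batch. A minimal re-derivation of those numbers — the 128-byte constant is read off this log, not taken from the arrow serializer's sources:

```cpp
#include <cstdio>

// Recomputes the "payload / total = percent" lines printed by the unittest.
// The framing constant 128 is inferred from the log (total - payload is
// always 128 here); this is an illustration, not the serializer's code.
int main() {
    const int payloads[] = {5168, 14168, 20168, 36168, 68168};
    const int framing = 128;  // constant gap visible in every pair below
    for (int p : payloads) {
        int total = p + framing;
        std::printf("%d / %d = %.10g%%\n", p, total, 100.0 * framing / total);
    }
    return 0;
}
```

By 68 KB batches the fixed cost is already below 0.2%, which is the pattern the repeated blocks below confirm for every pool size.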
NO_CODEC
--1000
----1
------1
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1;
--------5168 / 5296 = 2.416918429%
------10
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=14168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1;
--------14168 / 14296 = 0.8953553442%
------16
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=20168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1;
--------20168 / 20296 = 0.6306661411%
------32
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=36168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1;
--------36168 / 36296 = 0.35265594%
------64
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=68168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1;
--------68168 / 68296 = 0.1874194682%
----16
------1
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1;
--------5168 / 5296 = 2.416918429%
------10
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=14168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1;
--------14168 / 14296 = 0.8953553442%
------16
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=20168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1;
--------20168 / 20296 = 0.6306661411%
------32
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=36168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1;
--------36168 / 36296 = 0.35265594%
------64
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=68168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1;
--------68168 / 68296 = 0.1874194682%
----64
------1
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1;
--------5168 / 5296 = 2.416918429%
------10
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=14168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1;
--------14168 / 14296 = 0.8953553442%
------16
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=20168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1;
--------20168 / 20296 = 0.6306661411%
------32
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=36168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=36168;columns=1;
--------36168 / 36296 = 0.35265594%
------64
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=68168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=68168;columns=1;
--------68168 / 68296 = 0.1874194682%
----128
------1
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=5168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=5168;columns=1;
--------5168 / 5296 = 2.416918429%
------10
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=14168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=14168;columns=1;
--------14168 / 14296 = 0.8953553442%
------16
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=20168;columns=1;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=20168;columns=1; - ... (9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, 
label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=216;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> 
N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, 
label="N4(18):{\"i\":\"1,2\",\"o\":\"3,4\",\"t\":\"Aggregation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"1\",\"p\":{\"address\":{\"name\":\"x\",\"id\":1}},\"o\":\"1\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"y\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(27):{\"i\":\"1,3,4\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N2 -> N5[label="1"]; N0 -> N5[label="2"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"x\",\"id\":1},{\"name\":\"y\",\"id\":2}]},\"o\":\"1,2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=192;columns=1; |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |75.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/import_test >> ydb-tests-functional-autoconfig::import_test [GOOD] |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/client/ut/ydb-core-client-ut |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |75.6%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> SemiSortedDeltaAndVarLengthCodec::Random32 >> TBlobStorageSyncLogDsk::SeveralChunks [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_OnePageIndexed [GOOD] >> TBlobStorageSyncLogDsk::OverlappingPages_SeveralPagesIndexed [GOOD] >> TBlobStorageSyncLogDsk::TrimLog [GOOD] >> TBlobStorageSyncLogMem::FilledIn1PutAfterSnapshot [GOOD] |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} 
$(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_mirror3of4/unittest >> Mirror3of4::ReplicationHuge [GOOD] Test command err: 2025-06-03T10:21:07.659741Z 1 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659815Z 2 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:1:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659845Z 3 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:2:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659879Z 4 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:3:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659909Z 5 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:4:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659935Z 6 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:5:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659971Z 7 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:6:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.659998Z 8 00h00m00.000000s :BS_SKELETON INFO: PDiskId# 1 VDISK[0:_:0:7:0]: (0) SKELETON START Marker# BSVS37 2025-06-03T10:21:07.660091Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:0:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660114Z 1 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Sending TEvYardInit: pdiskGuid# 9024190005639625593 skeletonid# [1:139:13] selfid# [1:155:22] delay 0.000000 sec 2025-06-03T10:21:07.660123Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660130Z 2 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) Sending TEvYardInit: pdiskGuid# 1507844712683735155 skeletonid# [2:140:11] selfid# [2:156:12] delay 0.000000 sec 2025-06-03T10:21:07.660135Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660142Z 3 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Sending TEvYardInit: pdiskGuid# 368337877801634583 skeletonid# [3:141:11] selfid# [3:157:12] delay 0.000000 sec 2025-06-03T10:21:07.660148Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660153Z 4 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) Sending TEvYardInit: pdiskGuid# 8759093550386486092 skeletonid# [4:142:11] selfid# [4:158:12] delay 0.000000 sec 2025-06-03T10:21:07.660159Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660165Z 5 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) Sending TEvYardInit: pdiskGuid# 15448034051844567379 skeletonid# [5:143:11] selfid# [5:159:12] delay 0.000000 sec 2025-06-03T10:21:07.660172Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660178Z 6 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) Sending TEvYardInit: pdiskGuid# 4164179309534612228 skeletonid# [6:144:11] selfid# [6:160:12] delay 0.000000 sec 2025-06-03T10:21:07.660183Z 7 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:6:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660189Z 7 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 
VDISK[0:_:0:6:0]: (0) Sending TEvYardInit: pdiskGuid# 106264343363092691 skeletonid# [7:145:11] selfid# [7:161:12] delay 0.000000 sec 2025-06-03T10:21:07.660195Z 8 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:7:0]: (0) LocalRecovery START 2025-06-03T10:21:07.660201Z 8 00h00m00.000000s :BS_LOCALRECOVERY DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) Sending TEvYardInit: pdiskGuid# 11625528978641733344 skeletonid# [8:146:11] selfid# [8:162:12] delay 0.000000 sec 2025-06-03T10:21:07.660330Z 1 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[1:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:0:0] PDiskGuid# 9024190005639625593 CutLogID# [1:139:13] WhiteboardProxyId# [1:122:10]} 2025-06-03T10:21:07.660525Z 1 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[1:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660540Z 2 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[2:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:1:0] PDiskGuid# 1507844712683735155 CutLogID# [2:140:11] WhiteboardProxyId# [2:124:10]} 2025-06-03T10:21:07.660550Z 2 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[2:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660558Z 3 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[3:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:2:0] PDiskGuid# 368337877801634583 CutLogID# [3:141:11] WhiteboardProxyId# [3:126:10]} 2025-06-03T10:21:07.660566Z 3 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[3:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660573Z 4 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[4:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:3:0] PDiskGuid# 8759093550386486092 CutLogID# [4:142:11] WhiteboardProxyId# [4:128:10]} 2025-06-03T10:21:07.660580Z 4 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[4:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 
1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660587Z 5 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[5:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:4:0] PDiskGuid# 15448034051844567379 CutLogID# [5:143:11] WhiteboardProxyId# [5:130:10]} 2025-06-03T10:21:07.660594Z 5 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[5:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660601Z 6 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[6:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:5:0] PDiskGuid# 4164179309534612228 CutLogID# [6:144:11] WhiteboardProxyId# [6:132:10]} 2025-06-03T10:21:07.660609Z 6 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[6:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660617Z 7 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[7:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:6:0] PDiskGuid# 106264343363092691 CutLogID# [7:145:11] WhiteboardProxyId# [7:134:10]} 2025-06-03T10:21:07.660625Z 7 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[7:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660632Z 8 00h00m00.000000s :BS_PDISK NOTICE: {PDM01@pdisk_mock.cpp:453} PDiskMock[8:1] received TEvYardInit Msg# {EvYardInit ownerRound# 2 VDisk# [0:4294967295:0:7:0] PDiskGuid# 11625528978641733344 CutLogID# [8:146:11] WhiteboardProxyId# [8:136:10]} 2025-06-03T10:21:07.660639Z 8 00h00m00.000000s :BS_PDISK INFO: {PDM02@pdisk_mock.cpp:488} PDiskMock[8:1] sending TEvYardInitResult Msg# {EvYardInitResult Status# OK ErrorReason# "" StatusFlags# None PDiskParams# {{TPDiskParams ownerId# 1 ownerRound# 2 ChunkSize# 134217728 AppendBlockSize# 4096 RecommendedReadSize# 45056 SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 BulkWriteBlockSize# 65536 PrefetchSizeBytes# 209715 GlueRequestDistanceBytes# 41943}} OwnedChunks# {}} Created# true 2025-06-03T10:21:07.660978Z 1 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 
VDISK[0:_:0:0:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-03T10:21:07.661180Z 2 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:1:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-03T10:21:07.661379Z 3 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:2:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-03T10:21:07.661563Z 4 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:3:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-03T10:21:07.661919Z 5 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:4:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# false Lsn# NotSet] Blocks# [ExplicitlySet# true Derived# false Lsn# NotSet] Barriers# [ExplicitlySet# true Derived# false Lsn# NotSet] SyncLog# 0 2025-06-03T10:21:07.662113Z 6 00h00m00.000000s :BS_LOCALRECOVERY NOTICE: PDiskId# 1 VDISK[0:_:0:5:0]: (0) MAX LSNS: LogoBlobs# [ExplicitlySet# true Derived# ... PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [7:345:29] 2025-06-03T10:21:22.871151Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 578 Lsn# 25 LsnSegmentStart# 25 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-03T10:21:22.871154Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 25 Cookie# 0}} Recipient# [8:355:29] 2025-06-03T10:21:22.871568Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 578 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-06-03T10:21:22.871580Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [7:345:29] 2025-06-03T10:21:22.871590Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 578 Lsn# 26 LsnSegmentStart# 26 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-03T10:21:22.871596Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending 
TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 26 Cookie# 0}} Recipient# [8:355:29] 2025-06-03T10:21:22.871645Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.871683Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[7:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 578 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:6:0] 2025-06-03T10:21:22.871688Z 7 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[7:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [7:345:29] 2025-06-03T10:21:22.871693Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD(0x310271894210): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} 2025-06-03T10:21:22.871698Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM11@pdisk_mock.cpp:585} PDiskMock[8:1] received TEvLog Msg# {EvLog ownerId# 1 ownerRound# 2 Signature# 138 DataSize# 578 Lsn# 27 LsnSegmentStart# 27 Cookie# 0{CommitRecord FirstLsnToKeep# 0 IsStartingPoint# 1 DeleteToDecommitted# 0 CommitChunks# [] DeleteChunks# [] DirtyChunks# []}} VDiskId# [0:4294967295:0:7:0] 2025-06-03T10:21:22.871702Z 8 00h00m00.000000s :BS_PDISK DEBUG: {PDM12@pdisk_mock.cpp:647} PDiskMock[8:1] sending TEvLogResult Msg# {EvLogResult Status# OK ErrorReason# "" StatusFlags# None LogChunkCount# 0{Lsn# 27 Cookie# 0}} Recipient# [8:355:29] 2025-06-03T10:21:22.871709Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[2:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} VDiskId# [0:4294967295:0:1:0] 2025-06-03T10:21:22.871817Z 2 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[2:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 53886563919168 StatusFlags# None} 2025-06-03T10:21:22.871835Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) GLUEREAD FINISHED(0x310271894210): actualReadN# 1 origReadN# 1 2025-06-03T10:21:22.871874Z 2 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:1:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1369701526376808448} BlockedGeneration# 0} 2025-06-03T10:21:22.872390Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 
WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.872475Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD(0x310271894210): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} 2025-06-03T10:21:22.872506Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[3:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} VDiskId# [0:4294967295:0:2:0] 2025-06-03T10:21:22.872583Z 3 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[3:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 53886563919168 StatusFlags# None} 2025-06-03T10:21:22.872601Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) GLUEREAD FINISHED(0x310271894210): actualReadN# 1 origReadN# 1 2025-06-03T10:21:22.872613Z 3 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:2:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:1] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 2522623030983655424} BlockedGeneration# 0} 2025-06-03T10:21:22.872929Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.872961Z 4 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:3:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-06-03T10:21:22.873011Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.873026Z 5 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:4:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 793240774073384960} BlockedGeneration# 0} 2025-06-03T10:21:22.873063Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 
ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.873081Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD(0x310271894d10): {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} 2025-06-03T10:21:22.873091Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM13@pdisk_mock.cpp:737} PDiskMock[6:1] received TEvChunkRead Msg# {EvChunkRead chunkIdx# 1 Offset# 5 Size# 1048576 ownerId# 1 ownerRound# 2 PriorityClass# 2 Cookie# 53886563919168} VDiskId# [0:4294967295:0:5:0] 2025-06-03T10:21:22.873156Z 6 00h00m00.000000s :BS_PDISK DEBUG: {PDM14@pdisk_mock.cpp:777} PDiskMock[6:1] sending TEvChunkReadResult Msg# {EvChunkReadres Status# OK ErrorReason# "" chunkIdx# 1 Offset# 5 DataSize# 1048576 Cookie# 53886563919168 StatusFlags# None} 2025-06-03T10:21:22.873161Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) GLUEREAD FINISHED(0x310271894d10): actualReadN# 1 origReadN# 1 2025-06-03T10:21:22.873171Z 6 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:5:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:2] OK Size# 1048576 FullDataSize# 1048576 PayloadId# 0 Data# 1048576b Ingress# 1946162278680231936} {[1:1:1:0:0:1048576:3] OK Size# 0 FullDataSize# 1048576 BufferData# Ingress# 1946162278680231936} BlockedGeneration# 0} 2025-06-03T10:21:22.873477Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.873505Z 7 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:6:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} 2025-06-03T10:21:22.873548Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGet: {ExtrQuery# [1:1:1:0:0:1048576:0] sh# 0 sz# 0} {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 1680000 ExtQueueId# GetFastRead IntQueueId# IntGetFast CostSettings# { SeekTimeUs# 40 ReadSpeedBps# 1048576000 WriteSpeedBps# 1048576000 ReadBlockSize# 65536 WriteBlockSize# 65536 MinHugeBlobInBytes# 65537} SendMeCostSettings# 1} Notify# 0 Internals# 1 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BSVS14 2025-06-03T10:21:22.873563Z 8 00h00m00.000000s :BS_VDISK_GET DEBUG: PDiskId# 1 VDISK[0:_:0:7:0]: (0) TEvVGetResult: {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:1048576:0] NODATA Ingress# 216780021769961472} BlockedGeneration# 0} |75.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |75.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk >> TBlobStorageSyncLogKeeper::CutLog_EntryPointNewFormat [GOOD] >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf >> TBlobStorageSyncLogMem::EmptyMemRecLog [GOOD] >> TBlobStorageSyncLogMem::FilledIn1 [GOOD] >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] >> CodecsTest::Basic [GOOD] >> CodecsTest::NaturalNumbersAndZero [GOOD] >> CodecsTest::LargeAndRepeated [GOOD] >> 
NaiveFragmentWriterTest::Basic [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::Random64 >> NaiveFragmentWriterTest::Long [GOOD] >> ReorderCodecTest::Basic [GOOD] >> RunLengthCodec::BasicTest32 [GOOD] >> RunLengthCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::Random32 >> TBlobStorageSyncLogData::SerializeParseEmpty1_Proto [GOOD] >> TBlobStorageSyncLogData::SerializeParseEmpty2_Proto [GOOD] >> SemiSortedDeltaCodec::Random32 [GOOD] >> SemiSortedDeltaCodec::Random64 >> RunLengthCodec::Random32 >> TBlobStorageSyncLogDsk::AddByOne [GOOD] >> TBlobStorageSyncLogDsk::AddFive [GOOD] >> TBlobStorageSyncLogDsk::ComplicatedSerializeWithOverlapping [GOOD] >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut >> SemiSortedDeltaAndVarLengthCodec::Random64 [GOOD] >> SemiSortedDeltaCodec::BasicTest32 [GOOD] >> SemiSortedDeltaCodec::BasicTest64 [GOOD] >> VarLengthIntCodec::Random32 [GOOD] >> VarLengthIntCodec::Random64 [GOOD] >> SemiSortedDeltaCodec::Random64 [GOOD] >> RunLengthCodec::Random32 [GOOD] >> RunLengthCodec::Random64 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest32 [GOOD] >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_simplified[dump_ds_init] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> NaiveFragmentWriterTest::Basic [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::TrimLog [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogMem::EmptyMemRecLogPutAfterSnapshot [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> TBlobStorageSyncLogDsk::DeleteChunks [GOOD] |75.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sequenceshard/public/ut/unittest |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> RunLengthCodec::BasicTest64 [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::Random64 [GOOD] |75.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::Random64 [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaCodec::BasicTest64 [GOOD] |75.5%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> SemiSortedDeltaAndVarLengthCodec::BasicTest64 [GOOD] |75.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |75.4%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> TBlobStorageSyncLogMem::ManyLogoBlobsPerf [GOOD] >> 
TBlobStorageSyncLogMem::ManyLogoBlobsBuildSwapSnapshot [GOOD] >> VarLengthIntCodec::BasicTest32 [GOOD] |75.3%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore |75.3%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |75.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |75.2%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |75.1%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/synclog/ut/unittest >> VarLengthIntCodec::BasicTest32 [GOOD] |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/security/ut/ydb-core-security-ut |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |75.0%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |74.8%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/libpy3node_broker.global.a |74.8%| [LD] {BAZEL_DOWNLOAD, FAILED} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump] [GOOD] >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] >> ydb-tests-compatibility::import_test [GOOD] |73.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/stability/tool/tool |73.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/stability/tool/tool |72.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |72.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/run_tests/libpy3benchmarks-runner-run_tests.global.a |72.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/transfer/libpy3transfer.global.a |72.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |72.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/tests/tpch/tpch |72.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |72.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |72.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/compatibility/import_test >> ydb-tests-compatibility::import_test [GOOD] |72.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit >> 
test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/tests/tpch/tpch |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 |71.9%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |71.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/ydb-tests-functional-backup |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/tools/dq/service_node/service_node |71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/service_node/service_node >> TBsVDiskGC::TGCManyVPutsDelTabletTest >> TBsVDiskOutOfSpace::WriteUntilOrangeZone [GOOD] >> TBsVDiskOutOfSpace::WriteUntilYellowZone >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh >> TBsLocalRecovery::StartStopNotEmptyDB >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh >> TBsLocalRecovery::WriteRestartReadHuge |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/ut_transform/py3test >> test_transform.py::TestYamlConfigTransformations::test_domains_config[dump_ds_init] [GOOD] >> TBsVDiskBadBlobId::PutBlobWithBadId >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh >> TBsVDiskExtreme::Simple3Put3GetFresh >> TBsVDiskExtremeHuge::Simple3Put3GetFresh |71.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/blobsan/blobsan >> TBsVDiskExtreme::SimpleGetFromEmptyDB >> TBsVDiskRepl3::SyncLogTest >> TBsDbStat::ChaoticParallelWrite_DbStat >> TBsVDiskRepl1::ReplProxyKeepBits >> TBsVDiskGC::GCPutKeepIntoEmptyDB >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh >> transfer::import_test [GOOD] |71.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/transfer/import_test >> transfer::import_test [GOOD] |71.1%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction |70.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |70.8%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/tx__start_tablet.cpp >> TBsVDiskBadBlobId::PutBlobWithBadId [GOOD] >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath >> TBsVDiskRange::Simple3PutRangeGetNothingForwardFresh [GOOD] >> TBsVDiskExtreme::Simple3Put3GetFresh [GOOD] |70.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node >> TBsVDiskExtreme::Simple3Put3GetCompaction >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction >> TBsVDiskExtremeHuge::Simple3Put3GetFresh [GOOD] >> TBsVDiskExtreme::SimpleGetFromEmptyDB [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut >> TBsVDiskGC::GCPutKeepIntoEmptyDB [GOOD] >> 
TBsVDiskGC::GCPutBarrierVDisk0NoSync >> TBsVDiskRange::Simple3PutRangeGetAllForwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction >> TBsVDiskExtreme::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction >> TBsVDiskExtremeHandoffHuge::SimpleHndPut1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh >> TBsVDiskRepl3::SyncLogTest [GOOD] >> THugeMigration::ExtendMap_HugeBlobs >> TBsVDiskBrokenPDisk::WriteUntilDeviceDeath [GOOD] >> TBsVDiskDefrag::DefragEmptyDB >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetFresh [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRepl1::ReplProxyData >> TBsVDiskRangeHuge::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh >> tool::import_test [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction |66.6%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/libpy3ydb-dstool.global.a |66.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_checks/yaml_config-validator-ut-validator_checks >> TBsVDiskExtremeHuge::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh |66.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/ydb_recipe/libpy3ydb_recipe.global.a >> TBsVDiskExtreme::Simple3Put1SeqGetAllCompaction [GOOD] |66.4%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/runner/libpy3benchmarks-runner-runner.global.a >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh >> TBsVDiskDefrag::DefragEmptyDB [GOOD] >> TBsVDiskDefrag::Defrag50PercentGarbage >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] |66.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/statistics_workload/statistics_workload >> TBsVDiskRepl1::ReplProxyData [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction >> TBsVDiskExtremeHandoff::SimpleHnd6Put1SeqGetCompaction [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] >> TBsVDiskExtreme::Simple3Put3GetCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh >> THugeMigration::ExtendMap_HugeBlobs [GOOD] >> THugeMigration::ExtendMap_SmallBlobsBecameHuge >> TBlobStorageBlocksCacheTest::Repeat [GOOD] >> TBsVDiskGC::GCPutBarrierVDisk0NoSync [GOOD] >> TBsVDiskGC::GCPutBarrierSync >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction >> Checks::BasicIntChecks [GOOD] >> Checks::IntArrayValidation [GOOD] >> Checks::BasicStringChecks [GOOD] >> Checks::OpaqueMaps [GOOD] >> Checks::ErrorInCheck [GOOD] >> Checks::MapValidation [GOOD] >> 
TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction >> TBsVDiskExtreme::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction >> TBsVDiskManyPutGet::ManyPutGetWaitCompaction [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly |65.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |65.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |65.9%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/result_compare/libpy3benchmarks-runner-result_compare.global.a |65.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::LegacyAndModern [GOOD] >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardFresh [GOOD] |65.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest |65.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::DeepInFlight [GOOD] |65.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutIntoPast [GOOD] |65.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stability/tool/import_test >> tool::import_test [GOOD] |65.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::PutDeepIntoPast [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetFresh [GOOD] >> HullReplWriteSst::Basic >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction |65.7%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/libpy3tests-tools-ydb_serializable.global.a >> TBsVDiskExtreme::Simple3Put1SeqSubsOkFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::Repeat [GOOD] |65.7%| [TA] $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |65.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/resource_pools/ut/ydb-core-resource_pools-ut >> TBsVDiskRangeHuge::Simple3PutRangeGetAllBackwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/unittest >> TBlobStorageBlocksCacheTest::MultipleTables [GOOD] |65.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh |65.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/tests-unit-client-oauth2_token_exchange |65.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/udfs/common/datetime/libdatetime_udf.so >> THugeMigration::ExtendMap_SmallBlobsBecameHuge [GOOD] >> THugeMigration::RollbackMap_HugeBlobs >> TBsVDiskExtremeHuge::Simple3Put1SeqGetAllCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsOkCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh >> TBsVDiskManyPutGet::ManyPutRangeGetFreshIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly >> TBsVDiskExtreme::Simple3Put1SeqGet2Compaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh |65.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/validator/ut/validator_checks/unittest >> Checks::MapValidation [GOOD] |65.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/re2/libre2_udf.so |65.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBsVDiskRange::Simple3PutRangeGetNothingBackwardCompaction [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh |65.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/pire/libpire_udf.so |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest |65.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/run_tests/run_tests >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsOkCompaction [GOOD] >> ResourcePoolTest::SettingsValidation [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh >> ResourcePoolTest::SettingsExtracting [GOOD] >> ResourcePoolClassifierTest::SettingsValidation [GOOD] >> ResourcePoolClassifierTest::StringSettingsParsing [GOOD] >> ResourcePoolClassifierTest::SettingsExtracting [GOOD] >> ResourcePoolTest::IntSettingsParsing [GOOD] >> ResourcePoolTest::SecondsSettingsParsing [GOOD] >> ResourcePoolClassifierTest::IntSettingsParsing [GOOD] >> ResourcePoolTest::PercentSettingsParsing [GOOD] >> TBsVDiskExtremeHandoff::SimpleHnd2Put1GetCompaction [GOOD] |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Fresh [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction >> TestTokenExchange::Exchanges [GOOD] >> TestTokenExchange::ExchangesFromConfig |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBsVDiskRepl1::ReplProxyKeepBits [GOOD] >> TestTokenExchange::ExchangesFromConfig [GOOD] >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk >> TestTokenExchange::BadParams [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorFresh [GOOD] |64.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest >> TBlobStorageReplRecoveryMachine::BasicFunctionality [GOOD] >> TestTokenExchange::BadParamsFromConfig [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction >> TestTokenExchange::BadResponse >> TBsVDiskExtreme::Simple3Put1GetMissingPartFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TestTokenExchange::BadResponse [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction >> TestTokenExchange::BadResponseFromConfig [GOOD] >> TestTokenExchange::UpdatesToken |64.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardFresh [GOOD] |64.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRangeHuge::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] |64.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/resource_pools/ut/unittest >> ResourcePoolTest::PercentSettingsParsing [GOOD] |64.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleForwardFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorFresh [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction >> TestTokenExchange::UpdatesToken [GOOD] >> TestTokenExchange::UpdatesTokenFromConfig |64.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/crypto/ut/ydb-core-blobstorage-crypto-ut >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] >> THugeMigration::RollbackMap_HugeBlobs [GOOD] >> TMonitoring::ReregisterTest [GOOD] >> run_tests::import_test [GOOD] >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] |64.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpch/ut/ydb-library-workload-tpch-ut |64.5%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/libpy3nemesis.global.a >> TestTokenExchange::UpdatesTokenFromConfig [GOOD] >> TestTokenExchange::UsesCachedToken [GOOD] >> TestTokenExchange::UpdatesTokenInBackgroud >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetMiddleBackwardCompaction [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TMonitoring::ReregisterTest [GOOD] Test command err: RUN TEST SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration SendData iteration |64.4%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/library/benchmarks/runner/run_tests/import_test >> run_tests::import_test [GOOD] |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHandoffHuge::SimpleHnd2Put1GetCompaction [GOOD] >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqSubsErrorCompaction [GOOD] |64.3%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/libpy3local_ydb.global.a >> TBlobStorageCrypto::TestOffsetStreamCypher [GOOD] >> TBlobStorageCrypto::TestInplaceStreamCypher [GOOD] >> TBlobStorageCrypto::TestMixedStreamCypher [GOOD] >> TBlobStorageCrypto::PerfTestStreamCypher >> TBsLocalRecovery::WriteRestartReadHuge [GOOD] >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeIncreased >> TBlobStorageCrypto::PerfTestStreamCypher [GOOD] >> TBlobStorageCrypto::UnalignedTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtremeHuge::Simple3Put1SeqGet2Compaction [GOOD] >> TpchQueries::ScaleFactor [GOOD] |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingPartCompaction [GOOD] |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1SeqSubsErrorCompaction [GOOD] >> TBlobStorageCryptoRope::TestEqualInplaceStreamCypher [GOOD] |64.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/cluster_ordering-ut >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher >> ydb_recipe::import_test [GOOD] |64.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/workload/tpch/ut/unittest >> TpchQueries::ScaleFactor [GOOD] |64.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/tools/ydb_recipe/import_test >> ydb_recipe::import_test [GOOD] >> TestTokenExchange::UpdatesTokenInBackgroud [GOOD] >> TestTokenExchange::UpdatesTokenAndRetriesErrors |64.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut >> statistics_workload::import_test [GOOD] |64.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/validator/ut/validator_builder/yaml_config-validator-ut-validator_builder >> TWeighedOrderingTest::WeighedOrderingTest [GOOD] >> TWeighedOrderingTest::SimpleSelectionTest [GOOD] >> TWeighedOrderingTest::WeighedSelectionTest [GOOD] >> TBsVDiskRepl1::ReplEraseDiskRestore [GOOD] >> TBsVDiskRepl1::ReadOnly |63.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/statistics_workload/import_test >> statistics_workload::import_test [GOOD] |63.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest >> TWeighedOrderingTest::WeighedSelectionTest [GOOD] >> ValidatorBuilder::CanHaveDuplicateType [GOOD] >> ValidatorBuilder::CanHaveMultipleType [GOOD] >> ValidatorBuilder::BuildSimpleValidator [GOOD] >> ValidatorBuilder::CreateMultitypeNode [GOOD] >> ValidatorBuilder::CanCreateAllTypesOfNodes [GOOD] >> TBsVDiskRepl1::ReadOnly [GOOD] |63.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/workload/tpcc/ut/ydb-library-workload-tpcc-ut |63.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/validator/ut/validator_builder/unittest >> 
ValidatorBuilder::CanCreateAllTypesOfNodes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl1::ReadOnly [GOOD] Test command err: 2025-06-03T10:21:46.105609Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:21:46.259616Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9255500860494259210] 2025-06-03T10:21:46.322591Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 >> TBlobStorageCryptoRope::TestEqualMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestMixedStreamCypher >> TBlobStorageCryptoRope::TestMixedStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestOffsetStreamCypher [GOOD] >> TBlobStorageCryptoRope::TestInplaceStreamCypher >> TBlobStorageCryptoRope::TestInplaceStreamCypher [GOOD] >> TBlobStorageCryptoRope::PerfTestStreamCypher [GOOD] >> TBlobStorageCryptoRope::UnalignedTestStreamCypher [GOOD] >> TChaCha::KeystreamTest1 [GOOD] >> TChaCha::KeystreamTest2 [GOOD] >> TChaCha::KeystreamTest3 [GOOD] >> TChaCha::KeystreamTest4 [GOOD] >> TChaCha::KeystreamTest5 [GOOD] >> TChaCha::KeystreamTest6 [GOOD] >> TChaCha::KeystreamTest7 [GOOD] >> TChaCha::KeystreamTest8 [GOOD] >> TChaCha::MultiEncipherOneDecipher [GOOD] >> TChaCha::SecondBlock [GOOD] >> TChaCha512::KeystreamTest1 [GOOD] >> TChaCha512::KeystreamTest2 [GOOD] >> TChaCha512::KeystreamTest3 [GOOD] >> TChaCha512::KeystreamTest4 [GOOD] >> TChaCha512::KeystreamTest5 [GOOD] >> TChaCha512::KeystreamTest6 [GOOD] >> TChaCha512::KeystreamTest7 [GOOD] >> TChaCha512::KeystreamTest8 [GOOD] >> TChaCha512::MultiEncipherOneDecipher [GOOD] >> TChaCha512::SecondBlock [GOOD] >> TBinnedTimerQueueTest::ShouldAddSingleItem >> TChaCha512::CompatibilityTest >> TChaCha512::CompatibilityTest [GOOD] >> TBinnedTimerQueueTest::ShouldAddSingleItem [GOOD] >> TChaChaVec::KeystreamTest1 [GOOD] >> TBinnedTimerQueueTest::ShouldPopItemInOrder [GOOD] >> TChaChaVec::KeystreamTest2 [GOOD] >> TBinnedTimerQueueTest::ShouldInsertOutOfOrderAndStillPopInOrder [GOOD] >> TChaChaVec::KeystreamTest3 [GOOD] >> TBinnedTimerQueueTest::ShouldAdvanceWhenBucketExhausted [GOOD] >> TChaChaVec::KeystreamTest4 [GOOD] >> TBinnedTimerQueueTest::ShouldThrowOnEmptyPop [GOOD] >> TChaChaVec::KeystreamTest5 [GOOD] >> TBinnedTimerQueueTest::ShouldRespectBucketSoftLimit [GOOD] >> TChaChaVec::KeystreamTest6 [GOOD] >> TBinnedTimerQueueTest::ShouldPopInStrictOrderAfterShuffledInsertion >> TChaChaVec::KeystreamTest7 [GOOD] >> TBinnedTimerQueueTest::ShouldPopInStrictOrderAfterShuffledInsertion [GOOD] >> TChaChaVec::KeystreamTest8 [GOOD] >> TChaChaVec::MultiEncipherOneDecipher [GOOD] >> TChaChaVec::SecondBlock [GOOD] >> TBinnedTimerQueueTest::ShouldDistributeTimersAcrossBuckets [GOOD] >> TChaChaVec::CompatibilityTest >> TBinnedTimerQueueTest::ShouldHandleBucketBoundaries [GOOD] >> TBinnedTimerQueueTest::ShouldHandleBucketOverflow [GOOD] >> TBinnedTimerQueueTest::ShouldHandleBucketRotation [GOOD] >> TBinnedTimerQueueTest::ShouldHandleBucketRotationWithFixedTimePoints [GOOD] >> TCircularQueueTest::ShouldReportEmptyInitially [GOOD] >> TCircularQueueTest::ShouldPushAndPopSingleItem [GOOD] >> TCircularQueueTest::ShouldRejectPushWhenFull [GOOD] >> TCircularQueueTest::ShouldRejectPopWhenEmpty [GOOD] >> TChaChaVec::CompatibilityTest [GOOD] >> 
TCircularQueueTest::ShouldPreserveFIFOOrder [GOOD] >> TPoly1305::TestVector1 [GOOD] >> TCircularQueueTest::ShouldWrapAroundProperly [GOOD] >> TPoly1305::TestVector2 [GOOD] >> TCircularQueueTest::ShouldResizeThenOperate [GOOD] >> TPoly1305::TestVector3 [GOOD] >> TPoly1305::TestVector4 [GOOD] >> TPoly1305Vec::TestVector1 [GOOD] >> TPoly1305Vec::TestVector2 [GOOD] >> TPoly1305Vec::TestVector3 [GOOD] >> TPoly1305Vec::TestVector4 [GOOD] >> TTest_t1ha::TestZeroInputHashIsNotZero [GOOD] >> TTest_t1ha::PerfTest [GOOD] >> THistogramTest::ShouldInitializeCorrectly [GOOD] >> THistogramTest::ShouldThrowOnInvalidParameters [GOOD] >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] >> THistogramTest::ShouldRecordValuesInLinearBuckets [GOOD] >> THistogramTest::ShouldRecordValuesInExponentialBuckets [GOOD] >> THistogramTest::ShouldHandleValuesAboveMaxValue [GOOD] >> THistogramTest::ShouldHandleZeroValues [GOOD] >> THistogramTest::ShouldAddHistograms [GOOD] >> THistogramTest::ShouldThrowOnAddingDifferentHistograms [GOOD] >> THistogramTest::ShouldResetHistogram [GOOD] >> THistogramTest::ShouldHandlePercentileEdgeCases [GOOD] >> THistogramTest::ShouldHandleEmptyHistogram [GOOD] >> THistogramTest::ShouldHandleSingleValueHistogram [GOOD] |63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/fq/libs/hmac/ut/ydb-core-fq-libs-hmac-ut |63.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/node_broker >> THistogramTest::ShouldHandleLargeValues [GOOD] >> THistogramTest::ShouldHandleAllBucketTypes [GOOD] >> THistogramTest::ShouldHandleLargeHistogram [GOOD] >> THistogramTest::ShouldHandleBucketBoundaries [GOOD] >> THistogramTest::ShouldHandleVerySmallPercentiles [GOOD] >> THistogramTest::ShouldHandleExactPercentileBoundaries [GOOD] >> THistogramTest::ShouldHandleMaxValueBoundary [GOOD] >> THistogramTest::ShouldHandleRepeatedValues [GOOD] >> THistogramTest::ShouldHandleDifferentHistogramSizes [GOOD] |63.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/runner/runner |63.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/crypto/ut/unittest >> TTest_t1ha::T1haHashResultsStablilityTest [GOOD] >> THistogramTest::ShouldHandleEdgeCaseValues [GOOD] >> THistogramTest::ShouldHandleStressTest [GOOD] >> THistogramTest::ShouldHandleHistogramMerging [GOOD] >> THistogramTest::ShouldHandleSpecific128_8192Case [GOOD] >> TTaskQueueTest::ShouldExecuteTerminalTaskWithSleepsAndTransactions >> TTaskQueueTest::ShouldExecuteTerminalTaskWithSleepsAndTransactions [GOOD] >> TTaskQueueTest::ShouldExecuteMultipleTransactionsWithSleeps >> TTaskQueueTest::ShouldExecuteMultipleTransactionsWithSleeps [GOOD] >> TTaskQueueTest::ShouldPropagateTransactionFailure [GOOD] >> TTaskQueueTest::ShouldHandleMultipleTerminals [GOOD] >> TTaskQueueTest::ShouldHandleQueueLimits >> TTaskQueueTest::ShouldHandleQueueLimits [GOOD] >> TTaskQueueTest::ShouldSupportUnlimitedInflight [GOOD] >> TTaskQueueTest::ShouldLimitInflightTerminals >> TTaskQueueTest::ShouldLimitInflightTerminals [GOOD] >> TTaskTest::ShouldAwaitInnerAndResumeOuter [GOOD] >> TTaskTest::ShouldPropagateExceptionFromInnerToOuter [GOOD] >> TTaskTest::ShouldPropagateExceptionFromOuter [GOOD] >> TTaskTest::ShouldSupportMultipleNestedTasks [GOOD] >> TTaskTest::ShouldSupportVoidTask [GOOD] >> TBsVDiskRepl2::ReplEraseDiskRestoreWOOneDisk [GOOD] >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/debug_tools/ut/ydb-core-debug_tools-ut ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/workload/tpcc/ut/unittest >> 
TTaskTest::ShouldSupportVoidTask [GOOD] Test command err: Failed to push ready internal, queue is full >> TBsVDiskDefrag::Defrag50PercentGarbage [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh >> TBsLocalRecovery::WriteRestartReadHugeIncreased [GOOD] >> TBsLocalRecovery::WriteRestartReadHugeDecreased >> HmacSha::HmacSha1 [GOOD] |63.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tools/visualize_portions/libpy3visualize_portions.global.a |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/ydb_cli-commands-interactive-highlight-ut |63.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/base/ut/ydb-core-blobstorage-base-ut >> TBsVDiskExtreme::Simple3Put1GetMissingKeyFresh [GOOD] >> TBsVDiskGC::GCPutBarrierSync [GOOD] >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction >> TBsVDiskGC::GCPutKeepBarrierSync |63.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/hmac/ut/unittest >> HmacSha::HmacSha1 [GOOD] >> OperationLog::Size1 [GOOD] >> OperationLog::Size8 [GOOD] >> OperationLog::Size29 [GOOD] >> OperationLog::Size1000 |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/metering/ut/ydb-core-metering-ut >> runner::import_test [GOOD] >> OperationLog::Size1000 [GOOD] >> OperationLog::ConcurrentWrites |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/result_compare/result_compare |63.1%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/string/libstring_udf.so >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] >> OperationLog::ConcurrentWrites [GOOD] |63.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part15/ydb-tests-fq-yt-kqp_yt_file-part15 |63.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/runner/import_test >> runner::import_test [GOOD] |63.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blob_depot/ut/ydb-core-blob_depot-ut >> node_broker::import_test [GOOD] >> BufferWithGaps::Basic [GOOD] >> TBatchedVecTest::TestOutputTOutputType [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskExtreme::Simple3Put1GetMissingKeyCompaction [GOOD] >> PtrTest::Test1 [GOOD] Test command err: 2025-06-03T10:21:31.790559Z :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:559: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVPut: TabletID cannot be empty; id# [0:1:10:0:0:10:1] Marker# BSVS43 2025-06-03T10:21:33.654778Z :BS_VDISK_OTHER ERROR: vdisk_context.h:143: PDiskId# 1 VDISK[0:_:0:0:0]: (0) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake' 2025-06-03T10:21:33.654798Z :BS_SKELETON ERROR: blobstorage_skeletonfront.cpp:1751: PDiskId# 1 VDISK[0:_:0:0:0]: (0) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# PDiskId# 1 TEvLog error because PDisk State# Error, there is a terminal internal error in PDisk. Did you check EvYardInit result? 
Marker# BSY07 StateErrorReason# PDisk is in StateError, reason# Received TEvYardControl::Brake Marker# BSVSF03 >> YqlHighlightTests::Empty [GOOD] >> BufferWithGaps::IsReadable [GOOD] |63.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/debug_tools/ut/unittest >> OperationLog::ConcurrentWrites [GOOD] >> TBatchedVecTest::TestToStringInt [GOOD] |63.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/grpc/server/ut/ydb-library-grpc-server-ut >> YqlHighlightTests::Invalid >> YqlHighlightTests::Invalid [GOOD] >> YqlHighlightTests::Emoji [GOOD] >> YqlHighlightTests::Typing [GOOD] |63.0%| [TA] $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... results_accumulator.log} |63.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/stress/node_broker/import_test >> node_broker::import_test [GOOD] >> TBsVDiskGC::GCPutKeepBarrierSync [GOOD] |63.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/base/ut/gtest >> TBatchedVecTest::TestToStringInt [GOOD] >> TBsVDiskGC::GCPutManyBarriersNoSync >> TBsVDiskRepl3::ReplEraseDiskRestoreMultipart [GOOD] >> TBsVDiskRepl3::AnubisTest [GOOD] >> TBsVDiskRepl3::ReplPerf >> TBsLocalRecovery::WriteRestartReadHugeDecreased [GOOD] >> TTimeGridTest::TimeGrid [GOOD] >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] >> TBsOther1::PoisonPill |63.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/unittest >> YqlHighlightTests::Typing [GOOD] >> ClosedIntervalSet::Union |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/tools/cpp_style_checker/cpp_style_checker >> TBsVDiskGC::GCPutManyBarriersNoSync [GOOD] >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest |62.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/metering/ut/unittest >> TStreamRequestUnitsCalculatorTest::Basic [GOOD] >> ClosedIntervalSet::Union [GOOD] >> ClosedIntervalSet::Difference >> ResponseTest::UniversalResponseRefBuf [GOOD] >> ResponseTest::UniversalResponseMsg [GOOD] >> ResponseTest::UniversalResponseRefMsg [GOOD] >> ResponseTest::UniversalResponseBuf [GOOD] >> StreamAdaptor::OrderingOneThread >> StreamAdaptor::OrderingOneThread [GOOD] >> StreamAdaptor::OrderingTwoThreads |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/yql_parser/ut/ydb-public-lib-ydb_cli-common-yql_parser-ut >> StreamAdaptor::OrderingTwoThreads [GOOD] >> StreamAdaptor::OrderingManyThreads >> result_compare::import_test [GOOD] >> StreamAdaptor::OrderingManyThreads [GOOD] >> StreamAdaptor::OrderingOneThreadWithSleep |62.8%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |62.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/json/ut/ydb-core-viewer-json-ut |62.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/scheme_board/ut_double_indexed/ydb-core-tx-scheme_board-ut_double_indexed |62.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/result_compare/import_test >> result_compare::import_test [GOOD] >> StreamAdaptor::OrderingOneThreadWithSleep [GOOD] >> StreamAdaptor::OrderingTwoThreadsWithSleep >> StreamAdaptor::OrderingTwoThreadsWithSleep [GOOD] >> StreamAdaptor::OrderingManyThreadsWithSleep |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part17/ydb-tests-fq-yt-kqp_yt_file-part17 >> common.cpp::clang_format [GOOD] >> common.h::clang_format [GOOD] |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_import/ydb-tests-fq-yt-kqp_yt_import |62.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tpc/medium/ydb-tests-functional-tpc-medium >> TYqlParamParserTest::TestDictType 
[GOOD] >> TYqlParamParserTest::TestMultipleParams [GOOD] >> TYqlParamParserTest::TestStructType [GOOD] >> TYqlParamParserTest::TestDecimalType [GOOD] >> TYqlParamParserTest::TestInvalidQuery [GOOD] >> TYqlParamParserTest::TestBasicTypes [GOOD] >> TYqlParamParserTest::TestListType [GOOD] >> TYqlParamParserTest::TestNestedTypes [GOOD] >> TYqlParamParserTest::TestComplexQuery [GOOD] >> TYqlParamParserTest::TestOptionalTypes [GOOD] >> TYqlParamParserTest::TestWhitespace [GOOD] >> TYqlParamParserTest::TestCaseInsensitiveTypes [GOOD] >> TYqlParamParserTest::TestAllTypes [GOOD] >> TYqlParamParserTest::TestTupleType [GOOD] >> TBsOther1::PoisonPill [GOOD] >> Json::BasicRendering [GOOD] >> TBsOther1::ChaoticParallelWrite >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] >> StreamAdaptor::OrderingManyThreadsWithSleep [GOOD] >> DoubleIndexedTests::TestUpsertByBothKeys [GOOD] >> DoubleIndexedTests::TestErase [GOOD] >> DoubleIndexedTests::TestMerge [GOOD] >> DoubleIndexedTests::TestFind [GOOD] >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] >> ClosedIntervalSet::Difference [GOOD] >> ClosedIntervalSet::Contains [GOOD] >> ClosedIntervalSet::EnumInRange |62.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/common/clang_format >> common.h::clang_format [GOOD] >> ClosedIntervalSet::EnumInRange [GOOD] >> ClosedIntervalSet::EnumInRangeReverse ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskGC::TGCManyVPutsCompactGCAllTest [GOOD] Test command err: 2025-06-03T10:21:34.566241Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:35.591701Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:35.684789Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301743Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301767Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:3] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301776Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:3] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301783Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:2] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301807Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301807Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:3] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 
2025-06-03T10:21:44.301812Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:2] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301821Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:44.301826Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:2] barrier# {Soft# {Gen# 1 Step# 450} Hard# } 2025-06-03T10:21:53.485259Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:2] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485259Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:1] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485266Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:3] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485265Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:322:0:0:10:3] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485289Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485289Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:3] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485289Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:370:0:0:3:2] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485291Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:472:0:0:4:3] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485314Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:2] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485315Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:2:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:915:0:0:5:3] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485316Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:424:0:0:3:1] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485324Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:472:0:0:4:2] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485325Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:472:0:0:4:1] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 
2025-06-03T10:21:53.485334Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:1:1]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:915:0:0:5:2] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } 2025-06-03T10:21:53.485334Z :BS_HULLRECS CRIT: blobstorage_hull.cpp:111: PDiskId# 1 VDISK[0:_:0:0:0]: (0) Db# LogoBlobs; putting blob beyond the barrier id# [5000:1:915:0:0:5:1] barrier# {Soft# {Gen# 1 Step# 1000} Hard# } |62.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/common/yql_parser/ut/unittest >> TYqlParamParserTest::TestTupleType [GOOD] |62.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/json/ut/unittest >> Json::BasicRendering [GOOD] |62.6%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/datetime2/libdatetime2_udf.so >> ClosedIntervalSet::EnumInRangeReverse [GOOD] >> GivenIdRange::IssueNewRange [GOOD] >> GivenIdRange::Trim |62.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_double_indexed/unittest >> DoubleIndexedTests::TestUpsertBySingleKey [GOOD] |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/grpc/server/ut/unittest >> StreamAdaptor::OrderingManyThreadsWithSleep [GOOD] >> GivenIdRange::Trim [GOOD] >> GivenIdRange::Subtract [GOOD] >> GivenIdRange::Points >> GivenIdRange::Points [GOOD] >> GivenIdRange::Runs >> GivenIdRange::Runs [GOOD] >> GivenIdRange::Allocate [GOOD] |62.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/benchmarks/runner/ydb-library-benchmarks-runner |62.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/tools/astdiff/astdiff |62.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sdk/cpp/sdk_credprovider/ydb-tests-functional-sdk-cpp-sdk_credprovider |62.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/server_restart/public-sdk-cpp-tests-integration-server_restart |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blob_depot/ut/unittest >> GivenIdRange::Allocate [GOOD] |62.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/plan2svg/ydb-tests-functional-kqp-plan2svg |62.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/solomon/ydb-library-yql-tests-sql-solomon |62.2%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/libpy3ydb_configure.global.a |62.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/generated/ut/ydb-core-base-generated-ut >> ydb-tests-fq-yt-kqp_yt_import::import_test [GOOD] |62.0%| [AR] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/libpy3tstool.global.a |61.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/yt/kqp_yt_import/import_test >> ydb-tests-fq-yt-kqp_yt_import::import_test [GOOD] >> RuntimeFeatureFlags::ConversionFromProto [GOOD] >> RuntimeFeatureFlags::ConversionToProto [GOOD] >> RuntimeFeatureFlags::DefaultValues [GOOD] >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] >> ydb-library-benchmarks-runner::import_test [GOOD] |61.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/ut/ydb-core-config-ut |61.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/base/generated/ut/unittest >> RuntimeFeatureFlags::UpdatingRuntimeFlags [GOOD] |61.8%| [AR] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/hive/libcore-mind-hive.a >> TBsOther1::ChaoticParallelWrite [GOOD] >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload >> ydb-tests-functional-kqp-plan2svg::import_test [GOOD] |61.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part10/ydb-tests-fq-yt-kqp_yt_file-part10 |61.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/benchmarks/runner/import_test >> 
ydb-library-benchmarks-runner::import_test [GOOD] |61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/kqp/plan2svg/import_test >> ydb-tests-functional-kqp-plan2svg::import_test [GOOD] |61.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] >> ConfigProto::ForbidNewRequired [GOOD] |61.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsOther2::ChaoticParallelWrite_SkeletonFrontQueuesOverload [GOOD] >> VDiskTest::HugeBlobWrite |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/ut/unittest >> ConfigProto::ForbidNewRequired [GOOD] |61.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest >> ydb-tests-functional-tpc-medium::import_test [GOOD] |61.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest |61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest >> TBsLocalRecovery::StartStopNotEmptyDB [GOOD] >> TBsVDiskRepl3::ReplPerf [GOOD] >> TBsLocalRecovery::WriteRestartRead |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tpc/medium/import_test >> ydb-tests-functional-tpc-medium::import_test [GOOD] |60.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/example_configs/static_validator-ut-example_configs ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRepl3::ReplPerf [GOOD] Test command err: 2025-06-03T10:21:50.081470Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:21:50.103310Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 6222939464116487752] 2025-06-03T10:21:50.210599Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-03T10:21:53.562443Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:21:53.583673Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:3:0]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 9636244457738566862] 2025-06-03T10:21:53.607393Z :BS_SYNCER ERROR: blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:3:0]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 2025-06-03T10:22:00.864008Z :BS_SYNCER ERROR: guid_recovery.cpp:714: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:22:00.958361Z :BS_SYNCER ERROR: guid_recovery.cpp:767: PDiskId# 4 VDISK[0:_:0:1:1]: (0) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 11253613739734397214] 2025-06-03T10:22:02.030814Z :BS_SYNCER ERROR: 
blobstorage_osiris.cpp:203: PDiskId# 4 VDISK[0:_:0:1:1]: (0) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |60.9%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/common-test_framework-udfs_deps.pkg.fake >> TBsVDiskOutOfSpace::WriteUntilYellowZone [GOOD] >> TBsVDiskRange::RangeGetFromEmptyDB >> TBsVDiskRange::RangeGetFromEmptyDB [GOOD] >> TBsLocalRecovery::WriteRestartRead [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh >> TBsLocalRecovery::MultiPutWriteRestartRead >> StaticConfigExamples::BLOCK42 [GOOD] >> StaticConfigExamples::MIRROR_3_DC_9_NODES [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES_IN_MEMORY [GOOD] >> StaticConfigExamples::MIRROR_3_DC_NODES [GOOD] >> StaticConfigExamples::SingleNodeWithFile [GOOD] >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yaml_config/static_validator/ut/ydb-library-yaml_config-static_validator-ut |60.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/static_validator/ut/example_configs/unittest >> StaticConfigExamples::SINGLE_NODE_IN_MEMORY [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardFresh [GOOD] >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction >> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD] |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/streaming_optimize/ydb-tests-fq-streaming_optimize >> TBsLocalRecovery::MultiPutWriteRestartRead [GOOD] >> TBsLocalRecovery::MultiPutWriteRestartReadHuge |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/callables/libcallables_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/type_inspection/libtype_inspection_udf.so >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dicts/libdicts_udf.so >> TBsLocalRecovery::MultiPutWriteRestartReadHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/set/libset_udf.so |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part8/ydb-tests-fq-yt-kqp_yt_file-part8 |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_parts [GOOD] |60.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/streaming/libstreaming_udf.so >> StaticValidator::HostConfigs [GOOD] >> StaticValidator::Hosts [GOOD] >> StaticValidator::DomainsConfig [GOOD] |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskRange::Simple3PutRangeGetAllBackwardCompaction [GOOD] |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/test/simple/libsimple_udf.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/dummylog/libdummylog.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/structs/libstructs_udf.so >> TBsVDiskGC::TGCManyVPutsDelTabletTest [GOOD] >> TBsVDiskManyPutGet::ManyPutGet |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/logs/dsv/libdsv_udf.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperloglog/libhyperloglog_udf.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/math/libmath_udf.so |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/vector/libvector_udf.so |60.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |60.7%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/security/ut/ydb-library-security-ut |60.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yaml_config/static_validator/ut/unittest >> StaticValidator::DomainsConfig [GOOD] |60.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/apps/dstool/ydb-dstool >> TBsVDiskManyPutGet::ManyPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiSinglePutGet |60.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/ydb-public-sdk-cpp-tests-unit-client-result |60.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tools/solomon_emulator_grpc/solomon_recipe_grpc >> Util::SanitizeNebiusTicket [GOOD] >> Util::MaskNebiusTicket [GOOD] >> Util::MaskTicket [GOOD] |60.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/kqp/tests/kikimr_tpch/ydb-core-kqp-tests-kikimr_tpch >> ydb-tests-fq-streaming_optimize::import_test [GOOD] |60.5%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/stat/libstat_udf.so |60.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/security/ut/unittest >> Util::MaskTicket [GOOD] >> CppGrpcClientResultSetTest::ListCorruptedResultSet [GOOD] |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/yson2/libyson2_udf.so |60.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/import_test >> ydb-tests-fq-streaming_optimize::import_test [GOOD] |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/examples/lists/liblists_udf.so |60.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest |60.4%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/digest/libdigest_udf.so >> ydb-dstool::import_test [GOOD] |60.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::ListCorruptedResultSet [GOOD] |60.4%| RESOURCE $(sbr:770480022) |60.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/topfreq/libtopfreq_udf.so >> TBsVDiskManyPutGet::ManyMultiSinglePutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGet |60.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/top/libtop_udf.so |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest |60.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/apps/dstool/import_test >> ydb-dstool::import_test [GOOD] >> TestTokenExchange::UpdatesTokenAndRetriesErrors [GOOD] >> TestTokenExchange::ShutdownWhileRefreshingToken |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::ListResultSet [GOOD] >> CppGrpcClientResultSetTest::Utf8OptionalResultSet [GOOD] |60.3%| [SB] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/docker-compose |60.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/commands/topic_workload/ut/ydb-public-lib-ydb_cli-commands-topic_workload-ut |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> TBsVDiskManyPutGet::ManyMultiPutGet [GOOD] >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::Utf8OptionalResultSet [GOOD] >> CppGrpcClientResultSetTest::OptionalDictResultSet [GOOD] |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::ListResultSet 
[GOOD] |60.2%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/docker-compose >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/result/unittest >> CppGrpcClientResultSetTest::OptionalDictResultSet [GOOD] |60.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/histogram/libhistogram_udf.so |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGet::ManyMultiPutGetWithLargeBatch [GOOD] |60.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/ut_auth/ydb-core-base-ut_auth |60.2%| [PK] {default-linux-x86_64, relwithdebinfo} $(B)/library/recipes/docker_compose/bin/{recipes-docker_compose-bin.final.pkg.fake ... library/recipes/docker_compose/bin/docker-compose} >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Simple [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Kilo [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Mega [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Giga [GOOD] >> TCommandWorkloadTopicParamsTests::TestRun_StrToBytes_Error [GOOD] >> TTopicWorkloadWriterProducerTests::HandleAckEvent_ShouldSaveStatistics [GOOD] >> TTopicWorkloadWriterProducerTests::Send_ShouldCallWriteMethodOfTheWriteSession [GOOD] >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldExtractContinuationTokenFromEvent [GOOD] >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldThrowExceptionIfEventOfTheWrongType [GOOD] |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/large/ydb-tests-olap-s3_import-large >> test.py::test[solomon-BadDownsamplingAggregation-] |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/long_tx_service/public/ut/ydb-core-tx-long_tx_service-public-ut |60.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/commands/topic_workload/ut/unittest >> TTopicWorkloadWriterProducerTests::WaitForContinuationToken_ShouldThrowExceptionIfEventOfTheWrongType [GOOD] |60.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/ydb-public-sdk-cpp-tests-unit-client-value >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] >> AuthTokenAllowed::PassOnListMatchUserSid [GOOD] >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> test.py::test[solomon-BadDownsamplingAggregation-] [GOOD] >> test.py::test[solomon-BadDownsamplingDisabled-] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSid [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchGroupSid [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/load/ydb-tests-olap-load >> AuthTokenAllowed::PassOnListMatchGroupSid [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSidAndGroups [GOOD] |60.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnEmptyOwnerAndNoToken [GOOD] |60.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1 [GOOD] >> TestTokenExchange::ShutdownWhileRefreshingToken [GOOD] >> TestTokenExchange::ExchangesFromFileConfig 
[GOOD] >> TestTokenExchange::SkipsUnknownFieldsInConfig [GOOD] >> TestTokenExchange::JwtTokenSourceInConfig [GOOD] >> TestTokenExchange::BadConfigParams >> TestTokenExchange::BadConfigParams [GOOD] >> JwtTokenSourceTest::Encodes [GOOD] >> JwtTokenSourceTest::BadParams [GOOD] |60.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnListMatchUserSidWithGroup [GOOD] >> AuthTokenAllowed::FailOnListMatchGroupSid [GOOD] |60.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndEmptyToken [GOOD] >> AuthTokenAllowed::FailOnListAndNoToken [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSid [GOOD] |60.0%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/file/libfile_udf.so >> AuthDatabaseAdmin::FailOnOwnerAndEmptyToken [GOOD] >> AuthDatabaseAdmin::FailOnEmptyOwnerAndTokenWithEmptyUserSidAndGroups [GOOD] >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSidAndGroups [GOOD] |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part17/ydb-library-yql-tests-sql-dq_file-part17 >> test.py::test[solomon-BadDownsamplingDisabled-] [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndEmptyToken [GOOD] |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/providers/s3/credentials/ut/ydb-library-yql-providers-s3-credentials-ut |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::FailOnListAndTokenWithEmptyUserSid [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/unittest >> JwtTokenSourceTest::BadParams [GOOD] Test command err: Checked background update on 0 iteration Checked background update on 1 iteration Shutdown: 0.000074s >> YdbValue::BuildValueDictTypeMismatch2 [GOOD] >> YdbValue::ParseValue1 >> YdbValue::BuildValueDictEmptyNoType [GOOD] >> YdbValue::BuildValueDictTypeMismatch1 [GOOD] >> YdbValue::BuildValueDictEmpty2 [GOOD] >> YdbValue::BuildValueDictEmptyTypeMismatch [GOOD] |59.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part14/ydb-library-yql-tests-sql-dq_file-part14 >> YdbValue::ParseValue1 [GOOD] >> YdbValue::ParseValue2 [GOOD] >> YdbValue::ParseValuePg [GOOD] >> YdbValue::ParseValueMaybe [GOOD] |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::FailOnOwnerAndNoToken [GOOD] >> test.py::test[solomon-BadDownsamplingFill-] [GOOD] |59.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueDictEmptyTypeMismatch [GOOD] |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part10/ydb-library-yql-tests-sql-hybrid_file-part10 >> test.py::test[solomon-BadDownsamplingInterval-] >> YdbValue::ParseType2 [GOOD] >> YdbValue::ParseTaggedType [GOOD] >> YdbValue::ParseType1 [GOOD] >> YdbValue::IncorrectUuid [GOOD] >> TBsDbStat::ChaoticParallelWrite_DbStat [GOOD] |59.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::ParseValueMaybe [GOOD] >> 
TBsHuge::Simple >> YdbValue::BuildValueStruct [GOOD] >> YdbValue::BuildValueOptionalMismatch2 [GOOD] >> YdbValue::BuildValueNestedOptional [GOOD] >> YdbValue::BuildValueOptionalMismatch1 [GOOD] >> YdbValue::BuildValueOptional [GOOD] |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part1/ydb-library-yql-tests-sql-hybrid_file-part1 >> YdbValue::BuildTaggedType [GOOD] >> YdbValue::BuildType [GOOD] >> YdbValue::BuildTypeIncomplete [GOOD] >> YdbValue::BuildDyNumberValue [GOOD] >> YdbValue::BuildTaggedValue [GOOD] >> YdbValue::BuildValueTuplePrimitives [GOOD] >> YdbValue::BuildValueTupleTypeMismatch [GOOD] >> YdbValue::BuildValueWithType [GOOD] >> YdbValue::CorrectUuid [GOOD] |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/kqp/kqp_indexes/ydb-tests-functional-kqp-kqp_indexes |59.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part7/ydb-tests-fq-yt-kqp_yt_file-part7 |59.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::IncorrectUuid [GOOD] >> AuthTokenAllowed::FailOnListAndEmptyToken [GOOD] |59.7%| [CC] {default-linux-x86_64, relwithdebinfo} $(S)/ydb/core/mind/hive/hive_ut.cpp >> AuthDatabaseAdmin::PassOnOwnerMatchUserSidWithGroup [GOOD] >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] |59.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueOptional [GOOD] |59.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildTaggedValue [GOOD] >> TBsHuge::Simple [GOOD] >> TBsHuge::SimpleErasureNone >> test.py::test[solomon-BadDownsamplingInterval-] [GOOD] >> test.py::test[solomon-Basic-default.txt] >> AuthTokenAllowed::PassOnEmptyListAndToken [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::CorrectUuid [GOOD] >> YdbValue::BuildValueIncomplete [GOOD] >> YdbValue::BuildValueEmptyListUnknown [GOOD] >> YdbValue::BuildValueList [GOOD] >> YdbValue::BuildValueListEmpty [GOOD] >> YdbValue::BuildValueListEmpty2 [GOOD] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthDatabaseAdmin::PassOnOwnerMatchUserSid [GOOD] >> YdbValue::BuildValueListItemMismatch1 [GOOD] >> YdbValue::BuildValueListItemMismatch2 [GOOD] >> YdbValue::BuildValueListItemMismatch3 [GOOD] >> YdbValue::BuildValueListItemMismatch4 [GOOD] >> YdbValue::BuildValueListEmpty3 [GOOD] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndTokenWithEmptyUserSid [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndInvalidTokenSerialized [GOOD] |59.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueListEmpty2 [GOOD] >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/solomon/ydb-tests-fq-solomon |59.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part16/ydb-tests-fq-yt-kqp_yt_file-part16 >> YdbValue::BuildTypeReuse [GOOD] >> YdbValue::BuildValueBadCall [GOOD] >> YdbValue::BuildValueDict1 [GOOD] >> YdbValue::BuildValueDict2 [GOOD] >> YdbValue::BuildValueDictEmpty1 [GOOD] >> LongTxServicePublicTypes::Snapshot [GOOD] >> LongTxServicePublicTypes::SnapshotMaxTxId [GOOD] >> LongTxServicePublicTypes::LongTxId [GOOD] >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] >> 
YdbValue::BuildValueTuple1 [GOOD] >> YdbValue::BuildValueTupleElementsMismatch1 [GOOD] >> YdbValue::BuildValueTuple2 [GOOD] >> YdbValue::BuildValueStructMissingMember [GOOD] >> YdbValue::BuildValueTupleElementsMismatch2 [GOOD] >> TBsHuge::SimpleErasureNone [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart |59.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueListEmpty3 [GOOD] >> test.py::test[solomon-Basic-default.txt] [GOOD] >> test.py::test[solomon-BasicExtractMembers-default.txt] |59.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueDictEmpty1 [GOOD] >> TCredentials::CheckToken [GOOD] >> TCredentials::CheckAws [GOOD] |59.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_auth/unittest >> AuthTokenAllowed::PassOnEmptyListAndNoToken [GOOD] |59.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/value/gtest >> YdbValue::BuildValueTupleElementsMismatch2 [GOOD] |59.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/long_tx_service/public/ut/unittest >> LongTxServicePublicTypes::SnapshotReadOnly [GOOD] |59.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/limits/ydb-tests-functional-limits >> TBsLocalRecovery::ChaoticWriteRestartHugeXXX [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased |59.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/providers/s3/credentials/ut/unittest >> TCredentials::CheckAws [GOOD] |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so >> test.py::test[solomon-BasicExtractMembers-default.txt] [GOOD] >> test.py::test[solomon-Downsampling-default.txt] |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/backup/s3_path_style/ydb-tests-functional-backup-s3_path_style |59.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/viewer/tests/ydb-core-viewer-tests >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/postgres_integrations/go-libpq/ydb-tests-postgres_integrations-go-libpq |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/library/ut/ydb-tests-library-ut >> test.py::test[solomon-Downsampling-default.txt] [GOOD] >> test.py::test[solomon-DownsamplingValidSettings-default.txt] |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/http_api/ydb-tests-fq-http_api >> TIncrHugeBasicTest::Defrag [GOOD] |59.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_state [GOOD] |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/serverless/ydb-tests-functional-serverless |59.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tablet_flat/ut_large/ydb-core-tablet_flat-ut_large >> test.py::test[solomon-DownsamplingValidSettings-default.txt] [GOOD] >> test.py::test[solomon-HistResponse-default.txt] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/incrhuge/ut/unittest >> TIncrHugeBasicTest::Defrag [GOOD] Test command err: 2025-06-03T10:21:00.975150Z :BS_INCRHUGE DEBUG: incrhuge_keeper.cpp:71: BlockSize# 8128 BlocksInChunk# 2304 BlocksInMinBlob# 65 MaxBlobsPerChunk# 35 BlocksInDataSection# 2303 BlocksInIndexSection# 1 2025-06-03T10:21:00.975192Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:152: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] starting ReadLog 2025-06-03T10:21:00.975402Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:161: [PDisk# 000000001 Recovery] [IncrHugeKeeper 
PDisk# 000000001] finished ReadLog 2025-06-03T10:21:00.975416Z :BS_INCRHUGE DEBUG: incrhuge_keeper_recovery.cpp:200: [PDisk# 000000001 Recovery] ApplyReadLog Chunks# [] Deletes# [] Owners# {} CurrentSerNum# 0 NextLsn# 1 2025-06-03T10:21:00.975429Z :BS_INCRHUGE INFO: incrhuge_keeper_recovery.cpp:515: [PDisk# 000000001 Recovery] [IncrHugeKeeper PDisk# 000000001] ready 2025-06-03T10:21:00.975471Z :TEST DEBUG: test_actor_concurrent.h:153: finished Init Reference# [] Enumerated# [] InFlightDeletes# [] 2025-06-03T10:21:00.975483Z :TEST DEBUG: test_actor_concurrent.h:209: ActionsTaken# 1 2025-06-03T10:21:00.975486Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 0 InFlightWritesSize# 0 2025-06-03T10:21:00.976190Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:811717:0:0] Lsn# 0 NumReq# 0 2025-06-03T10:21:00.976204Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 0 HandleWrite Lsn# 0 DataSize# 811717 WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-06-03T10:21:00.976208Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-06-03T10:21:00.976211Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-03T10:21:00.976221Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:230: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem no free chunks 2025-06-03T10:21:00.977724Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:460: [PDisk# 000000001 Logger] ApplyLogChunkItem Lsn# 1 Status# OK 2025-06-03T10:21:00.977743Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 2 ChunkSerNum# 1000 2025-06-03T10:21:00.977747Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 3 ChunkSerNum# 1001 2025-06-03T10:21:00.977749Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 4 ChunkSerNum# 1002 2025-06-03T10:21:00.977751Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 5 ChunkSerNum# 1003 2025-06-03T10:21:00.977753Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 6 ChunkSerNum# 1004 2025-06-03T10:21:00.977756Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 7 ChunkSerNum# 1005 2025-06-03T10:21:00.977758Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 8 ChunkSerNum# 1006 2025-06-03T10:21:00.977760Z :BS_INCRHUGE DEBUG: incrhuge_keeper_alloc.cpp:64: [PDisk# 000000001 Allocator] ChunkIdx# 9 ChunkSerNum# 1007 2025-06-03T10:21:00.977765Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 0 2025-06-03T10:21:00.977768Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem entry 2025-06-03T10:21:00.977865Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 1 InFlightWritesSize# 1 2025-06-03T10:21:00.978191Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 0 ProcessWriteItem OffsetInBlocks# 0 IndexInsideChunk# 0 SizeInBlocks# 100 SizeInBytes# 812800 Offset# 0 Size# 812800 End# 812800 Id# 0000000000000000 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.980109Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1745495:1:0] Lsn# 1 NumReq# 
1 2025-06-03T10:21:00.980146Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 1 HandleWrite Lsn# 1 DataSize# 1745495 WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.980150Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.980153Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem entry 2025-06-03T10:21:00.980355Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 1 ProcessWriteItem OffsetInBlocks# 100 IndexInsideChunk# 1 SizeInBlocks# 215 SizeInBytes# 1747520 Offset# 812800 Size# 1747520 End# 2560320 Id# 0000000000000001 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.982906Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 0 ApplyBlobWrite Status# OK 2025-06-03T10:21:00.982931Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.982940Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:46: [PDisk# 000000001 Defragmenter] overall efficiency 0.017 2025-06-03T10:21:00.983548Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 2 InFlightWritesSize# 2 2025-06-03T10:21:00.983742Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:602037:2:0] Lsn# 2 NumReq# 2 2025-06-03T10:21:00.983773Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2 HandleWrite Lsn# 2 DataSize# 602037 WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.983783Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.983786Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem entry 2025-06-03T10:21:00.983884Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2 ProcessWriteItem OffsetInBlocks# 315 IndexInsideChunk# 2 SizeInBlocks# 75 SizeInBytes# 609600 Offset# 2560320 Size# 609600 End# 3169920 Id# 0000000000000002 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.984948Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 3 InFlightWritesSize# 3 2025-06-03T10:21:00.986296Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1287465:3:0] Lsn# 3 NumReq# 3 2025-06-03T10:21:00.986335Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 3 HandleWrite Lsn# 3 DataSize# 1287465 WriteQueueSize# 1 WriteInProgressItemsSize# 2 2025-06-03T10:21:00.986340Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 2 2025-06-03T10:21:00.986343Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem entry 2025-06-03T10:21:00.986506Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 3 ProcessWriteItem OffsetInBlocks# 390 IndexInsideChunk# 3 SizeInBlocks# 159 SizeInBytes# 1292352 Offset# 3169920 Size# 1292352 End# 4462272 Id# 0000000000000003 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.988872Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 4 InFlightWritesSize# 4 2025-06-03T10:21:00.989232Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: 
[PDisk# 000000001 Writer] QueryId# 1 ApplyBlobWrite Status# OK 2025-06-03T10:21:00.989256Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 2 2025-06-03T10:21:00.989267Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:46: [PDisk# 000000001 Defragmenter] overall efficiency 0.030 2025-06-03T10:21:00.989275Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 2 ApplyBlobWrite Status# OK 2025-06-03T10:21:00.989280Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.989283Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:46: [PDisk# 000000001 Defragmenter] overall efficiency 0.030 2025-06-03T10:21:00.990536Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1501676:4:0] Lsn# 4 NumReq# 4 2025-06-03T10:21:00.990573Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 4 HandleWrite Lsn# 4 DataSize# 1501676 WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.990587Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 1 2025-06-03T10:21:00.990590Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem entry 2025-06-03T10:21:00.990769Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 4 ProcessWriteItem OffsetInBlocks# 549 IndexInsideChunk# 4 SizeInBlocks# 185 SizeInBytes# 1503680 Offset# 4462272 Size# 1503680 End# 5965952 Id# 0000000000000004 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.993639Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 5 InFlightWritesSize# 5 2025-06-03T10:21:00.993848Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:687721:5:0] Lsn# 5 NumReq# 5 2025-06-03T10:21:00.993881Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 5 HandleWrite Lsn# 5 DataSize# 687721 WriteQueueSize# 1 WriteInProgressItemsSize# 2 2025-06-03T10:21:00.993892Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 2 2025-06-03T10:21:00.993899Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem entry 2025-06-03T10:21:00.993988Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 5 ProcessWriteItem OffsetInBlocks# 734 IndexInsideChunk# 5 SizeInBlocks# 85 SizeInBytes# 690880 Offset# 5965952 Size# 690880 End# 6656832 Id# 0000000000000005 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:00.995208Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 6 InFlightWritesSize# 6 2025-06-03T10:21:00.997378Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:1:1:0:1957662:6:0] Lsn# 6 NumReq# 6 2025-06-03T10:21:00.997398Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 6 HandleWrite Lsn# 6 DataSize# 1957662 WriteQueueSize# 1 WriteInProgressItemsSize# 3 2025-06-03T10:21:00.997403Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 1 WriteInProgressItemsSize# 3 2025-06-03T10:21:00.997407Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 6 ProcessWriteItem entry 2025-06-03T10:21:00.997633Z :BS_INCRHUGE 
DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 6 ProcessWriteItem OffsetInBlocks# 819 IndexInsideChunk# 6 SizeInBlocks# 241 SizeInBytes# 1958848 Offset# 6656832 Size# 1958848 End# 8615680 Id# 0000000000000006 ChunkIdx# 2 ChunkSerNum# 1000 Defrag# false 2025-06-03T10:21:01.000588Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 3 ApplyBlobWrite Status# OK 2025-06-03T10:21:01.000622Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 0 WriteInProgressItemsSize# 3 2025-06-03T10:21:01.000632Z :BS_INCRHUGE DEBUG: incrhuge_keeper_defrag.cpp:46: [PDisk# 000000001 Defragmenter] overall efficiency 0.058 2025-06-03T10:21:01.001287Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 7 InFlightWritesSize# 7 2025-06-03T10:21:01.001842Z :TES ... EBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2409 HandleWrite Lsn# 6088 DataSize# 1374538 WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.688839Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.691471Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 41 InFlightWritesSize# 19 2025-06-03T10:22:20.691561Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 2396 ApplyBlobWrite Status# OK 2025-06-03T10:22:20.691584Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 9 WriteInProgressItemsSize# 4 2025-06-03T10:22:20.691588Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2401 ProcessWriteItem entry 2025-06-03T10:22:20.691622Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:630823:6089:0] Lsn# 6089 NumReq# 41 2025-06-03T10:22:20.691895Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2401 ProcessWriteItem OffsetInBlocks# 1298 IndexInsideChunk# 9 SizeInBlocks# 251 SizeInBytes# 2040128 Offset# 10550144 Size# 2040128 End# 12590272 Id# 0000000000000079 ChunkIdx# 61 ChunkSerNum# 1407 Defrag# false 2025-06-03T10:22:20.691913Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:344: [PDisk# 000000001 Writer] QueryId# 2397 ApplyBlobWrite Status# OK 2025-06-03T10:22:20.691919Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 4 2025-06-03T10:22:20.691921Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:192: [PDisk# 000000001 Writer] QueryId# 2402 ProcessWriteItem entry 2025-06-03T10:22:20.692161Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:319: [PDisk# 000000001 Writer] QueryId# 2402 ProcessWriteItem OffsetInBlocks# 1549 IndexInsideChunk# 10 SizeInBlocks# 226 SizeInBytes# 1836928 Offset# 12590272 Size# 1836928 End# 14427200 Id# 0000000000000025 ChunkIdx# 61 ChunkSerNum# 1407 Defrag# false 2025-06-03T10:22:20.692173Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2410 HandleWrite Lsn# 6089 DataSize# 630823 WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.692174Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 8 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.692819Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 42 InFlightWritesSize# 20 2025-06-03T10:22:20.693002Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# 
[1:2:1:0:953725:6090:0] Lsn# 6090 NumReq# 42 2025-06-03T10:22:20.693020Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2411 HandleWrite Lsn# 6090 DataSize# 953725 WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.693022Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 9 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.694835Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 43 InFlightWritesSize# 21 2025-06-03T10:22:20.694860Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 000000000000005b NumReq# 43 2025-06-03T10:22:20.694863Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 44 InFlightWritesSize# 21 2025-06-03T10:22:20.694868Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 000000000000001d NumReq# 44 2025-06-03T10:22:20.694870Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 45 InFlightWritesSize# 21 2025-06-03T10:22:20.694872Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 000000000000003c NumReq# 45 2025-06-03T10:22:20.694875Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 46 InFlightWritesSize# 21 2025-06-03T10:22:20.694879Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 0000000000000086 NumReq# 46 2025-06-03T10:22:20.694880Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 47 InFlightWritesSize# 21 2025-06-03T10:22:20.694892Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6091 HandleDelete Ids# [000000000000005b] 2025-06-03T10:22:20.694904Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 40 ChunkSerNum# 1386 Id# 000000000000005b IndexInsideChunk# 5 SizeInBlocks# 236 Lsn# 3642 Owner# 1 SeqNo# 6091 2025-06-03T10:22:20.694907Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3642 Entrypoint# false Virtual# false 2025-06-03T10:22:20.694931Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6092 HandleDelete Ids# [000000000000001d] 2025-06-03T10:22:20.694942Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 54 ChunkSerNum# 1400 Id# 000000000000001d IndexInsideChunk# 4 SizeInBlocks# 118 Lsn# 3643 Owner# 1 SeqNo# 6092 2025-06-03T10:22:20.694943Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3643 Entrypoint# false Virtual# false 2025-06-03T10:22:20.694948Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6093 HandleDelete Ids# [000000000000003c] 2025-06-03T10:22:20.694952Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 55 ChunkSerNum# 1401 Id# 000000000000003c IndexInsideChunk# 8 SizeInBlocks# 82 Lsn# 3644 Owner# 1 SeqNo# 6093 2025-06-03T10:22:20.694955Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3644 Entrypoint# false Virtual# false 2025-06-03T10:22:20.694958Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6094 HandleDelete Ids# [0000000000000086] 2025-06-03T10:22:20.694962Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 39 ChunkSerNum# 1385 Id# 0000000000000086 IndexInsideChunk# 0 SizeInBlocks# 235 Lsn# 3645 
Owner# 1 SeqNo# 6094 2025-06-03T10:22:20.694963Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3645 Entrypoint# false Virtual# false 2025-06-03T10:22:20.696629Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:2094172:6095:0] Lsn# 6095 NumReq# 47 2025-06-03T10:22:20.696656Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2412 HandleWrite Lsn# 6095 DataSize# 2094172 WriteQueueSize# 10 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.696661Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 10 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.700645Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 48 InFlightWritesSize# 22 2025-06-03T10:22:20.702322Z :TEST DEBUG: test_actor_concurrent.h:295: sent Write LogoBlobId# [1:2:1:0:2016949:6096:0] Lsn# 6096 NumReq# 48 2025-06-03T10:22:20.702373Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:130: [PDisk# 000000001 Writer] QueryId# 2413 HandleWrite Lsn# 6096 DataSize# 2016949 WriteQueueSize# 11 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.702380Z :BS_INCRHUGE DEBUG: incrhuge_keeper_write.cpp:174: [PDisk# 000000001 Writer] WriteQueueSize# 11 WriteInProgressItemsSize# 5 2025-06-03T10:22:20.705691Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3636 Status# OK 2025-06-03T10:22:20.705705Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3636 Virtual# false 2025-06-03T10:22:20.705712Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6078 finished Status# OK 2025-06-03T10:22:20.705715Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000075 from lookup table 2025-06-03T10:22:20.705726Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3637 Status# OK 2025-06-03T10:22:20.705727Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3637 Virtual# false 2025-06-03T10:22:20.705730Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6079 finished Status# OK 2025-06-03T10:22:20.705732Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000085 from lookup table 2025-06-03T10:22:20.705735Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3638 Status# OK 2025-06-03T10:22:20.705737Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3638 Virtual# false 2025-06-03T10:22:20.705739Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6080 finished Status# OK 2025-06-03T10:22:20.705741Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000061 from lookup table 2025-06-03T10:22:20.705745Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3639 Status# OK 2025-06-03T10:22:20.705747Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3639 Virtual# false 2025-06-03T10:22:20.705749Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 
Deleter] Owner# 1 SeqNo# 6081 finished Status# OK 2025-06-03T10:22:20.705750Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 0000000000000001 from lookup table 2025-06-03T10:22:20.705754Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3640 Status# OK 2025-06-03T10:22:20.705756Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3640 Virtual# false 2025-06-03T10:22:20.705758Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6082 finished Status# OK 2025-06-03T10:22:20.705760Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 000000000000004f from lookup table 2025-06-03T10:22:20.705764Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:648: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3641 Status# OK 2025-06-03T10:22:20.705766Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:686: [PDisk# 000000001 Logger] ApplyLogDeleteItem Entrypoint# false Lsn# 3641 Virtual# false 2025-06-03T10:22:20.705767Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:196: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6086 finished Status# OK 2025-06-03T10:22:20.705769Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:249: [PDisk# 000000001 Deleter] deleting 000000000000005e from lookup table 2025-06-03T10:22:20.706192Z :TEST DEBUG: test_actor_concurrent.h:213: GetNumRequestsInFlight# 49 InFlightWritesSize# 23 2025-06-03T10:22:20.706231Z :TEST DEBUG: test_actor_concurrent.h:381: sent Delete Id# 000000000000003a NumReq# 49 2025-06-03T10:22:20.706280Z :BS_INCRHUGE DEBUG: incrhuge_keeper_delete.cpp:50: [PDisk# 000000001 Deleter] Owner# 1 SeqNo# 6097 HandleDelete Ids# [000000000000003a] 2025-06-03T10:22:20.706304Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:544: [PDisk# 000000001 Logger] LogBlobDeletes ChunkIdx# 58 ChunkSerNum# 1404 Id# 000000000000003a IndexInsideChunk# 7 SizeInBlocks# 253 Lsn# 3646 Owner# 1 SeqNo# 6097 2025-06-03T10:22:20.706310Z :BS_INCRHUGE DEBUG: incrhuge_keeper_log.cpp:638: [PDisk# 000000001 Logger] ProcessDeleteQueueItem Lsn# 3646 Entrypoint# false Virtual# false |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/params/ydb-public-sdk-cpp-tests-unit-client-params >> ydb-tests-olap-s3_import-large::import_test [GOOD] |59.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/persqueue/codecs/ut/ydb-core-persqueue-codecs-ut >> TBtreeIndexTPartLarge::SmallKeys1GB |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/transfer/ut/functional/ydb-core-transfer-ut-functional |58.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/s3_import/large/import_test >> ydb-tests-olap-s3_import-large::import_test [GOOD] >> test.py::test[solomon-HistResponse-default.txt] [GOOD] >> test.py::test[solomon-InvalidProject-] >> ydb-tests-olap-load::import_test [GOOD] |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part10/ydb-library-yql-tests-sql-dq_file-part10 |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/unicode_base/libunicode_udf.so |58.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/load/import_test >> ydb-tests-olap-load::import_test [GOOD] |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/hive/ydb-tests-functional-hive |58.9%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/url_base/liburl_udf.so |58.8%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/public/sdk/cpp/tests/unit/client/endpoints/ydb-public-sdk-cpp-tests-unit-client-endpoints |58.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::TypeMismatchFromValue [GOOD] |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/ut/ydb-core-scheme-ut |58.8%| RESOURCE $(sbr:4966407557) >> ParamsBuilder::BuildFromValue [GOOD] |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/node_broker/tests/ydb-tests-stress-node_broker-tests >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |58.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::TypeMismatchFromValue [GOOD] |58.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> ParamsBuilder::BuildFromValue [GOOD] |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/python/python3_small/libpython3_udf.so |58.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/library/issue/ydb-public-sdk-cpp-tests-unit-library-issue |58.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stress/kv/tests/ydb-tests-stress-kv-tests |58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpchGenerator::test_s1_state_and_parts [GOOD] |58.7%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |58.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::FromV1Codec [GOOD] |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/json_value/ut/ydb-public-lib-json_value-ut |58.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/ydb-core-blobstorage-vdisk-anubis_osiris-ut |58.6%| [SB] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/postgresql/psql/psql |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part12/ydb-library-yql-tests-sql-dq_file-part12 |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/driver_lib/version/ut/ydb-core-driver_lib-version-ut >> CheckUtils::NewPromiseInitialized [GOOD] |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/yt/kqp_yt_file/part11/ydb-tests-fq-yt-kqp_yt_file-part11 |58.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/client/metadata/ut/ydb-core-client-metadata-ut |58.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest >> Scheme::CompareOrder [GOOD] >> Scheme::CellVecTryParse [GOOD] |58.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest >> PersQueueCodecs::FromV1Codec [GOOD] |58.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> CheckUtils::NewPromiseInitialized [GOOD] |58.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/ydb_serializable/ydb_serializable |58.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part8/ydb-library-yql-tests-sql-hybrid_file-part8 |58.5%| [UN] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/functional/postgresql/psql/psql >> SchemeBorders::Full [GOOD] >> Scheme::YqlTypesMustBeDefined [GOOD] >> Scheme::TSerializedCellVec [GOOD] |58.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part2/ydb-library-yql-tests-sql-hybrid_file-part2 >> Scheme::UnsafeAppend [GOOD] |58.3%| [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> Scheme::CellVecTryParse [GOOD] >> SchemeRanges::CmpBorders [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part4/ydb-library-yql-tests-sql-dq_file-part4 >> SchemeBorders::Partial [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part4/ydb-library-yql-tests-sql-hybrid_file-part4 |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/tx/columnshard/tools/visualize_portions/visualize_portions |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> Scheme::YqlTypesMustBeDefined [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part18/ydb-library-yql-tests-sql-dq_file-part18 |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/sys_view/service/ut/ydb-core-sys_view-service-ut |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part13/ydb-library-yql-tests-sql-dq_file-part13 >> ydb-core-viewer-tests::import_test [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> SchemeBorders::Partial [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> Scheme::UnsafeAppend [GOOD] Test command err: Serialize: 0.000070s Cells constructor: 0.000189s Parse: 0.000079s Copy: 0.000030s Move: 0.000010s |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/clickbench/ydb-tests-functional-clickbench |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part19/ydb-library-yql-tests-sql-dq_file-part19 >> ydb-tests-functional-limits::import_test [GOOD] >> JsonValueTest::PrimitiveValueUint8 [GOOD] >> JsonValueTest::PrimitiveValueUtf8String2 [GOOD] >> JsonValueTest::PrimitiveValueUtf8String1 [GOOD] >> JsonValueTest::TaggedValue [GOOD] >> TypesProto::Decimal22 [GOOD] >> Scheme::CompareUuidCells [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part7/ydb-library-yql-tests-sql-dq_file-part7 >> Scheme::EmptyCell [GOOD] >> SchemeRanges::RangesBorders [GOOD] |58.3%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/result/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/json2/libjson2_udf.so |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> ydb-tests-postgres_integrations-go-libpq::import_test [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part0/ydb-library-yql-tests-sql-dq_file-part0 |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/tests/import_test >> ydb-core-viewer-tests::import_test [GOOD] >> ydb-tests-fq-http_api::import_test [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/import_test >> ydb-tests-functional-limits::import_test [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::TaggedValue [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> Scheme::EmptyCell [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> SchemeRanges::RangesBorders [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> SysViewQueryHistory::AddDedup2 [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/ut_group/ydb-core-blobstorage-ut_group >> SysViewQueryHistory::AggrMergeDedup [GOOD] >> SysViewQueryHistory::StableMerge [GOOD] >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/import_test >> ydb-tests-postgres_integrations-go-libpq::import_test [GOOD] >> SysViewQueryHistory::AddDedup [GOOD] >> ydb-tests-functional-serverless::import_test [GOOD] >> ydb-tests-library-ut::import_test [GOOD] >> SysViewQueryHistory::StableMerge2 [GOOD] >> SysViewQueryHistory::AggrMerge [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/yql/essentials/udfs/common/protobuf/libprotobuf_udf.so |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/ydb_cli/common/ut/ydb-public-lib-ydb_cli-common-ut >> SysViewQueryHistory::TopDurationAdd [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/erasure/ut/ydb-core-erasure-ut >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/http_api/import_test >> ydb-tests-fq-http_api::import_test [GOOD] >> JsonValueTest::PrimitiveValueBool [GOOD] >> SysViewQueryHistory::AddDedupRandom [GOOD] >> EndpointElector::DiffOnRemove [GOOD] >> TypesProto::Decimal35 [GOOD] >> SysViewQueryHistory::TopReadBytesAdd [GOOD] >> JsonValueTest::InvalidJsonToBinaryString3 [GOOD] >> JsonValueTest::InvalidJsonToBinaryString7 [GOOD] >> JsonValueTest::PgValue [GOOD] >> JsonValueTest::NewDatetimeValuesStruct [GOOD] >> TypesProto::DecimalNoTypeInfo [GOOD] |58.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest >> TBlobStorageAnubisAlgo::Mirror3 [GOOD] |58.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge [GOOD] |58.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part0/ydb-library-yql-tests-sql-hybrid_file-part0 |58.2%| [LD] 
{BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/config/ydb-tests-functional-config |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMergeDedup [GOOD] >> JsonValueTest::InvalidJsonToBinaryString5 [GOOD] >> JsonValueTest::InvalidJsonToBinaryString4 [GOOD] >> JsonValueTest::InvalidJsonToBinaryString6 [GOOD] >> JsonValueTest::EmptyBinaryStringBase64 [GOOD] >> JsonValueTest::PrimitiveValueSimpleString [GOOD] >> JsonValueTest::PrimitiveValueDate32 [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/jaeger_tracing/ut/ydb-core-jaeger_tracing-ut |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::StableMerge2 [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedup [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.2%| [TA] $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ... results_accumulator.log} >> JsonValueTest::PrimitiveValueDatetime64 [GOOD] >> JsonValueTest::CompositeValueIntList [GOOD] >> JsonValueTest::PrimitiveValueInt8 [GOOD] >> JsonValueTest::PrimitiveValueDatetime [GOOD] >> JsonValueTest::CompositeValueStruct [GOOD] >> JsonValueTest::PrimitiveValueTimestamp [GOOD] >> JsonValueTest::PrimitiveValueDate [GOOD] >> TBsLocalRecovery::ChaoticWriteRestart [GOOD] >> JsonValueTest::BinaryStringAsciiFollowedByNonAscii [GOOD] >> JsonValueTest::CompositeValueTuple [GOOD] >> JsonValueTest::PrimitiveValueInt64 [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ServiceQueryHistoryAdd [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHuge [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased >> JsonValueTest::PrimitiveValueFloat [GOOD] >> JsonValueTest::BinaryStringUnicode [GOOD] >> JsonValueTest::CompositeValueDict [GOOD] >> JsonValueTest::BinaryStringBase64 [GOOD] >> JsonValueTest::CompositeValueEmptyList [GOOD] >> JsonValueTest::PrimitiveValueInt32 [GOOD] >> JsonValueTest::PrimitiveValueDouble [GOOD] >> JsonValueTest::PrimitiveValueInt16 [GOOD] >> JsonValueTest::PrimitiveValueUint16 [GOOD] >> JsonValueTest::PrimitiveValueUint32 [GOOD] >> JsonValueTest::PrimitiveValueUint64 [GOOD] >> JsonValueTest::PrimitiveValueTimestamp64 [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AggrMerge [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopDurationAdd [GOOD] |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/codecs/ut/unittest |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/params/gtest >> TErasureTypeTest::TestBlock42PartialRestore0 |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/persqueue/topic_parser/ut/ydb-library-persqueue-topic_parser-ut |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/import_test >> ydb-tests-functional-serverless::import_test [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::ScanQueryHistoryMerge [GOOD] |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/tests/library/ut/import_test >> ydb-tests-library-ut::import_test [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} 
$(B)/ydb/library/yql/tests/sql/dq_file/part16/ydb-library-yql-tests-sql-dq_file-part16 |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/anubis_osiris/ut/unittest |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/public/ydb_issue/ut/ydb-library-yql-public-ydb_issue-ut |58.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/scheme/ut/unittest >> TypesProto::DecimalNoTypeInfo [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tools/tstool/tstool |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::AddDedupRandom [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part15/ydb-library-yql-tests-sql-dq_file-part15 |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/service/ut/unittest >> SysViewQueryHistory::TopReadBytesAdd [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::NewDatetimeValuesStruct [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::InvalidJsonToBinaryString6 [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/endpoints/unittest >> EndpointElector::DiffOnRemove [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueDate [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::CompositeValueTuple [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueInt64 [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/mvp/core/ut/ydb-mvp-core-ut |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::CompositeValueEmptyList [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueTimestamp64 [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/json_value/ut/unittest >> JsonValueTest::PrimitiveValueInt16 [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part5/ydb-library-yql-tests-sql-dq_file-part5 >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD] >> TErasureTypeTest::TestEo [GOOD] >> TErasureTypeTest::TestBlock42PartialRestore3 >> TErasureTypeTest::TestBlockByteOrder [GOOD] >> TErasureTypeTest::TestStripe32LossOfAllPossible2 >> ErasureBrandNew::Block42_restore >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] >> TErasureTypeTest::TestStripe23LossOfAllPossible3 >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 >> TErasureTypeTest::TestBlock23LossOfAllPossible3 >> ThrottlerControlTests::Simple [GOOD] >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] >> SamplingControlTests::Simple [GOOD] >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step >> SamplingControlTests::EdgeCaseUpper >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] >> SamplingControlTests::EdgeCaseUpper [GOOD] |58.2%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/config/tools/protobuf_plugin/ut/ydb-core-config-tools-protobuf_plugin-ut |58.2%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe31LossOfAllPossible1 [GOOD] >> ThrottlerControlTests::LongIdle [GOOD] >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeIncreased [GOOD] Test command err:
2025-06-03T10:22:07.746027Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:920:0:0:66560:1] Marker# BSVS08
2025-06-03T10:22:07.746031Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:195:0:0:66560:1] Marker# BSVS08
2025-06-03T10:22:07.746034Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:21:0:0:66560:1] Marker# BSVS08
...
2025-06-03T10:22:07.747559Z :BS_VDISK_PUT CRIT: blobstorage_skeleton.cpp:622: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVMultiPut: TEvVMultiPut has huge blob# [5000:1:346:0:0:66560:1] Marker# BSVS08
>> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] >> SamplingControlTests::EdgeCaseLower [GOOD] >> ThrottlerControlTests::Overflow_1 [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestEo [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlockByteOrder [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingMirror3Of4 [GOOD] >> ThrottlerControlTests::Overflow_2 [GOOD] >> DiscoveryConverterTest::FullLegacyNamesWithRootDatabase [GOOD] >> DiscoveryConverterTest::FullLegacyPath [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/ydb-tests-sql |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Simple [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/replication/ydb-tests-functional-replication |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestDifferentCasesInDiffSplitingBlock4Plus2 [GOOD] >> ToMessage::NonUtf8 [GOOD] >> IssueProtoTest::BinarySerialization [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 >> ToStreamTest::OneMessageTest [GOOD] >> ToStreamTest::SubIssuesTest [GOOD] >> TopicNameConverterForCPTest::CorrectLegacyTopics [GOOD] >> TopicNameConverterTest::LegacyStyle [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe32LossOfAllPossible2 [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::Simple [GOOD] |58.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::LongIdle [GOOD] >> TopicNameConverterTest::PathFromDiscoveryConverter [GOOD] >> TopicNameConverterTest::FirstClass [GOOD] >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] >> TopicNameConverterTest::Paths
[GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe23LossOfAllPossible3 [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseUpper [GOOD] >> DiscoveryConverterTest::FullLegacyNames [GOOD] >> IssueProtoTest::KikimrYqlSameLayout [GOOD] >> DiscoveryConverterTest::FirstClass [GOOD] >> ToStreamTest::ManyIssuesTest [GOOD] >> IssueProtoTest::WrongBinStringException [GOOD] >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock23LossOfAllPossible3 [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> SamplingControlTests::EdgeCaseLower [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded10Threads100Ticks1000Init22Step [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded2Threads200Ticks30Init7Step [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part11/ydb-library-yql-tests-sql-dq_file-part11 |58.3%| [PK] {default-linux-x86_64, relwithdebinfo} $(B)/yql/essentials/tests/common/test_framework/udfs_deps/{common-test_framework-udfs_deps.final.pkg.fake ... yql/essentials/udfs/common/hyperscan/libhyperscan_udf.so} |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::MultiThreaded5Threads150Ticks500Init15Step [GOOD] >> DiscoveryConverterTest::MinimalName [GOOD] >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] >> DiscoveryConverterTest::AccountDatabase [GOOD] >> DiscoveryConverterTest::CmWay [GOOD] >> TopicNameConverterTest::NoTopicName [GOOD] >> TopicNameConverterTest::LegacyStyleDoubleName [GOOD] >> DiscoveryConverterTest::DiscoveryConverter [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Overflow_1 [GOOD] |58.3%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/util/ut/ydb-core-util-ut >> DiscoveryConverterTest::EmptyModern [GOOD] |58.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/jaeger_tracing/ut/unittest >> ThrottlerControlTests::Overflow_2 [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> ToMessage::NonUtf8 [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::BinarySerialization [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FullLegacyPath [GOOD] >> TopicNameConverterForCPTest::BadLegacyTopics [GOOD] >> TopicNameConverterForCPTest::BadModernTopics [GOOD] >> ValidationTests::MapType [GOOD] >> ValidationTests::AdvancedCopyTo [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::CorrectModernTopics [GOOD] >> ValidationTests::HasReservedPaths [GOOD] >> test.py::test[solomon-InvalidProject-] [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> 
TopicNameConverterTest::FirstClass [GOOD] >> ValidationTests::CanDispatchByTag [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::SubIssuesTest [GOOD] >> TErasureTypeTest::TestBlock43LossOfAllPossible3 |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::FirstClass [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::ManyIssuesTest [GOOD] |58.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::KikimrYqlSameLayout [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> IssueProtoTest::WrongBinStringException [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/yql/public/ydb_issue/ut/unittest >> ToStreamTest::OneMessageTest [GOOD] |58.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42LossOfAllPossible2 [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::Paths [GOOD] |58.4%| [CC] {BAZEL_UPLOAD} $(S)/ydb/core/mind/hive/hive_ut.cpp >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterTest::LegacyStyleDoubleName [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::CmWay [GOOD] >> ValidationTests::CanCopyTo >> ValidationTests::CanCopyTo [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::WithLogbrokerPath [GOOD] >> TErasureTypeTest::TestStripe42LossOfAllPossible2 |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> DiscoveryConverterTest::EmptyModern [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/library/persqueue/topic_parser/ut/unittest >> TopicNameConverterForCPTest::BadModernTopics [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanDispatchByTag [GOOD] >> TErasureTypeTest::TestBlock22LossOfAllPossible2 >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::AdvancedCopyTo [GOOD] |58.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/tools/nemesis/driver/nemesis |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::HasReservedPaths [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest |58.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/tenants/ydb-tests-functional-tenants >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] |58.4%| [TA] $(B)/ydb/public/sdk/cpp/tests/unit/client/value/test-results/gtest/{meta.json 
... results_accumulator.log} |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::MapType [GOOD] |58.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::isSplittedDataEqualsToOldVerion [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest |58.4%| [AR] {BAZEL_UPLOAD} $(B)/ydb/core/mind/hive/libcore-mind-hive.a >> TCowBTreeTest::SeekForwardPermutationsInplace [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest >> ValidationTests::CanCopyTo [GOOD] >> TCowBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsInplace >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TCowBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TCowBTreeTest::RandomInsertInplace >> TIntrusiveStackTest::TestEmptyPop [GOOD] |58.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/config/tools/protobuf_plugin/ut/unittest |58.4%| [TA] $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} |58.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/coordination/ydb-public-sdk-cpp-tests-unit-client-coordination |58.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestStripe42LossOfAllPossible2 [GOOD] >> TIntervalSetTest::IntervalSetTestAdd >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty >> TIntervalSetTest::IntervalSetTestEmpty [GOOD] >> test.py::test[solomon-LabelColumns-default.txt] [GOOD] >> TIntrusiveStackTest::TestPushPop [GOOD] >> TIntervalSetTest::IntervalSetTestSpecificAdd [GOOD] >> test.py::test[solomon-Subquery-default.txt] >> TCowBTreeTest::Empty [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/backpressure/ut/ydb-core-blobstorage-backpressure-ut >> TConcurrentRWHashTest::TEraseTest [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTest [GOOD] >> TCircularQueueTest::ShouldPush [GOOD] >> TCircularQueueTest::ShouldRemoveCurrentLast [GOOD] >> TCircularQueueTest::ShouldRemove [GOOD] >> TCowBTreeTest::Basics [GOOD] |58.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/data_quotas/ydb-tests-olap-data_quotas >> TCircularOperationQueueTest::BasicRPSCheckWithRound [GOOD] >> TCircularOperationQueueTest::ShouldTryToStartAnotherOneWhenStartFails [GOOD] >> TIntervalSetTest::IntervalSetTestAdd [GOOD] >> AddressClassifierTest::TestAddressExtraction [GOOD] >> TSimpleCacheTest::TestSimpleCache [GOOD] >> TFragmentedBufferTest::TestReplaceWithSetMonolith [GOOD] >> TStrongTypeTest::DefaultConstructorDeleted [GOOD] >> TQueueInplaceTests::TestSimpleInplace [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriority [GOOD] >> TQueueInplaceTests::CleanInDestructor [GOOD] >> TTokenBucketTest::Unlimited [GOOD] >> TWildcardTest::TestWildcard [GOOD] >> TSimpleCacheTest::TestNotSoSimpleCache [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityReadyQueue [GOOD] >> TPriorityOperationQueueTest::ShouldReturnExecTimeWhenUpdateRunningPriority [GOOD] >> TWildcardTest::TestWildcards [GOOD] >> TULID::EveryBitOrder [GOOD] >> TPriorityOperationQueueTest::ShouldStartEmpty [GOOD] >> TPriorityOperationQueueTest::ShouldUpdatePriorityWaitingQueue [GOOD] >> TULID::ParseAndFormat [GOOD] >> 
TULID::TailByteOrder [GOOD] >> TPriorityOperationQueueTest::UpdateNonExistingShouldReturnFalse [GOOD] >> TPriorityOperationQueueTest::ShouldStartByPriorityWithRemove [GOOD] >> TTokenBucketTest::Limited [GOOD] >> TULID::HeadByteOrder [GOOD] >> TPriorityQueueTest::TestOrder [GOOD] >> TStrongTypeTest::DefaultConstructorValue [GOOD] >> TTokenBucketTest::DelayCalculation [GOOD] >> TULID::Generate [GOOD] >> TCowBTreeTest::MultipleSnapshots >> TCowBTreeTest::ClearAndReuse [GOOD] >> TCacheTest::TestSizeBasedOverflowCallback [GOOD] >> TCircularOperationQueueTest::CheckRemoveRunning [GOOD] >> TCacheTest::TestUnboundedMapCache [GOOD] >> TCircularOperationQueueTest::CheckOnDoneNotExisting [GOOD] >> TCircularOperationQueueTest::CheckTimeout [GOOD] >> TCacheTest::TestLruCache [GOOD] >> TCircularOperationQueueTest::BasicRPSCheck [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight1 [GOOD] >> TCircularOperationQueueTest::CheckRemoveNotExisting [GOOD] >> TCircularOperationQueueTest::CheckWakeupAfterStop [GOOD] >> TCircularOperationQueueTest::CheckRemoveWaiting [GOOD] >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted [GOOD] >> TCircularOperationQueueTest::CheckOnDoneInflight2 [GOOD] >> TCacheTest::Test2QCache [GOOD] >> TCircularOperationQueueTest::ShouldReturnExecTime [GOOD] >> TCircularOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> TCircularQueueTest::ShouldNextSingleItem [GOOD] >> TCircularOperationQueueTest::ShouldTolerateInaccurateTimer [GOOD] >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenNothingStarted [GOOD] >> TCircularQueueTest::ShouldGetQueue [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue3 [GOOD] >> TCircularQueueTest::ShouldNextMulti [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue100 [GOOD] |58.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock22LossOfAllPossible2 [GOOD] >> TIntervalSetTest::IntervalSetTestAddSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtract [GOOD] >> TIntervalSetTest::IntervalSetTestSubtract [GOOD] >> TIntervalSetTest::IntervalMapTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestSubtractAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestAddAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalSetTestToStringAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapUnion >> AddressClassifierTest::TestAddressParsing [GOOD] >> AddressClassifierTest::TestClassfierWithAllIpTypes [GOOD] >> AddressClassifierTest::TestLabeledClassifier [GOOD] >> AddressClassifierTest::TestLabeledClassifierFromNetData [GOOD] >> TBitsTest::TestNaiveClz [GOOD] >> TBTreeTest::Basics [GOOD] >> TBTreeTest::ClearAndReuse [GOOD] >> TBTreeTest::SeekForwardPermutationsInplace [GOOD] >> TIntervalSetTest::IntervalMapUnion [GOOD] >> TBTreeTest::SeekForwardPermutationsThreadSafe [GOOD] >> TConcurrentRWHashTest::TRemoveTest [GOOD] >> TCircularOperationQueueTest::ShouldShuffle [GOOD] >> TIntervalSetTest::IntervalVecTestIsSubsetOfAgainstReference [GOOD] >> THyperLogCounterTest::TestIncrement [GOOD] >> TCacheTest::EnsureNoLeakAfterUnboundedCacheOnMapDtor [GOOD] >> TIntervalSetTest::IntervalSetUnion [GOOD] >> TBTreeTest::SeekBackwardPermutationsInplace [GOOD] >> TCircularQueueTest::Empty [GOOD] >> TFragmentedBufferTest::TestOverwriteRead [GOOD] >> TConcurrentRWHashTest::TEmptyGetTest [GOOD] >> 
TCircularOperationQueueTest::CheckRemoveNotRunning [GOOD] >> TIntervalSetTest::IntervalVecUnion >> TIntervalSetTest::IntervalMapUnionInplace >> TBTreeTest::SeekBackwardPermutationsThreadSafe [GOOD] >> TFragmentedBufferTest::TestIsNotMonolith [GOOD] >> TCircularQueueTest::ShouldNotRemoveMissing [GOOD] >> TCacheCacheTest::Random [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue2 [GOOD] |58.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock43LossOfAllPossible3 [GOOD] >> TIntervalSetTest::IntervalVecTestAddSubtract [GOOD] >> TBTreeTest::RandomInsertInplace >> THyperLogCounterTest::TestGetSet [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue1 [GOOD] >> TIntervalSetTest::IntervalVecTestToStringAgainstReference [GOOD] >> TFragmentedBufferTest::TestSetMonolith [GOOD] >> TIntervalSetTest::IntervalMapUnionInplace [GOOD] >> TIntervalSetTest::IntervalVecTestAddAgainstReference [GOOD] >> TConcurrentRWHashTest::TInsertIfAbsentTestFunc [GOOD] >> THyperLogCounterTest::TestAddRandom >> TIntervalSetTest::IntervalVecTestSpecificAdd [GOOD] >> THazardTest::CachedPointers [GOOD] >> TCircularOperationQueueTest::CheckStartAfterStop [GOOD] >> TIntervalSetTest::IntervalVecTestAdd [GOOD] >> TCacheTest::EnsureNoLeakAfterQ2CacheDtor [GOOD] |58.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TULID::Generate [GOOD] >> TIntervalSetTest::IntervalVecTestSubtract [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace >> TCacheTest::TestUpdateItemSize [GOOD] |58.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/multi_plane/ydb-tests-fq-multi_plane >> TIntervalSetTest::IntervalVecTestSubtractAgainstReference [GOOD] >> THazardTest::AutoProtectedPointers [GOOD] >> TFragmentedBufferTest::TestWriteRead [GOOD] >> TCircularOperationQueueTest::ShouldStartEmpty [GOOD] >> TIntervalSetTest::IntervalSetUnionInplace [GOOD] >> TCacheTest::EnsureNoLeakAfterLruCacheDtor [GOOD] >> TCircularOperationQueueTest::CheckTimeoutWhenFirstItemRemoved [GOOD] >> TConcurrentRWHashTest::TInsertTest [GOOD] >> TIntervalSetTest::IntervalVecTestEmpty [GOOD] >> THyperLogCounterTest::TestAddRandom [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight3 [GOOD] >> TIntervalSetTest::IntervalMapUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetIntersection [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplace >> TCircularOperationQueueTest::CheckWakeupWhenRPSExhausted2 [GOOD] >> TCircularQueueTest::ShouldNotPushTwice [GOOD] >> TIntervalSetTest::IntervalVecUnion [GOOD] >> THyperLogCounterTest::TestAddFixed >> TCircularOperationQueueTest::ShouldScheduleWakeupWhenHasWaitingAndStart [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplace [GOOD] >> TIntervalSetTest::IntervalSetIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetDifference >> TIntervalSetTest::IntervalSetDifference [GOOD] >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] >> TIntervalSetTest::IntervalVecUnionInplace [GOOD] >> TIntervalSetTest::IntervalVecUnionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalVecIntersection [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplace >> THyperLogCounterTest::TestAddFixed [GOOD] >> THyperLogCounterTest::TestHybridIncrement [GOOD] |57.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/tools/local_ydb/local_ydb |57.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part9/ydb-library-yql-tests-sql-dq_file-part9 >> 
TIntervalSetTest::IntervalSetDifferenceInplaceSelf [GOOD] >> TIntervalSetTest::IntervalSetTestIterator [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplace [GOOD] >> TCircularOperationQueueTest::UseMinOperationRepeatDelayWhenTimeout [GOOD] >> THyperLogCounterTest::TestHybridAdd [GOOD] >> TIntervalSetTest::IntervalVecIntersectionInplaceSelf [GOOD] >> test.py::test[solomon-Subquery-default.txt] [GOOD] >> TCircularQueueTest::ShouldRemoveCurrent [GOOD] >> TIntervalSetTest::IntervalMapTestEmpty [GOOD] >> TCircularOperationQueueTest::ShouldStartInflightEnqueue10 [GOOD] >> TIntervalSetTest::IntervalVecDifference [GOOD] >> TIntervalSetTest::IntervalMapTestSpecificAdd [GOOD] >> TIntervalSetTest::IntervalVecDifferenceInplaceSelf [GOOD] >> test.py::test[solomon-UnknownSetting-] >> TIntervalSetTest::IntervalMapTestAdd [GOOD] >> TIntrusiveFixedHashSetTest::TestEmptyFind [GOOD] >> TIntervalSetTest::IntervalMapTestAddSubtract [GOOD] >> TIntrusiveFixedHashSetTest::TestPushFindClear [GOOD] >> TIntervalSetTest::IntervalMapTestAddAgainstReference [GOOD] >> TIntrusiveHeapTest::TestEmpty [GOOD] >> TIntervalSetTest::IntervalMapTestIsSubsetOfAgainstReference [GOOD] >> TIntervalSetTest::IntervalMapIntersection >> TCircularOperationQueueTest::ShouldStartInflight1 [GOOD] >> TIntrusiveHeapTest::TestAddRemove [GOOD] >> TIntrusiveHeapTest::TestUpdateNoChange [GOOD] >> TIntrusiveHeapTest::TestUpdateIncrease [GOOD] >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] >> TIntervalSetTest::IntervalMapIntersection [GOOD] >> TCircularOperationQueueTest::RemoveNonExistingWhenShuffle [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplace >> TCircularOperationQueueTest::ShouldStartInflight100 [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight2 [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplace [GOOD] >> TCircularOperationQueueTest::ShouldStartInflight10 [GOOD] >> TIntervalSetTest::IntervalMapIntersectionInplaceSelf [GOOD] >> TIntervalSetTest::IntervalMapDifference [GOOD] >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] |57.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalSetTestIterator [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TCircularOperationQueueTest::RemoveExistingWhenShuffle [GOOD] Test command err: 0.27433 |57.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TCircularOperationQueueTest::ShouldStartInflight10 [GOOD] |57.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TIntrusiveHeapTest::TestUpdateDecrease [GOOD] |57.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TIntervalSetTest::IntervalMapDifferenceInplaceSelf [GOOD] |57.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/ydb-public-sdk-cpp-tests-integration-sessions |57.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/restarts/ydb-tests-functional-restarts |57.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part7/ydb-library-yql-tests-sql-hybrid_file-part7 |57.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/supp/ydb_supp >> test.py::test[solomon-UnknownSetting-] [GOOD] >> TQueueBackpressureTest::CreateDelete [GOOD] >> TBlobStorageQueueTest::TMessageLost [GOOD] |57.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part9/ydb-library-yql-tests-sql-hybrid_file-part9 |57.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/groupinfo/ut/ydb-core-blobstorage-groupinfo-ut |57.0%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/unit/client/coordination/unittest |57.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest |57.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/cms/ydb-tests-functional-cms |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part6/ydb-library-yql-tests-sql-hybrid_file-part6 |56.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::CreateDelete [GOOD] |56.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TBlobStorageQueueTest::TMessageLost [GOOD] |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tools/cfg/bin/ydb_configure |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/driver/ydb-public-sdk-cpp-tests-unit-client-driver |56.9%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/rename/ydb-tests-functional-rename |56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/common/ut/ydb-core-blobstorage-vdisk-common-ut |56.8%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part8/ydb-library-yql-tests-sql-dq_file-part8 |56.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/solomon/pytest >> test.py::test[solomon-UnknownSetting-] [GOOD] |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/dsproxy/ut_strategy/ydb-core-blobstorage-dsproxy-ut_strategy |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/query/ut/ydb-core-blobstorage-vdisk-query-ut |56.7%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/sqs/merge_split_common_table/std/functional-sqs-merge_split_common_table-std >> TQueueBackpressureTest::IncorrectMessageId [GOOD] |56.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/barriers/ut/ydb-core-blobstorage-vdisk-hulldb-barriers-ut |56.6%| [TA] $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.6%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part2/ydb-library-yql-tests-sql-dq_file-part2 >> TLockFreeIntrusiveStackTest::ConcurrentRefCountNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention |56.6%| [TA] $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test.py::test[optimizers-yql-2582_limit_for_join_input--ForceBlocks] >> test.py::test[join-full_trivial_udf_call--ForceBlocks] >> test.py::test[blocks-combine_all_minmax_nested--ForceBlocks] |56.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TBTreeTest::RandomInsertInplace [GOOD] >> TBTreeTest::RandomInsertThreadSafe >> test.py::test[schema-user_schema_mix3--ForceBlocks] |56.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::IncorrectMessageId [GOOD] >> TCowBTreeTest::RandomInsertInplace [GOOD] >> test.py::test[join-no_empty_join_for_dyn--ForceBlocks] >> TCowBTreeTest::RandomInsertThreadSafe >> test.py::test[join-no_empty_join_for_dyn--ForceBlocks] [SKIPPED] >> test.py::test[join-star_join_inners--Results] |56.6%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut >> test.py::test[hor_join-skip_yamr--Results] >> test.py::test[blocks-pg_to_dates--ForceBlocks] >> test.py::test[join-no_empty_join_for_dyn--Results] [SKIPPED] >> test.py::test[order_by-native_desc_publish--ForceBlocks] >> test.py::test[join-pullup_extend--Results] >> test.py::test[order_by-native_desc_publish--ForceBlocks] [SKIPPED] >> test.py::test[join-star_join_mirror-off-ForceBlocks] |56.5%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |56.5%| [TA] $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[order_by-native_desc_publish--Results] [SKIPPED] >> test.py::test[pg-select_common_type_unionall--ForceBlocks] >> test.py::test[aggregate-group_by_rollup_grouping--ForceBlocks] >> test.py::test[join-mergejoin_force_align2--ForceBlocks] >> test.py::test[pg-tpch-q19-default.txt-Results] >> test.py::test[join-inner_all_right--ForceBlocks] |56.5%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/discovery_mutator/sdk-cpp-tests-unit-client-discovery_mutator >> test.py::test[join-mergejoin_force_align2--ForceBlocks] [SKIPPED] >> test.py::test[join-mergejoin_force_align2--Results] [SKIPPED] >> test.py::test[join-mergejoin_with_different_key_names--ForceBlocks] >> test.py::test[flatten_by-flatten_corr_name_column-default.txt-ForceBlocks] >> test.py::test[produce-reduce_with_python_having--ForceBlocks] >> test.py::test[type_v3-append_diff_layout1--Results] |56.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/tools/blobsan/blobsan |56.4%| [LD] {BAZEL_UPLOAD} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut >> test.py::test[produce-reduce_with_python_having--ForceBlocks] [SKIPPED] |56.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/lib/idx_test/ut/ydb-public-lib-idx_test-ut >> test.py::test[produce-reduce_with_python_having--Results] [SKIPPED] >> test.py::test[produce-reduce_with_python_row_repack--ForceBlocks] >> test.py::test[produce-reduce_with_python_row_repack--ForceBlocks] [SKIPPED] >> test.py::test[produce-reduce_with_python_row_repack--Results] [SKIPPED] >> test.py::test[ql_filter-integer_many_right--ForceBlocks] |56.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part3/ydb-library-yql-tests-sql-hybrid_file-part3 |56.4%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part6/ydb-library-yql-tests-sql-dq_file-part6 >> test.py::test[pg-tpcds-q15-default.txt-ForceBlocks] >> TBlobStorageGroupInfoIterTest::PerRealmIterator [GOOD] >> 
TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] >> test.py::test[action-action_eval_cluster_and_table-default.txt-Results] >> test.py::test[blocks-string_pass--ForceBlocks] |56.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part1/ydb-library-yql-tests-sql-dq_file-part1 >> test.py::test[aggregate-group_by_ru_join_agg--Results] |56.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/hybrid_file/part5/ydb-library-yql-tests-sql-hybrid_file-part5 >> test.py::test[hor_join-sorted_out_mix--Results] >> test.py::test[hor_join-sorted_out_mix--Results] [SKIPPED] >> test.py::test[hor_join-yql19332_aux_cols--Results] >> test.py::test[action-eval_folder_via_file_in_job--ForceBlocks] |56.2%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/ingress/ut/ydb-core-blobstorage-vdisk-ingress-ut >> test.py::test[action-action_eval_cluster_table_for--ForceBlocks] >> TCowBTreeTest::MultipleSnapshots [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithGc |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoIterTest::WalkFailRealms [GOOD] |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/library/yql/tests/sql/dq_file/part3/ydb-library-yql-tests-sql-dq_file-part3 >> test.py::test[key_filter-range_union_lower_excluded-default.txt-ForceBlocks] >> test.py::test[schema-select_all_inferschema-extra_field-ForceBlocks] >> TBlobStorageGroupInfoBlobMapTest::CheckCorrectBehaviourWithHashOverlow [GOOD] >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper |56.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut |56.1%| [TA] $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |56.1%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD] >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD] >> test.py::test[join-pushdown_filter_over_left--ForceBlocks] |56.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut |56.0%| [LD] {BAZEL_UPLOAD} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/s3_import/ydb-tests-olap-s3_import |56.0%| [TA] $(B)/ydb/library/yql/public/ydb_issue/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |56.0%| COMPACTING CACHE 35.6GiB
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/groupinfo/ut/unittest >> TBlobStorageGroupInfoBlobMapTest::Mirror3dcMapper [GOOD]
Test command err:
[0:1:0:3:1]# 173 184 157 167 152 185 195 192 144
[0:1:1:1:1]# 189 195 192 171 157 161 167 155 196
[0:1:3:3:1]# 184 157 182 152 185 157 192 144 189
[0:1:3:4:0]# 148 154 155 158 194 160 156 163 140
[0:1:2:3:2]# 152 177 174 176 154 146 161 170 168
[0:1:1:2:1]# 157 167 152 189 195 192 171 157 161
[0:1:1:0:2]# 158 150 131 167 177 161 177 174 173
[0:1:3:0:1]# 161 155 171 196 154 167 184 157 182
[0:1:0:3:2]# 174 173 152 146 184 176 168 157 161
[0:1:2:2:0]# 163 140 161 148 162 159 168 178 190
[0:1:0:2:0]# 161 156 163 159 196 148 190 162 168
[0:1:3:2:1]# 152 185 157 192 144 189 161 155 171
[0:1:2:3:1]# 157 182 173 185 157 167 144 189 195
[0:1:3:1:2]# 157 161 170 131 190 158 161 178 167
[0:1:2:0:1]# 155 171 157 154 167 155 157 182 173
[0:1:3:0:2]# 131 190 158 161 178 167 173 152 177
[0:1:2:0:2]# 190 158 150 178 167 177 152 177 174
[0:1:2:4:1]# 154 167 155 157 182 173 185 157 167
[0:1:2:1:2]# 161 170 168 190 158 150 178 167 177
[0:1:2:4:2]# 178 167 177 152 177 174 176 154 146
[0:1:0:2:1]# 167 152 185 195 192 144 157 161 155
[0:1:0:0:0]# 190 162 168 174 148 154 177 158 194
[0:1:3:2:0]# 156 163 140 196 148 162 162 168 178
[0:1:1:0:1]# 171 157 161 167 155 196 182 173 184
[0:1:0:2:2]# 146 184 176 168 157 161 150 131 190
[0:1:1:0:0]# 178 190 162 155 174 148 160 177 158
[0:1:2:3:0]# 194 160 177 163 140 161 148 162 159
[0:1:2:4:0]# 154 155 174 194 160 177 163 140 161
[0:1:1:3:2]# 177 174 173 154 146 184 170 168 157
[0:1:2:1:1]# 144 189 195 155 171 157 154 167 155
[0:1:1:1:0]# 162 159 196 178 190 162 155 174 148
[0:1:1:3:1]# 182 173 184 157 167 152 189 195 192
[0:1:3:4:1]# 196 154 167 184 157 182 152 185 157
[0:1:1:4:2]# 167 177 161 177 174 173 154 146 184
[0:1:0:1:0]# 159 196 148 190 162 168 174 148 154
[0:1:3:4:2]# 161 178 167 173 152 177 184 176 154
[0:1:0:0:1]# 157 161 155 155 196 154 173 184 157
[0:1:1:4:0]# 155 174 148 160 177 158 140 161 156
[0:1:2:1:0]# 148 162 159 168 178 190 154 155 174
[0:1:2:0:0]# 168 178 190 154 155 174 194 160 177
[0:1:3:3:2]# 173 152 177 184 176 154 157 161 170
[0:1:0:4:0]# 174 148 154 177 158 194 161 156 163
[0:1:1:2:0]# 140 161 156 162 159 196 178 190 162
[0:1:0:1:1]# 195 192 144 157 161 155 155 196 154
[0:1:3:0:0]# 162 168 178 148 154 155 158 194 160
[0:1:3:1:1]# 192 144 189 161 155 171 196 154 167
[0:1:0:4:1]# 155 196 154 173 184 157 167 152 185
[0:1:2:2:1]# 185 157 167 144 189 195 155 171 157
[0:1:3:1:0]# 196 148 162 162 168 178 148 154 155
[0:1:2:2:2]# 176 154 146 161 170 168 190 158 150
[0:1:0:3:0]# 177 158 194 161 156 163 159 196 148
[0:1:3:3:0]# 158 194 160 156 163 140 196 148 162
[0:1:0:1:2]# 168 157 161 150 131 190 177 161 178
[0:1:3:2:2]# 184 176 154 157 161 170 131 190 158
[0:1:1:3:0]# 160 177 158 140 161 156 162 159 196
[0:1:1:2:2]# 154 146 184 170 168 157 158 150 131
[0:1:1:4:1]# 167 155 196 182 173 184 157 167 152
[0:1:1:1:2]# 170 168 157 158 150 131 167 177 161
[0:1:0:0:2]# 150 131 190 177 161 178 174 173 152
[0:1:0:4:2]# 177 161 178 174 173 152 146 184 176
mean# 166.6666667 dev# 15.11254078
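The mean#/dev# pair condenses the 540 cell values above (60 candidate layouts, nine per-disk counts each). As an offline sanity check, here is a minimal sketch that recomputes both figures from the captured block, assuming dev# is the population standard deviation; recompute_mean_dev is a hypothetical helper, not anything in the YDB tree:

    import math

    def recompute_mean_dev(log_text: str):
        # Collect every per-disk count from lines shaped like
        # "[0:1:0:3:1]# 173 184 157 ..."
        values = [int(tok)
                  for line in log_text.splitlines() if "]# " in line
                  for tok in line.split("# ", 1)[1].split()]
        mean = sum(values) / len(values)
        # Population standard deviation; a sample deviation would divide
        # by len(values) - 1 instead.
        dev = math.sqrt(sum((v - mean) ** 2 for v in values) / len(values))
        return mean, dev

Fed the block above, this should land on mean# 166.6666667 and dev# 15.11254078 up to float formatting; a spread of roughly 15 around 167 is consistent with the mapper distributing load fairly evenly across the mirror-3dc group.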
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsLocalRecovery::ChaoticWriteRestartHugeDecreased [GOOD]
Test command err:
2025-06-03T10:22:17.235132Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2948} PDiskId# 1 ownerId# 5 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 5 ownerRound# 101 lsn# 11 PDiskId# 1
2025-06-03T10:22:18.354065Z :BS_PDISK ERROR: {BPD01@blobstorage_pdisk_impl.cpp:2948} PDiskId# 1 ownerId# 5 invalid OwnerRound, got# 101 expected# 151 error in TLogWrite for ownerId# 5 ownerRound# 101 lsn# 12 PDiskId# 1
|56.0%| [TA] $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/sql/large/ydb-tests-sql-large |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/base/ut/ydb-core-base-ut |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/copy_table/ydb-tests-datashard-copy_table |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/async_replication/ydb-tests-datashard-async_replication |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/base/ut/ydb-core-blobstorage-vdisk-hulldb-base-ut >> test.py::test[hor_join-skip_yamr--Results] [GOOD] >> test.py::test[insert-select_after_insert_relabeled-default.txt-ForceBlocks] >> TLockFreeIntrusiveStackTest::ConcurrentRefCountHeavyContention [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/oom/ydb-tests-olap-oom >> test.py::test[union_all-mix_map_and_project--ForceBlocks] >> test.py::test[action-action_eval_cluster_and_table-default.txt-Results] [GOOD] >> test.py::test[action-discard-default.txt-Results] |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/secondary_index/ydb-tests-datashard-secondary_index |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/stability/ydb/ydb-tests-stability-ydb |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/unit/client/draft/ydb-public-sdk-cpp-tests-unit-client-draft >> test.py::test[case-case_multi_val-default.txt-ForceBlocks] |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/public-sdk-cpp-tests-integration-sessions_pool >> test.py::test[type_v3-append_diff_layout1--Results] [GOOD] >> test.py::test[udf-udaf_distinct--ForceBlocks] >> TBTreeTest::RandomInsertInplace [GOOD] >> TBTreeTest::RandomInsertThreadSafe >> test.py::test[blocks-pg_to_dates--ForceBlocks] [GOOD] >> test.py::test[blocks-pg_to_dates--Results] >> test.py::test[schema-user_schema_mix3--ForceBlocks] [GOOD] >> test.py::test[schema-user_schema_mix3--Results] >> test.py::test[join-full_trivial_udf_call--ForceBlocks] [GOOD] >> test.py::test[join-full_trivial_udf_call--Results] |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/functional/canonical/ydb-tests-functional-canonical |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/compstrat/ut/ydb-core-blobstorage-vdisk-hulldb-compstrat-ut |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hullop/ut/ydb-core-blobstorage-vdisk-hullop-ut >> TBTreeTest::DuplicateKeysInplace [GOOD] >> TBTreeTest::DuplicateKeysThreadSafe |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/olap/scenario/ydb-tests-olap-scenario >> TCowBTreeTest::RandomInsertInplace [GOOD] >> TCowBTreeTest::SnapshotCascade [GOOD] >> TCowBTreeTest::SnapshotRollback |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/datashard/partitioning/ydb-tests-datashard-partitioning |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/public/sdk/cpp/tests/integration/topic/ydb-public-sdk-cpp-tests-integration-topic >> test.py::test[optimizers-yt_shuffle_by_keys--ForceBlocks] >> test.py::test[join-pullup_extend--Results] [GOOD] >> test.py::test[join-selfjoin_on_sorted--ForceBlocks] >> test.py::test[join-inner_all_right--ForceBlocks] [GOOD] >> test.py::test[join-inner_all_right--Results] >>
test.py::test[schema-select_all_inferschema-extra_field-ForceBlocks] [GOOD] >> test.py::test[schema-select_all_inferschema-extra_field-Results] >> test.py::test[pg-tpcds-q15-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q15-default.txt-Results] >> test.py::test[optimizers-yt_shuffle_by_keys--ForceBlocks] [SKIPPED] >> test.py::test[optimizers-yt_shuffle_by_keys--Results] [SKIPPED] >> test.py::test[order_by-SortByOneFieldDesc--ForceBlocks] >> test.py::test[blocks-combine_all_minmax_nested--ForceBlocks] [GOOD] >> test.py::test[ql_filter-integer_many_right--ForceBlocks] [GOOD] >> test.py::test[ql_filter-integer_many_right--Results] >> test.py::test[optimizers-yql-2582_limit_for_join_input--ForceBlocks] [GOOD] >> test.py::test[optimizers-yql-2582_limit_for_join_input--Results] >> test.py::test[blocks-combine_all_minmax_nested--Results] |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/tests/fq/s3/ydb-tests-fq-s3 |56.0%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/fresh/ut/ydb-core-blobstorage-vdisk-hulldb-fresh-ut >> test.py::test[blocks-string_pass--ForceBlocks] [GOOD] >> test.py::test[blocks-string_pass--Results] >> TBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TBTreeTest::ShouldCallDtorsInplace >> TBTreeTest::ShouldCallDtorsInplace [GOOD] >> TBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TBTreeTest::Concurrent >> test.py::test[join-mergejoin_force_align2-off-ForceBlocks] |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/vdisk/hulldb/generic/ut/ydb-core-blobstorage-vdisk-hulldb-generic-ut |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/blobstorage/pdisk/ut/ydb-core-blobstorage-pdisk-ut >> test.py::test[aggr_factory-max_by-default.txt-ForceBlocks] |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/cms/console/validators/ut/ydb-core-cms-console-validators-ut |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/scheme/ut_pg/ydb-core-scheme-ut_pg >> test.py::test[schema-user_schema_mix3--Results] [GOOD] >> test.py::test[select-calculated_values-default.txt-ForceBlocks] >> test.py::test[blocks-pg_to_dates--Results] [GOOD] >> test.py::test[blocks-sort_two_mix--ForceBlocks] >> test.py::test[flatten_by-flatten_corr_name_column-default.txt-ForceBlocks] [GOOD] >> test.py::test[flatten_by-flatten_corr_name_column-default.txt-Results] >> test.py::test[key_filter-range_union_lower_excluded-default.txt-ForceBlocks] [GOOD] >> test.py::test[key_filter-range_union_lower_excluded-default.txt-Results] |56.1%| [LD] {BAZEL_DOWNLOAD} $(B)/ydb/core/testlib/actors/ut/ydb-core-testlib-actors-ut >> test.py::test[join-pushdown_filter_over_left--ForceBlocks] [GOOD] >> test.py::test[join-pushdown_filter_over_left--Results] >> test.py::test[action-action_eval_cluster_table_for--ForceBlocks] [GOOD] >> test.py::test[action-action_eval_cluster_table_for--Results] >> test.py::test[action-eval_folder_via_file_in_job--ForceBlocks] [GOOD] >> test.py::test[action-eval_folder_via_file_in_job--Results] >> test.py::test[pg-tpch-q19-default.txt-Results] [GOOD] >> test.py::test[produce-process_with_lambda-default.txt-ForceBlocks] >> test.py::test[weak_field-weak_field_strict--Results] >> test.py::test[join-full_trivial_udf_call--Results] [GOOD] >> test.py::test[join-mergejoin_with_different_key_names--ForceBlocks] [GOOD] >> test.py::test[join-full_trivial_udf_call-off-ForceBlocks] >> test.py::test[join-mergejoin_with_different_key_names--Results] >> test.py::test[action-eval_anon_table--Results] >> test.py::test[ql_filter-integer_many_right--Results] [GOOD] >> test.py::test[ql_filter-integer_members--ForceBlocks] >> 
TCowBTreeTest::MultipleSnapshotsWithGc [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClear >> test.py::test[pg-tpcds-q15-default.txt-Results] [GOOD] >> test.py::test[like-regexp_clause--ForceBlocks] >> TLockFreeIntrusiveStackTest::ConcurrentAutoNeverEmpty [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention >> test.py::test[schema-select_all_inferschema-extra_field-Results] [GOOD] >> test.py::test[hor_join-yql19332_aux_cols--Results] [GOOD] >> test.py::test[in-in_with_table_of_tuples-default.txt-Results] >> test.py::test[pg-tpcds-q24-default.txt-ForceBlocks] >> test.py::test[schema-select_all_inferschema_range--ForceBlocks] >> test.py::test[blocks-string_pass--Results] [GOOD] >> test.py::test[blocks-sub_uint64_opt2--ForceBlocks] >> test.py::test[join-star_join_mirror-off-ForceBlocks] [GOOD] >> test.py::test[join-star_join_mirror-off-Results] >> test.py::test[join-star_join_inners--Results] [GOOD] >> test.py::test[join-star_join_mirror-off-Results] >> test.py::test[join-star_join_mirror-off-Results] [SKIPPED] >> test.py::test[join-yql-12022-off-ForceBlocks] >> test.py::test[blocks-combine_all_minmax_nested--Results] [GOOD] >> test.py::test[join-star_join_mirror-off-Results] [SKIPPED] >> test.py::test[join-yql-8131--Results] [SKIPPED] >> test.py::test[key_filter-contains_tuples-default.txt-Results] >> test.py::test[blocks-combine_hashed_sum--ForceBlocks] >> test.py::test[join-inner_all_right--Results] [GOOD] >> test.py::test[join-inner_grouped_by_expr-off-ForceBlocks] >> TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] >> test.py::test[optimizers-yql-2582_limit_for_join_input--Results] [GOOD] >> test.py::test[optimizers-yql-5833-table_content--ForceBlocks] >> test.py::test[key_filter-range_union_lower_excluded-default.txt-Results] [GOOD] >> test.py::test[key_filter-utf8_with_legacy--ForceBlocks] |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore0 [GOOD] >> TBTreeTest::Concurrent [GOOD] >> TBTreeTest::IteratorDestructor [GOOD] >> TCacheCacheTest::MoveToWarm [GOOD] >> TCacheCacheTest::EvictNext [GOOD] >> CompressionTest::lz4_generator_basic [GOOD] >> CompressionTest::lz4_generator_deflates [GOOD] >> StLog::Basic [GOOD] >> test.py::test[action-eval_folder_via_file_in_job--Results] [GOOD] >> test.py::test[action-subquery_merge2-default.txt-ForceBlocks] >> test.py::test[action-action_eval_cluster_table_for--Results] [GOOD] >> test.py::test[action-action_nested_query-default.txt-ForceBlocks] >> test.py::test[pg-select_common_type_unionall--ForceBlocks] [GOOD] >> test.py::test[pg-select_common_type_unionall--Results] >> test.py::test[flatten_by-flatten_corr_name_column-default.txt-Results] [GOOD] >> test.py::test[flatten_by-flatten_dict_by_opt--ForceBlocks] >> test.py::test[union_all-mix_map_and_project--ForceBlocks] [GOOD] >> test.py::test[union_all-mix_map_and_project--Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> StLog::Basic [GOOD] Test command err: Producer 0 worked for 0.62974321 seconds Producer 1 worked for 0.4876866528 seconds Consumer 0 worked for 4.903683291 seconds Consumer 1 worked for 4.211254253 seconds Consumer 2 worked for 5.006175208 seconds Consumer 3 worked for 5.284476395 seconds >> test.py::test[case-case_multi_val-default.txt-ForceBlocks] [GOOD] >> test.py::test[case-case_multi_val-default.txt-Results] >> test.py::test[join-pushdown_filter_over_left--Results] [GOOD] >> test.py::test[join-star_join_mirror--ForceBlocks] >> 
test.py::test[aggregate-group_by_ru_join_agg--Results] [GOOD] >> test.py::test[aggregate-group_by_with_udf_by_aggregate--ForceBlocks] >> test.py::test[aggregate-group_by_with_udf_by_aggregate--ForceBlocks] [SKIPPED] >> test.py::test[aggregate-group_by_with_udf_by_aggregate--Results] [SKIPPED] >> test.py::test[aggregate-group_by_rollup_grouping--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_rollup_grouping--Results] >> test.py::test[bigdate-table_io-default.txt-ForceBlocks] >> test.py::test[insert-select_after_insert_relabeled-default.txt-ForceBlocks] [GOOD] >> test.py::test[insert-select_after_insert_relabeled-default.txt-Results] >> TErasureTypeTest::TestBlock42PartialRestore3 [GOOD] >> TLockFreeIntrusiveStackTest::ConcurrentAutoHeavyContention [GOOD] >> TLogPriorityMuteTests::MuteUntilTest [GOOD] >> TLogPriorityMuteTests::AtomicMuteUntilTest [GOOD] >> TLogPriorityMuteTests::UnmuteTest [GOOD] >> TLogPriorityMuteTests::AtomicUnmuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteTest [GOOD] >> TLogPriorityMuteTests::CheckPriorityWithSetMuteDurationTest [GOOD] >> TLogPriorityMuteTests::AtomicCheckPriorityWithSetMuteDurationTest [GOOD] >> TOneOneQueueTests::TestSimpleEnqueueDequeue [GOOD] >> TOneOneQueueTests::CleanInDestructor [GOOD] >> TOneOneQueueTests::ReadIterator [GOOD] >> TPageMapTest::TestResize [GOOD] >> TPageMapTest::TestRandom |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestBlock42PartialRestore3 [GOOD] |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/barriers/ut/unittest >> TCowBTreeTest::MultipleSnapshotsWithClear [GOOD] >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc >> test.py::test[udf-udaf_distinct--ForceBlocks] [GOOD] >> test.py::test[udf-udaf_distinct--Results] >> test.py::test[order_by-SortByOneFieldDesc--ForceBlocks] [GOOD] >> test.py::test[order_by-SortByOneFieldDesc--Results] >> TCowBTreeTest::SnapshotRollback [GOOD] >> TCowBTreeTest::SnapshotRollbackEarlyErase >> test.py::test[join-mergejoin_with_different_key_names--Results] [GOOD] >> test.py::test[join-premap_common_right_tablecontent--ForceBlocks] >> test.py::test[case-case_multi_val-default.txt-Results] [GOOD] >> test.py::test[case-case_then_else-default.txt-ForceBlocks] >> test.py::test[join-selfjoin_on_sorted--ForceBlocks] [GOOD] >> test.py::test[join-selfjoin_on_sorted--Results] >> test.py::test[insert-select_after_insert_relabeled-default.txt-Results] [GOOD] >> test.py::test[insert-trivial_literals_multirow-default.txt-ForceBlocks] >> test.py::test[union_all-mix_map_and_project--Results] [GOOD] >> test.py::test[view-secure_eval_dyn--ForceBlocks] >> TPageMapTest::TestRandom [GOOD] >> TPageMapTest::TestIntrusive [GOOD] >> TPageMapTest::TestSimplePointer [GOOD] >> TPageMapTest::TestSharedPointer [GOOD] >> TPageMapTest::TestSimplePointerFull >> test.py::test[weak_field-weak_field_strict--Results] [GOOD] >> test.py::test[weak_field-weak_field_type-default.txt-Results] >> TPageMapTest::TestSimplePointerFull [GOOD] >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> test.py::test[select-calculated_values-default.txt-ForceBlocks] [GOOD] >> test.py::test[blocks-sort_two_mix--ForceBlocks] [GOOD] >> test.py::test[blocks-sort_two_mix--Results] |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TPriorityOperationQueueTest::ShouldNotStartUntilStart [GOOD] >> 
test.py::test[action-discard-default.txt-Results] [GOOD] >> test.py::test[select-calculated_values-default.txt-Results] >> test.py::test[action-eval_folder_via_file_in_job--Results] >> test.py::test[join-mergejoin_force_align2-off-ForceBlocks] [GOOD] >> test.py::test[join-mergejoin_force_align2-off-Results] [SKIPPED] >> test.py::test[join-nopushdown_filter_with_depends_on-off-ForceBlocks] >> test.py::test[action-eval_anon_table--Results] [GOOD] >> test.py::test[action-eval_folder_via_file--Results] >> test.py::test[produce-process_with_lambda-default.txt-ForceBlocks] [GOOD] >> test.py::test[produce-process_with_lambda-default.txt-Results] >> test.py::test[ql_filter-integer_members--ForceBlocks] [GOOD] >> test.py::test[ql_filter-integer_members--Results] >> test.py::test[blocks-sub_uint64_opt2--ForceBlocks] [GOOD] >> test.py::test[blocks-sub_uint64_opt2--Results] >> test.py::test[aggr_factory-max_by-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-max_by-default.txt-Results] >> test.py::test[like-regexp_clause--ForceBlocks] [GOOD] >> test.py::test[like-regexp_clause--Results] >> test.py::test[join-selfjoin_on_sorted--Results] [GOOD] >> test.py::test[join-star_join_inners_premap--ForceBlocks] >> test.py::test[order_by-SortByOneFieldDesc--Results] [GOOD] >> test.py::test[pg-join_using_tables4-default.txt-ForceBlocks] >> test.py::test[join-full_trivial_udf_call-off-ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_rollup_grouping--Results] [GOOD] >> test.py::test[aggregate-having_distinct_expr--ForceBlocks] >> TBlobStorageIngressMatrix::MatrixTest [GOOD] >> TBlobStorageIngressMatrix::ShiftedBitVecBase [GOOD] >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] >> test.py::test[join-full_trivial_udf_call-off-Results] [SKIPPED] >> test.py::test[join-grace_join1--ForceBlocks] |56.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |56.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |56.1%| [AR] {RESULT} $(B)/ydb/core/mind/hive/libcore-mind-hive.a |56.1%| [LD] {RESULT} $(B)/yql/tools/yqlrun/yqlrun >> test.py::test[blocks-sort_two_mix--Results] [GOOD] >> test.py::test[column_group-hint-disable-ForceBlocks] [SKIPPED] |56.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |56.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |56.1%| [TS] {RESULT} ydb/tests/stress/transfer/tests/import_test |56.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/ydb-core-blobstorage-vdisk-hulldb-cache_block-ut |56.1%| [TS] {RESULT} ydb/core/viewer/json/ut/unittest |56.1%| [TS] {RESULT} ydb/tools/cfg/bin/flake8 >> test.py::test[udf-udaf_distinct--Results] [GOOD] >> test.py::test[view-file_inner--ForceBlocks] >> test.py::test[pg-select_common_type_unionall--Results] [GOOD] >> test.py::test[pg-select_subquery2-default.txt-ForceBlocks] |56.1%| [TS] {RESULT} ydb/tests/datashard/s3/flake8 >> TCowBTreeTest::MultipleSnapshotsWithClearWithGc [GOOD] >> TCowBTreeTest::DuplicateKeysInplace >> test.py::test[schema-select_all_inferschema_range--ForceBlocks] [GOOD] >> TBtreeIndexTPartLarge::SmallKeys1GB [GOOD] >> test.py::test[column_group-hint-disable-Results] [SKIPPED] >> TBtreeIndexTPartLarge::MiddleKeys1GB >> test.py::test[schema-select_all_inferschema_range--Results] >> 
test.py::test[column_group-hint_diff_grp_fail3--ForceBlocks] [SKIPPED] >> test.py::test[column_group-hint_diff_grp_fail3--Results] [SKIPPED] >> test.py::test[column_group-many_inserts--ForceBlocks] [SKIPPED] >> test.py::test[column_group-many_inserts--Results] >> test.py::test[column_group-many_inserts--Results] [SKIPPED] >> test.py::test[column_order-winfunc-default.txt-ForceBlocks] >> test.py::test[action-action_nested_query-default.txt-ForceBlocks] [GOOD] >> test.py::test[action-action_nested_query-default.txt-Results] >> test.py::test[select-calculated_values-default.txt-Results] [GOOD] >> test.py::test[select-from_in_front_sub-default.txt-ForceBlocks] >> test.py::test[view-secure_eval_dyn--ForceBlocks] [GOOD] >> test.py::test[view-secure_eval_dyn--Results] |56.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/ingress/ut/unittest >> TBlobStorageIngressMatrix::ShiftedHandoffBitVec [GOOD] |56.1%| [TS] {RESULT} ydb/tests/functional/postgresql/import_test |56.1%| [TS] {RESULT} ydb/tests/compatibility/import_test |56.1%| [TS] {RESULT} ydb/tests/functional/benchmarks_init/flake8 |56.2%| [TS] {RESULT} ydb/tests/datashard/dump_restore/flake8 |56.2%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part6/py2_flake8 |56.2%| [TS] {RESULT} ydb/tests/sql/large/flake8 |56.2%| [TS] {RESULT} ydb/tests/olap/column_family/compression/import_test >> test.py::test[view-secure_eval_dyn--Results] [GOOD] >> test.py::test[weak_field-hor_join_with_mix_weak_access--ForceBlocks] >> test.py::test[join-yql-12022-off-ForceBlocks] [GOOD] >> test.py::test[join-yql-12022-off-Results] |56.2%| [TS] {RESULT} ydb/library/yql/providers/dq/runtime/ut/unittest |56.2%| [TS] {RESULT} ydb/library/workload/benchmark_base/ut/unittest >> test.py::test[pg-tpcds-q24-default.txt-ForceBlocks] [GOOD] >> TCowBTreeTest::DuplicateKeysInplace [GOOD] >> test.py::test[join-yql-12022-off-Results] [SKIPPED] >> TCowBTreeTest::DuplicateKeysThreadSafe >> test.py::test[json-json_value/example--ForceBlocks] >> test.py::test[pg-tpcds-q24-default.txt-Results] |56.2%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/flake8 |56.2%| [TA] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test.py::test[produce-process_with_lambda-default.txt-Results] [GOOD] >> test.py::test[produce-process_with_python_as_struct-default.txt-ForceBlocks] >> test.py::test[blocks-combine_hashed_sum--ForceBlocks] [GOOD] >> test.py::test[blocks-combine_hashed_sum--Results] >> test.py::test[action-subquery_merge2-default.txt-ForceBlocks] [GOOD] >> test.py::test[action-subquery_merge2-default.txt-Results] >> TCowBTreeTest::SnapshotRollbackEarlyErase [GOOD] >> TCowBTreeTest::ShouldCallDtorsInplace [GOOD] >> TCowBTreeTest::ShouldCallDtorsThreadSafe [GOOD] >> TEventPriorityQueueTest::TestPriority [GOOD] >> TFastTlsTest::IterationAfterThreadDeath |56.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part10/flake8 |56.2%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/import_test |56.2%| [TS] {RESULT} ydb/tests/datashard/select/import_test >> test.py::test[optimizers-yql-5833-table_content--ForceBlocks] [GOOD] |56.2%| [TS] {RESULT} ydb/tests/functional/blobstorage/flake8 |56.2%| [TS] {RESULT} ydb/tests/functional/serializable/flake8 |56.2%| [TS] {RESULT} ydb/library/benchmarks/runner/result_convert/flake8 |56.2%| [TS] {RESULT} ydb/tests/stability/tool/flake8 |56.2%| [TS] {RESULT} ydb/tests/functional/scheme_shard/flake8 >> test.py::test[optimizers-yql-5833-table_content--Results] >> TFastTlsTest::IterationAfterThreadDeath [GOOD] >> TFastTlsTest::ManyThreadLocals [GOOD] >> TFastTlsTest::ManyConcurrentKeys |56.2%| [TS] {RESULT} ydb/tests/stress/simple_queue/import_test >> test.py::test[blocks-sub_uint64_opt2--Results] [GOOD] |56.2%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_builder/unittest |56.2%| [TS] {RESULT} ydb/tests/example/import_test |56.2%| [TS] {RESULT} ydb/tests/tools/nemesis/driver/flake8 >> test.py::test[blocks-type_and_callable_stats--ForceBlocks] |56.2%| [TS] {RESULT} ydb/library/yql/dq/actors/compute/ut/unittest |56.3%| [TS] {RESULT} ydb/tests/datashard/ttl/flake8 |56.3%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part8/py2_flake8 |56.3%| [TA] {RESULT} $(B)/ydb/library/yql/public/ydb_issue/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |56.3%| [TS] {RESULT} ydb/core/base/generated/ut/unittest >> test.py::test[like-regexp_clause--Results] [GOOD] >> TFastTlsTest::ManyConcurrentKeys [GOOD] >> TFifoQueueTest::ShouldPushPop [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead2 [GOOD] >> TFragmentedBufferTest::TestIntersectedWriteRead3 [GOOD] >> TFragmentedBufferTest::Test3WriteRead [GOOD] >> TFragmentedBufferTest::Test5WriteRead [GOOD] >> TFragmentedBufferTest::TestGetMonolith [GOOD] >> TFragmentedBufferTest::CopyFrom [GOOD] >> TFragmentedBufferTest::ReadWriteRandom |56.3%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/spilling/ut/ydb-library-yql-dq-actors-spilling-ut >> test.py::test[lineage-isolated-default.txt-ForceBlocks] [SKIPPED] |56.3%| [TS] {RESULT} ydb/tests/stress/node_broker/flake8 >> test.py::test[lineage-isolated-default.txt-Results] >> test.py::test[lineage-isolated-default.txt-Results] [SKIPPED] >> TCowBTreeTest::DuplicateKeysThreadSafe [GOOD] >> TCowBTreeTest::IteratorDestructor [GOOD] >> TCowBTreeTest::Concurrent >> test.py::test[join-full_trivial--ForceBlocks] |56.3%| [TS] {RESULT} ydb/tests/fq/plans/import_test |56.3%| [TS] {RESULT} ydb/library/benchmarks/runner/result_compare/import_test |56.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part3/py2_flake8 >> test.py::test[lineage-reduce_all_row-default.txt-ForceBlocks] [SKIPPED] >> test.py::test[join-inner_grouped_by_expr-off-ForceBlocks] [GOOD] >> test.py::test[join-inner_grouped_by_expr-off-Results] [SKIPPED] >> test.py::test[join-left_only_with_other--ForceBlocks] |56.3%| [TS] {RESULT} ydb/library/yql/providers/s3/compressors/ut/unittest >> test.py::test[lineage-reduce_all_row-default.txt-Results] [SKIPPED] >> test.py::test[key_filter-contains_tuples-default.txt-Results] [GOOD] >> test.py::test[key_filter-empty_range--Results] >> test.py::test[in-in_with_table_of_tuples-default.txt-Results] [GOOD] >> test.py::test[insert-replace_ordered_by_key_desc-default.txt-Results] |56.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut >> test.py::test[ql_filter-integer_members--Results] [GOOD] |56.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut |56.3%| [TS] {RESULT} ydb/tests/functional/api/import_test >> test.py::test[multicluster-insert_fill--ForceBlocks] >> test.py::test[case-case_then_else-default.txt-ForceBlocks] [GOOD] >> test.py::test[schema-insert-row_spec-ForceBlocks] |56.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/pdiskfit/pdiskfit |56.3%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/flake8 >> test.py::test[multicluster-insert_fill--ForceBlocks] [SKIPPED] |56.3%| [TS] {RESULT} ydb/tests/functional/ttl/flake8 |56.3%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part9/py2_flake8 |56.3%| [TS] {RESULT} ydb/library/yql/providers/s3/range_helpers/ut/unittest |56.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/ydb-core-blobstorage-vdisk-synclog-ut >> test.py::test[multicluster-insert_fill--Results] [SKIPPED] >> test.py::test[optimizers-passthrough_sortness_over_map-default.txt-ForceBlocks] >> test.py::test[action-action_nested_query-default.txt-Results] [GOOD] >> test.py::test[action-insert_each_from_folder--ForceBlocks] >> test.py::test[flatten_by-flatten_dict_by_opt--ForceBlocks] [GOOD] >> test.py::test[flatten_by-flatten_dict_by_opt--Results] >> test.py::test[case-case_then_else-default.txt-Results] 
|56.3%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[insert-trivial_literals_multirow-default.txt-ForceBlocks] [GOOD] >> test.py::test[insert-trivial_literals_multirow-default.txt-Results] >> test.py::test[action-subquery_merge2-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-every-default.txt-ForceBlocks] |56.3%| [TS] {RESULT} ydb/tests/functional/tenants/flake8 |56.4%| [TS] {RESULT} ydb/tests/functional/script_execution/flake8 |56.4%| [LD] {RESULT} $(B)/ydb/core/sys_view/partition_stats/ut/ydb-core-sys_view-partition_stats-ut >> test.py::test[schema-select_all_inferschema_range--Results] [GOOD] >> test.py::test[schema-select_field-read_schema-ForceBlocks] >> test.py::test[pg-tpcds-q24-default.txt-Results] [GOOD] >> test.py::test[action-eval_folder_via_file_in_job--Results] [GOOD] >> test.py::test[action-eval_input_output_table--Results] >> test.py::test[pg-tpcds-q37-default.txt-ForceBlocks] >> test.py::test[action-eval_each_input_table-default.txt-ForceBlocks] >> test.py::test[action-eval_folder_via_file--Results] [GOOD] >> test.py::test[aggr_factory-max_by-default.txt-Results] [GOOD] >> TCowBTreeTest::Concurrent [GOOD] >> test.py::test[key_filter-utf8_with_legacy--ForceBlocks] [GOOD] >> test.py::test[action-eval_pragma--Results] >> test.py::test[aggr_factory-median-default.txt-ForceBlocks] >> test.py::test[key_filter-utf8_with_legacy--Results] >> TCowBTreeTest::Alignment [GOOD] >> test.py::test[schema-select_all-row_spec_diff_sort_desc-ForceBlocks] >> test.py::test[insert-trivial_literals_multirow-default.txt-Results] [GOOD] >> test.py::test[optimizers-yql-5833-table_content--Results] [GOOD] >> test.py::test[weak_field-weak_field_type-default.txt-Results] [GOOD] >> test.py::test[bigdate-table_io-default.txt-ForceBlocks] [GOOD] >> test.py::test[blocks-combine_hashed_sum--Results] [GOOD] >> test.py::test[blocks-date_greater--ForceBlocks] >> TFragmentedBufferTest::ReadWriteRandom [GOOD] >> test.py::test[insert-yql-13083--ForceBlocks] >> test.py::test[join-premap_common_right_tablecontent--ForceBlocks] [GOOD] >> test.py::test[window-full/aggregations_leadlag--Results] >> test.py::test[order_by-literal_with_assume_desc--ForceBlocks] >> test.py::test[case-case_then_else-default.txt-Results] [GOOD] >> test.py::test[pg-join_using_tables4-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-join_using_tables4-default.txt-Results] >> test.py::test[aggregate-having_distinct_expr--ForceBlocks] [GOOD] >> test.py::test[join-star_join_mirror--ForceBlocks] [GOOD] >> test.py::test[join-star_join_mirror--Results] >> test.py::test[produce-process_with_python_as_struct-default.txt-ForceBlocks] [GOOD] >> test.py::test[flatten_by-flatten_dict_by_opt--Results] [GOOD] >> test.py::test[insert-replace_ordered_by_key_desc-default.txt-Results] [GOOD] >> test.py::test[join-grace_join1--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_rollup_duo_opt--Results] >> test.py::test[select-from_in_front_sub-default.txt-ForceBlocks] [GOOD] >> test.py::test[view-file_inner--ForceBlocks] [GOOD] >> test.py::test[weak_field-hor_join_with_mix_weak_access--ForceBlocks] [GOOD] >> test.py::test[weak_field-hor_join_with_mix_weak_access--Results] >> test.py::test[bigdate-table_io-default.txt-Results] >> test.py::test[join-nopushdown_filter_with_depends_on-off-ForceBlocks] [GOOD] >> test.py::test[blocks-type_and_callable_stats--ForceBlocks] [GOOD] >> test.py::test[key_filter-utf8_with_legacy--Results] [GOOD] >> 
test.py::test[aggregate-having_distinct_expr--Results] >> test.py::test[lineage-window_tablerow-default.txt-ForceBlocks] [SKIPPED] >> test.py::test[select-from_in_front_sub-default.txt-Results] >> test.py::test[flatten_by-flatten_expr_join--ForceBlocks] [SKIPPED] >> test.py::test[join-grace_join1--Results] [SKIPPED] >> test.py::test[join-join_key_cmp_udf-off-ForceBlocks] >> test.py::test[join-premap_common_right_tablecontent--Results] >> test.py::test[column_group-hint_dup_col_fail--ForceBlocks] >> test.py::test[view-file_inner--Results] >> test.py::test[join-left_only_with_other--ForceBlocks] [GOOD] >> test.py::test[lineage-window_tablerow-default.txt-Results] >> test.py::test[produce-process_with_python_as_struct-default.txt-Results] >> test.py::test[blocks-type_and_callable_stats--Results] >> test.py::test[bigdate-table_io-default.txt-Results] [GOOD] >> test.py::test[insert-select_operate_with_columns--Results] >> test.py::test[lineage-window_tablerow-default.txt-Results] [SKIPPED] >> test.py::test[flatten_by-flatten_expr_join--Results] [SKIPPED] >> test.py::test[join-premap_common_right_tablecontent--Results] [GOOD] >> test.py::test[column_group-hint_dup_col_fail--ForceBlocks] [SKIPPED] >> test.py::test[join-nopushdown_filter_with_depends_on-off-Results] >> test.py::test[join-left_only_with_other--Results] |56.4%| [TM] {RESULT} ydb/core/blobstorage/ut_mirror3of4/unittest |56.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TFragmentedBufferTest::ReadWriteRandom [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/util/ut/unittest >> TCowBTreeTest::Alignment [GOOD]
Test command err:
Producer 0 worked for 0.2784967801 seconds
Producer 1 worked for 0.2892511128 seconds
Consumer 0 worked for 1.015577763 seconds on a snapshot of size 80000
Consumer 1 worked for 1.977088327 seconds on a snapshot of size 160000
Consumer 2 worked for 2.089042512 seconds on a snapshot of size 240000
Consumer 3 worked for 2.381393585 seconds on a snapshot of size 320000
Consumers had 11999992 successful seeks
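The TCowBTreeTest timings read naturally once copy-on-write snapshots are kept in mind: each consumer seeks within a frozen version whose size it captured (80000, 160000, 240000, 320000) while the producers keep inserting. A minimal sketch of that contract, assuming only generic copy-on-write semantics rather than the actual TCowBTree internals:

    class CowMap:
        """Toy copy-on-write map: snapshot() is O(1), versions never mutate."""

        def __init__(self):
            self._version = {}

        def snapshot(self):
            # Readers capture the currently published immutable version.
            return self._version

        def insert(self, key, value):
            # Writers never touch a published version in place; they publish
            # a fresh one (a full copy here for clarity -- a real COW B-tree
            # would copy only the nodes along the modified path).
            updated = dict(self._version)
            updated[key] = value
            self._version = updated

A consumer that called snapshot() after 80000 inserts can keep seeking in a stable map of exactly that size for its whole run, which is what the "worked for N seconds on a snapshot of size M" lines record.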
>> test.py::test[blocks-coalesce_complex-default.txt-ForceBlocks] >> test.py::test[join-pushdown_filter_over_inner_with_strict_udf--ForceBlocks] >> test.py::test[optimizers-test_lmap_opts--ForceBlocks] [SKIPPED] >> test.py::test[join-star_join_inners_premap--ForceBlocks] [GOOD] >> test.py::test[column_group-hint_dup_col_fail--Results] [SKIPPED] >> test.py::test[join-nopushdown_filter_with_depends_on-off-Results] [SKIPPED] >> test.py::test[flatten_by-flatten_mode-default.txt-ForceBlocks] >> test.py::test[join-star_join_inners_premap--Results] >> test.py::test[optimizers-test_lmap_opts--Results] [SKIPPED] >> test.py::test[join-premap_map_semi-off-ForceBlocks] >> test.py::test[column_group-insert_diff_groups3_fail--ForceBlocks] [SKIPPED] >> test.py::test[order_by-literal_with_assume--ForceBlocks] >> test.py::test[column_group-insert_diff_groups3_fail--Results] [SKIPPED] >> test.py::test[column_order-ordered_plus_native--ForceBlocks] >> test.py::test[produce-process_with_python_as_struct-default.txt-Results] [GOOD] >> test.py::test[produce-reduce_lambda_presort_twin_list--ForceBlocks] >> test.py::test[pg-select_subquery2-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-select_subquery2-default.txt-Results] >> test.py::test[produce-reduce_lambda_presort_twin_list--ForceBlocks] [SKIPPED] >> test.py::test[produce-reduce_lambda_presort_twin_list--Results] [SKIPPED] >> test.py::test[sampling-map--ForceBlocks] |56.4%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/import_test |56.4%| [TM] {RESULT} ydb/library/yaml_config/ut_transform/py3test |56.4%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/value/test-results/gtest/{meta.json ... results_accumulator.log} >> test.py::test[action-eval_pragma--Results] [GOOD] >> test.py::test[action-subquery_merge_nested_subquery--Results] >> test.py::test[action-eval_input_output_table--Results] [GOOD] >> test.py::test[action-eval_typeof_output_table--Results] |56.4%| [TS] {RESULT} ydb/tests/functional/minidumps/import_test |56.4%| [TA] {RESULT} $(B)/ydb/library/persqueue/topic_parser/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.4%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut_pg/ydb-core-tablet_flat-ut_pg |56.4%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump_ds_init/yaml-to-proto-dump-ds-init |56.4%| [LD] {RESULT} $(B)/ydb/core/fq/libs/db_id_async_resolver_impl/ut/ydb-core-fq-libs-db_id_async_resolver_impl-ut |56.4%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/service_node/service_node >> test.py::test[join-full_trivial--ForceBlocks] [GOOD] >> test.py::test[join-full_trivial--Results] >> test.py::test[aggregate-having_distinct_expr--Results] [GOOD] >> test.py::test[blocks-combine_all_decimal--ForceBlocks] >> test.py::test[schema-select_field-read_schema-ForceBlocks] [GOOD] >> test.py::test[schema-select_field-read_schema-Results] |56.4%| [LD] {RESULT} $(B)/ydb/core/external_sources/ut/ydb-core-external_sources-ut |56.4%| [LD] {RESULT} $(B)/ydb/core/kqp/tools/combiner_perf/bin/combiner_perf |56.4%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part12/flake8 |56.4%| [LD] {RESULT} $(B)/ydb/core/formats/arrow/ut/ydb-core-formats-arrow-ut >> test.py::test[pg-tpcds-q37-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q37-default.txt-Results] >> test.py::test[action-insert_each_from_folder--ForceBlocks] [GOOD] >> test.py::test[action-insert_each_from_folder--Results] >> test.py::test[select-from_in_front_sub-default.txt-Results] [GOOD] >> test.py::test[select-one_unlabeled_column-default.txt-ForceBlocks] |56.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/actors/ut/ydb-library-yql-providers-s3-actors-ut |56.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/ydb-core-blobstorage-vdisk-huge-ut >> test.py::test[order_by-literal_with_assume_desc--ForceBlocks] [GOOD] >> test.py::test[order_by-literal_with_assume_desc--Results] >> TSTreeTest::Basic [GOOD] >> TSVecTest::Basic [GOOD] >> test.py::test[pg-join_using_tables4-default.txt-Results] [GOOD] >> test.py::test[pg-select_starref2-default.txt-ForceBlocks] >> test.py::test[view-file_inner--Results] [GOOD] >> test.py::test[weak_field-optimize_weak_fields_map_combine--ForceBlocks] |56.4%| [LD] {RESULT} $(B)/ydb/library/yql/providers/pq/provider/ut/ydb-library-yql-providers-pq-provider-ut |56.5%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_replica/ydb-core-tx-scheme_board-ut_replica |56.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/solomon/actors/ut/ydb-library-yql-providers-solomon-actors-ut |56.5%| [TS] {RESULT} ydb/tests/functional/clickbench/flake8 |56.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/test_connection/ut/ydb-core-fq-libs-test_connection-ut >> test.py::test[schema-insert-row_spec-ForceBlocks] [GOOD] >> test.py::test[weak_field-hor_join_with_mix_weak_access--Results] [GOOD] >> test.py::test[window-distinct_over_window--ForceBlocks] >> test.py::test[schema-insert-row_spec-Results] |56.5%| [TA] $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ...
results_accumulator.log} |56.5%| [LD] {RESULT} $(B)/ydb/core/config/init/ut/ydb-core-config-init-ut >> TBlobStorageHullFresh::SolomonStandCrash [GOOD] >> TBlobStorageHullFreshSegment::IteratorTest >> test.py::test[blocks-type_and_callable_stats--Results] [GOOD] >> test.py::test[column_group-hint_append2--ForceBlocks] [SKIPPED] >> test.py::test[column_group-hint_append2--Results] [SKIPPED] >> test.py::test[column_group-hint_diff_grp_fail5--ForceBlocks] [SKIPPED] >> test.py::test[column_group-hint_diff_grp_fail5--Results] [SKIPPED] >> test.py::test[column_group-length-single-ForceBlocks] [SKIPPED] >> test.py::test[column_group-length-single-Results] [SKIPPED] >> test.py::test[column_order-select_plain-default.txt-ForceBlocks] |56.5%| [LD] {RESULT} $(B)/ydb/core/config/validation/column_shard_config_validator_ut/column_shard_config_validator_ut |56.5%| [TA] {RESULT} $(B)/ydb/core/config/tools/protobuf_plugin/ut/test-results/unittest/{meta.json ... results_accumulator.log} |56.5%| [TS] {RESULT} ydb/library/yql/providers/s3/provider/ut/unittest >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] |56.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TSVecTest::Basic [GOOD] |56.5%| [LD] {RESULT} $(B)/ydb/core/kqp/federated_query/ut/ydb-core-kqp-federated_query-ut |56.5%| [TS] {RESULT} ydb/core/ymq/base/ut/unittest |56.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/provider/ut/pushdown/yql-providers-generic-provider-ut-pushdown |56.5%| [TS] {RESULT} ydb/library/benchmarks/runner/run_tests/flake8 |56.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |56.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots >> test.py::test[schema-select_field-read_schema-Results] [GOOD] >> test.py::test[schema-user_schema_append--ForceBlocks] >> test.py::test[action-eval_typeof_output_table--Results] [GOOD] >> test.py::test[action-insert_each_from_folder--Results] >> test.py::test[join-star_join_mirror--Results] [GOOD] >> test.py::test[join-star_join_semionly_premap--ForceBlocks] >> test.py::test[pg-tpcds-q37-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q51-default.txt-ForceBlocks] |56.5%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dq/worker_node/worker_node |56.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::IteratorTest [GOOD] |56.5%| [LD] {RESULT} $(B)/ydb/library/yql/providers/yt/actors/ut/ydb-library-yql-providers-yt-actors-ut |56.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |56.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes >> test.py::test[pg-select_subquery2-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q08-default.txt-ForceBlocks] |56.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |56.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat >> test.py::test[column_order-winfunc-default.txt-ForceBlocks] [GOOD] >> test.py::test[column_order-winfunc-default.txt-Results] |56.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part17/flake8 |56.6%| [LD] {RESULT} $(B)/ydb/core/io_formats/arrow/scheme/ut/ydb-core-io_formats-arrow-scheme-ut >> 
test.py::test[aggr_factory-every-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-every-default.txt-Results] >> TBsVDiskManyPutGet::ManyPutRangeGetCompactionIndexOnly [GOOD] >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly >> test.py::test[join-full_trivial--Results] [GOOD] >> test.py::test[join-full_trivial-off-ForceBlocks] |56.6%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/import_test >> test.py::test[action-insert_each_from_folder--Results] [GOOD] >> test.py::test[action-subquery-default.txt-ForceBlocks] >> test.py::test[column_order-ordered_plus_native--ForceBlocks] [GOOD] >> TFreshAppendixTest::IterateForwardAll [GOOD] |56.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part13/flake8 |56.6%| [LD] {RESULT} $(B)/ydb/core/kqp/tests/tpch/tpch |56.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/ydb-core-blobstorage-ut_vdisk |56.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes/ydb-core-tx-schemeshard-ut_user_attributes |56.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> test.py::test[column_order-ordered_plus_native--Results] >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |56.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut >> test.py::test[order_by-literal_with_assume_desc--Results] [GOOD] >> test.py::test[order_by-order_by_expr--ForceBlocks] >> test.py::test[json-json_value/example--ForceBlocks] [GOOD] >> ErasureBrandNew::Block42_restore [GOOD] >> test.py::test[join-left_only_with_other--Results] [GOOD] >> test.py::test[join-star_join_inners_premap--Results] [GOOD] >> test.py::test[schema-insert-row_spec-Results] [GOOD] >> test.py::test[json-json_value/example--Results] >> ErasureBrandNew::Block42_restore_benchmark >> test.py::test[join-mapjoin_early_rewrite_star--ForceBlocks] >> test.py::test[schema-select_all-row_spec_extra_sort-ForceBlocks] >> test.py::test[join-star_join_inners_premap-off-ForceBlocks] >> test.py::test[flatten_by-flatten_mode-default.txt-ForceBlocks] [GOOD] >> test.py::test[insert-select_operate_with_columns--Results] [GOOD] >> test.py::test[action-subquery_merge_nested_subquery--Results] [GOOD] >> test.py::test[schema-select_all-row_spec_diff_sort_desc-ForceBlocks] [GOOD] >> TActorTest::TestCreateChildActor >> test.py::test[action-table_content_before_from_folder--Results] >> test.py::test[flatten_by-flatten_mode-default.txt-Results] >> test.py::test[schema-select_all-row_spec_diff_sort_desc-Results] >> test.py::test[insert-trivial_select-default.txt-Results] >> TActorTest::TestCreateChildActor [GOOD] >> TActorTest::TestBlockEvents >> TActorTest::TestBlockEvents [GOOD] |56.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TFreshAppendixTest::IterateBackwardIncluding [GOOD] |56.6%| [TS] {RESULT} ydb/tests/functional/sqs/common/import_test >> test.py::test[insert-yql-13083--ForceBlocks] [GOOD] >> test.py::test[insert-yql-13083--Results] >> test.py::test[join-join_key_cmp_udf-off-ForceBlocks] [GOOD] >> test.py::test[optimizers-passthrough_sortness_over_map-default.txt-ForceBlocks] [GOOD] >> test.py::test[optimizers-passthrough_sortness_over_map-default.txt-Results] >> test.py::test[join-join_key_cmp_udf-off-Results] [SKIPPED] >> test.py::test[join-left_all-off-ForceBlocks] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestBlockEvents [GOOD] Test command err: ... waiting for blocked 3 events ... 
blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0
... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0
... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0
... waiting for blocked 3 events (done)
... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor
... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor
... waiting for blocked 1 more event
... blocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor cookie 0
... waiting for blocked 1 more event (done)
... waiting for processed 2 more events
... waiting for processed 2 more events (done)
... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor
... unblocking NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TEvTrigger from NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TSourceActor to NKikimr::NTestSuiteTActorTest::TTestCaseTestBlockEvents::Execute_(NUnitTest::TTestContext&)::TTargetActor
... waiting for processed 3 more events
... waiting for processed 3 more events (done)
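The block/unblock trace above follows a common actor-test pattern: a filter diverts matching events into a holding queue, the test waits until the expected number are captured, then re-injects them in controlled doses. A minimal sketch of the pattern; runtime.set_filter, run_until and deliver are hypothetical stand-ins, not the actual NActors test-runtime API:

    class EventBlocker:
        def __init__(self, runtime, matches):
            self.runtime = runtime
            self.matches = matches        # predicate selecting events to hold
            self.blocked = []
            runtime.set_filter(self._filter)

        def _filter(self, event):
            if self.matches(event):
                self.blocked.append(event)  # "... blocking <event> ..."
                return None                 # the target does not see it yet
            return event                    # unrelated events pass through

        def wait_blocked(self, n):
            # "... waiting for blocked N events ..."
            self.runtime.run_until(lambda: len(self.blocked) >= n)

        def unblock(self, n):
            # "... unblocking <event> ...": re-inject toward the target
            for _ in range(n):
                self.runtime.deliver(self.blocked.pop(0))

Read this way, the trace is simply: hold three TEvTrigger events, release two, hold the next one, wait for the released ones to be processed, then release the remaining two.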
|56.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> test.py::test[join-premap_map_semi-off-ForceBlocks] [GOOD] >> test.py::test[join-premap_map_semi-off-Results] [SKIPPED] >> test.py::test[aggr_factory-median-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-median-default.txt-Results] >> test.py::test[join-premap_merge_extrasort2-off-ForceBlocks] |56.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/ut/ydb-core-graph-ut |56.6%| [LD] {RESULT} $(B)/ydb/core/mind/ut_fat/ydb-core-mind-ut_fat |56.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[key_filter-empty_range--Results] [GOOD] >> test.py::test[key_filter-key_double_opt_suffix--Results] >> test.py::test[join-pushdown_filter_over_inner_with_strict_udf--ForceBlocks] [GOOD] >> test.py::test[join-pushdown_filter_over_inner_with_strict_udf--Results] |56.6%| [TS] {RESULT} ydb/tests/stress/oltp_workload/tests/import_test |56.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/ydb-core-blobstorage-vdisk-repl-ut >> test.py::test[key_filter-key_double_opt_suffix--Results] [SKIPPED] >> test.py::test[key_filter-lambda_with_null_filter--Results] |56.6%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/flake8 |56.6%| [TS] {RESULT} ydb/tests/fq/yds/import_test |56.6%| [LD] {RESULT} $(B)/ydb/services/ext_index/ut/ydb-services-ext_index-ut |56.6%| [LD] {RESULT} $(B)/ydb/core/graph/ut/ydb-core-graph-ut >> test.py::test[sampling-map--ForceBlocks] [GOOD] >> test.py::test[sampling-map--Results] >> test.py::test[schema-user_schema_append--ForceBlocks] [GOOD] >> test.py::test[schema-user_schema_append--Results] [GOOD] >> test.py::test[select-from_in_front-default.txt-ForceBlocks] >> test.py::test[order_by-literal_with_assume--ForceBlocks] [GOOD] >> test.py::test[order_by-literal_with_assume--Results] |56.7%| [TS] {RESULT} ydb/library/yql/providers/generic/pushdown/ut/unittest |56.7%| [TS] {RESULT} ydb/tests/fq/mem_alloc/import_test |56.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part18/flake8 |56.7%| [TA] {RESULT} $(B)/ydb/core/util/ut/test-results/unittest/{meta.json ...
results_accumulator.log} >> test.py::test[column_order-winfunc-default.txt-Results] [GOOD] >> test.py::test[distinct-distinct_and_join--ForceBlocks] |56.7%| [TS] {RESULT} ydb/public/tools/local_ydb/flake8 |56.7%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/flake8 |56.7%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/provider/ut/ydb-library-yql-providers-dq-provider-ut >> test.py::test[column_order-ordered_plus_native--Results] [GOOD] >> test.py::test[datetime-date_tz_table_sort_asc--ForceBlocks] >> test.py::test[action-eval_each_input_table-default.txt-ForceBlocks] [GOOD] >> test.py::test[action-eval_each_input_table-default.txt-Results] |56.7%| [LD] {RESULT} $(B)/ydb/tests/compatibility/ydb-tests-compatibility |56.7%| [TS] {RESULT} ydb/tests/fq/common/flake8 |56.7%| [TS] {RESULT} ydb/tests/datashard/secondary_index/flake8 |56.7%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/flake8 >> test.py::test[schema-select_all-row_spec_diff_sort_desc-Results] [GOOD] >> test.py::test[schema-user_schema_no_infer--ForceBlocks] |56.7%| [TS] {RESULT} ydb/tests/datashard/partitioning/flake8 |56.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part15/py2_flake8 |56.7%| [TS] {RESULT} ydb/tests/functional/rename/flake8 |56.7%| [TS] {RESULT} ydb/tests/functional/sqs/common/flake8 >> TActorTest::TestDie [GOOD] >> TActorTest::TestFilteredGrab >> test.py::test[select-one_unlabeled_column-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-one_unlabeled_column-default.txt-Results] >> test.py::test[flatten_by-flatten_mode-default.txt-Results] [GOOD] >> test.py::test[in-in_compact_distinct-empty-ForceBlocks] |56.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part9/py2_flake8 |56.7%| [TS] {RESULT} ydb/tests/example/flake8 |56.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part6/py2_flake8 |56.7%| [TS] {RESULT} ydb/core/metering/ut/unittest |56.7%| [TS] {RESULT} ydb/tests/functional/limits/flake8 >> TActorTest::TestFilteredGrab [GOOD] |56.7%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/flake8 |56.8%| [TS] {RESULT} ydb/library/security/ut/unittest |56.8%| [TM] {RESULT} ydb/library/grpc/server/ut/unittest >> TActorTest::TestScheduleEvent [GOOD] >> TActorTest::TestScheduleReaction |56.8%| [TS] {RESULT} ydb/public/lib/ydb_cli/commands/interactive/highlight/ut/unittest >> test.py::test[aggregate-group_by_rollup_duo_opt--Results] [GOOD] >> test.py::test[aggregate-group_by_session_compact--ForceBlocks] >> TActorTest::TestScheduleReaction [GOOD] >> test.py::test[aggr_factory-every-default.txt-Results] [GOOD] >> test.py::test[aggregate-compare_by_nulls-default.txt-ForceBlocks] |56.8%| [LD] {RESULT} $(B)/ydb/tools/blobsan/blobsan >> test.py::test[pg-select_starref2-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-select_starref2-default.txt-Results] |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestFilteredGrab [GOOD] |56.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part3/py2_flake8 |56.8%| [LD] {RESULT} $(B)/ydb/tests/stability/tool/tool |56.8%| [TS] {RESULT} ydb/public/lib/ydb_cli/commands/topic_workload/ut/unittest |56.8%| [LD] {RESULT} $(B)/ydb/library/yaml_config/tools/dump/yaml-to-proto-dump >> test.py::test[weak_field-optimize_weak_fields_map_combine--ForceBlocks] [GOOD] |56.8%| [LD] {RESULT} $(B)/ydb/library/yql/dq/actors/compute/ut/ydb-library-yql-dq-actors-compute-ut >> test.py::test[weak_field-optimize_weak_fields_map_combine--Results] >> test.py::test[json-json_value/example--Results] [GOOD] >> 
test.py::test[key_filter-between_with_key_filter--ForceBlocks] >> test.py::test[optimizers-passthrough_sortness_over_map-default.txt-Results] [GOOD] >> test.py::test[sampling-map--Results] [GOOD] >> test.py::test[optimizers-yql-10737_lost_passthrough-default.txt-ForceBlocks] |56.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |56.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |56.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestScheduleReaction [GOOD] |56.8%| [TM] {RESULT} ydb/core/fq/libs/control_plane_storage/internal/ut/unittest |56.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part11/py2_flake8 |56.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part2/py2_flake8 >> test.py::test[order_by-literal_with_assume--Results] [GOOD] >> test.py::test[order_by-sort_decimals--ForceBlocks] >> test.py::test[sampling-reduce-with_premap-ForceBlocks] [SKIPPED] >> test.py::test[sampling-reduce-with_premap-Results] [SKIPPED] >> test.py::test[sampling-yql-14664_deps-default.txt-ForceBlocks] >> test.py::test[action-eval_each_input_table-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-avg-default.txt-ForceBlocks] >> test.py::test[insert-yql-13083--Results] [GOOD] >> test.py::test[insert_monotonic-keep_meta-default.txt-ForceBlocks] |56.8%| [TS] {RESULT} ydb/tests/functional/api/flake8 |56.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_mirror3of4/ydb-core-blobstorage-ut_mirror3of4 |56.8%| [TS] {RESULT} ydb/library/yql/providers/s3/actors/ut/unittest |56.8%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/runtime/ut/ydb-library-yql-providers-dq-runtime-ut >> test.py::test[blocks-coalesce_complex-default.txt-ForceBlocks] [GOOD] >> test.py::test[blocks-combine_all_decimal--ForceBlocks] [GOOD] >> test.py::test[blocks-coalesce_complex-default.txt-Results] >> test.py::test[blocks-combine_all_decimal--Results] |56.8%| [TS] {RESULT} ydb/tests/stress/olap_workload/import_test |56.9%| [TS] {RESULT} ydb/public/sdk/cpp/tests/unit/client/oauth2_token_exchange/unittest |56.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_reboots/ydb-core-tx-schemeshard-ut_reboots |56.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_errors/ydb-core-tx-datashard-ut_kqp_errors |56.9%| [TS] {RESULT} ydb/tests/functional/hive/flake8 >> test.py::test[action-insert_each_from_folder--Results] [GOOD] >> test.py::test[action-subquery-default.txt-Results] >> test.py::test[join-star_join_semionly_premap--ForceBlocks] [GOOD] >> test.py::test[join-star_join_semionly_premap--Results] |56.9%| [TS] {RESULT} ydb/tests/functional/serializable/import_test |56.9%| [AR] {RESULT} $(B)/ydb/tests/stability/tool/libpy3tests-stability-tool.global.a |56.9%| [LD] {RESULT} $(B)/ydb/apps/ydb/ydb |56.9%| [TS] {RESULT} ydb/core/blobstorage/vdisk/defrag/ut/unittest |56.9%| [TS] {RESULT} ydb/tests/olap/data_quotas/flake8 |56.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |56.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |56.9%| [TS] {RESULT} ydb/tests/fq/generic/streaming/black |56.9%| [TA] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |56.9%| [TS] {RESULT} ydb/core/fq/libs/signer/ut/unittest >> test.py::test[select-one_unlabeled_column-default.txt-Results] [GOOD] >> test.py::test[simple_columns-simple_columns_join_coalesce_without_left_semi_2-default.txt-ForceBlocks] >> TBtreeIndexTPartLarge::MiddleKeys1GB [GOOD] >> TBtreeIndexTPartLarge::BigKeys1GB >> test.py::test[join-pushdown_filter_over_inner_with_strict_udf--Results] [GOOD] >> test.py::test[join-simple_columns_partial--ForceBlocks] >> TActorTest::TestStateSwitch [GOOD] |56.9%| [TS] {RESULT} ydb/core/fq/libs/db_id_async_resolver_impl/ut/unittest |56.9%| [TS] {RESULT} ydb/tests/functional/scheme_tests/import_test |56.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |56.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |56.9%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/actors/ut/ydb-library-yql-providers-dq-actors-ut |56.9%| [TS] {RESULT} ydb/core/viewer/tests/flake8 >> test.py::test[insert-trivial_select-default.txt-Results] [GOOD] >> test.py::test[insert_monotonic-truncate_fail--Results] |56.9%| [LD] {RESULT} $(B)/ydb/core/statistics/aggregator/ut/ydb-core-statistics-aggregator-ut |56.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> test.py::test[schema-select_all-row_spec_extra_sort-ForceBlocks] [GOOD] >> test.py::test[order_by-order_by_expr--ForceBlocks] [GOOD] >> test.py::test[order_by-order_by_expr--Results] >> test.py::test[schema-select_all-row_spec_extra_sort-Results] |56.9%| [TS] {RESULT} ydb/core/external_sources/ut/unittest |57.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut |57.0%| [TS] {RESULT} ydb/tests/fq/generic/streaming/flake8 >> test.py::test[pg-tpcds-q08-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q08-default.txt-Results] >> TActorTest::TestWaitForFirstEvent >> ErasureBrandNew::Block42_restore_benchmark [GOOD] |57.0%| [TS] {RESULT} ydb/core/config/validation/ut/unittest |57.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestStateSwitch [GOOD] >> test.py::test[column_order-select_plain-default.txt-ForceBlocks] [GOOD] >> test.py::test[column_order-select_plain-default.txt-Results] >> TActorTest::TestWaitForFirstEvent [GOOD] >> test.py::test[action-table_content_before_from_folder--Results] [GOOD] >> test.py::test[aggr_factory-boolor-default.txt-Results] |57.0%| [TS] {RESULT} ydb/library/yql/providers/generic/provider/ut/pushdown/unittest |57.0%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/import_test |57.0%| [TS] {RESULT} ydb/tests/functional/scheme_tests/flake8 |57.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part14/flake8 |57.0%| [TS] {RESULT} ydb/tests/datashard/s3/import_test >> test.py::test[action-subquery-default.txt-ForceBlocks] [GOOD] >> test.py::test[action-subquery-default.txt-Results] >> test.py::test[join-mapjoin_early_rewrite_star--ForceBlocks] [GOOD] >> test.py::test[join-mapjoin_early_rewrite_star--Results] |57.0%| [LD] {RESULT} $(B)/yql/essentials/tools/sql2yql/sql2yql |57.0%| [TS] {RESULT} ydb/core/fq/libs/metrics/ut/unittest |57.0%| [LD] {RESULT} $(B)/ydb/services/datastreams/ut/ydb-services-datastreams-ut >> test.py::test[pg-tpcds-q51-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q51-default.txt-Results] >> test.py::test[blocks-date_greater--ForceBlocks] [GOOD] >> test.py::test[blocks-date_greater--Results] ------- [TS] 
{default-linux-x86_64, relwithdebinfo} ydb/core/testlib/actors/ut/unittest >> TActorTest::TestWaitForFirstEvent [GOOD]
Test command err:
... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger
... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done)
... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger
... waiting for NKikimr::NTestSuiteTActorTest::TTestCaseTestWaitForFirstEvent::Execute_(NUnitTest::TTestContext&)::TEvTrigger (done)
>> test.py::test[pg-select_starref2-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q33-default.txt-ForceBlocks] >> test.py::test[weak_field-optimize_weak_fields_map_combine--Results] [GOOD] >> test.py::test[weak_field-weak_field_join_where--ForceBlocks]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> ErasureBrandNew::Block42_restore_benchmark [GOOD] >> test.py::test[join-full_trivial-off-ForceBlocks] [GOOD]
Test command err:
totalSize# 498073317 period1# 1.610916s period2# 0.657467s MB/s1# 309.1863989 MB/s2# 757.5639796 factor# 2.450185332
|57.0%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/import_test |57.0%| [TS] {RESULT} ydb/library/yql/providers/s3/object_listers/ut/unittest |57.0%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part14/py2_flake8 >> test.py::test[join-full_trivial-off-Results] [SKIPPED] >> test.py::test[join-group_compact_by--ForceBlocks] >> test.py::test[aggr_factory-median-default.txt-Results] [GOOD] >> test.py::test[aggregate-aggregate_by_column_lookup_in_const_dict-default.txt-ForceBlocks] >> test.py::test[select-from_in_front-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-from_in_front-default.txt-Results] >> TBsVDiskManyPutGet::ManyPutRangeGet2ChannelsIndexOnly [GOOD] >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize |57.0%| [TA] {RESULT} $(B)/ydb/core/jaeger_tracing/ut/test-results/unittest/{meta.json ... results_accumulator.log} |57.0%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_storage/internal/ut/core-fq-libs-control_plane_storage-internal-ut |57.0%| [TS] {RESULT} ydb/tests/functional/sqs/with_quotas/flake8 |57.0%| [TA] {RESULT} $(B)/ydb/core/base/ut_auth/test-results/unittest/{meta.json ...
results_accumulator.log} >> test.py::test[join-left_all-off-ForceBlocks] [GOOD] >> test.py::test[join-left_all-off-Results] [SKIPPED] >> test.py::test[join-lookupjoin_inner--ForceBlocks] |57.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part3/flake8 |57.1%| [TS] {RESULT} ydb/tests/datashard/dump_restore/import_test |57.1%| [TS] {RESULT} ydb/tests/functional/sqs/cloud/import_test >> test.py::test[schema-user_schema_no_infer--ForceBlocks] [GOOD] >> test.py::test[schema-user_schema_no_infer--Results] |57.1%| [TS] {RESULT} ydb/tests/olap/s3_import/large/flake8 |57.1%| [LD] {RESULT} $(B)/ydb/library/yql/tools/dqrun/dqrun >> test.py::test[join-premap_merge_extrasort2-off-ForceBlocks] [GOOD] >> test.py::test[schema-select_all-row_spec_extra_sort-Results] [GOOD] >> test.py::test[schema-user_schema_directread-default.txt-ForceBlocks] >> test.py::test[join-premap_merge_extrasort2-off-Results] [SKIPPED] >> test.py::test[join-pullup_context_dep-off-ForceBlocks] |57.1%| [TS] {RESULT} ydb/library/yql/providers/dq/actors/ut/unittest |57.1%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/flake8 >> test.py::test[insert_monotonic-truncate_fail--Results] [GOOD] >> test.py::test[join-cbo_7tables--Results] >> test.py::test[distinct-distinct_and_join--ForceBlocks] [GOOD] >> test.py::test[join-cbo_7tables--Results] [SKIPPED] >> test.py::test[join-emptyjoin_unused_keys--Results] [SKIPPED] >> test.py::test[join-equi_join_by_expr--Results] >> test.py::test[distinct-distinct_and_join--Results] |57.1%| [TS] {RESULT} ydb/apps/dstool/import_test |57.1%| [TS] {RESULT} ydb/tests/stability/ydb/flake8 |57.1%| [TS] {RESULT} ydb/tests/fq/solomon/flake8 |57.1%| [TS] {RESULT} ydb/library/workload/tpch/ut/unittest |57.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/partition_stats/ut/unittest >> test.py::test[pg-tpcds-q08-default.txt-Results] [GOOD] >> test.py::test[join-star_join_inners_premap-off-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q31-default.txt-ForceBlocks] >> test.py::test[join-star_join_inners_premap-off-Results] [SKIPPED] |57.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part8/flake8 >> test.py::test[limit-empty_sort_calc_after_limit-default.txt-ForceBlocks] |57.1%| [TS] {RESULT} ydb/tests/olap/s3_import/large/import_test |57.1%| [TS] {RESULT} ydb/core/resource_pools/ut/unittest |57.1%| [TS] {RESULT} ydb/tests/functional/suite_tests/flake8 >> test.py::test[order_by-order_by_expr--Results] [GOOD] >> test.py::test[order_by-order_by_expr_with_deps-default.txt-ForceBlocks] >> test.py::test[join-star_join_semionly_premap--Results] [GOOD] >> test.py::test[join-star_join_semionly_premap-off-ForceBlocks] >> test.py::test[blocks-combine_all_decimal--Results] [GOOD] >> test.py::test[blocks-combine_all_some_filter--ForceBlocks] >> test.py::test[blocks-coalesce_complex-default.txt-Results] [GOOD] >> test.py::test[blocks-combine_all_count_filter_opt--ForceBlocks] |57.1%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/flake8 |57.1%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/import_test >> PartitionStats::Collector [GOOD] >> test.py::test[in-in_compact_distinct-empty-ForceBlocks] [GOOD] >> test.py::test[in-in_compact_distinct-empty-Results] >> test.py::test[window-distinct_over_window--ForceBlocks] [GOOD] >> test.py::test[window-distinct_over_window--Results] |57.1%| [TS] {RESULT} ydb/tests/functional/minidumps/flake8 |57.1%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/fifo/import_test >> test.py::test[action-subquery-default.txt-Results] [GOOD] >> 
test.py::test[aggr_factory-avg_distinct_expr-default.txt-ForceBlocks] >> test.py::test[pg-tpcds-q51-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q59-default.txt-ForceBlocks] |57.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/partition_stats/ut/unittest |57.2%| [TS] {RESULT} ydb/core/erasure/ut_perf/unittest >> test.py::test[select-from_in_front-default.txt-Results] [GOOD] >> test.py::test[select-match_clause--ForceBlocks] >> test.py::test[schema-user_schema_no_infer--Results] [GOOD] >> test.py::test[select-literal_negative-default.txt-ForceBlocks] |57.2%| [TS] {RESULT} ydb/core/kqp/federated_query/ut/unittest |57.2%| [TS] {RESULT} ydb/tests/functional/query_cache/flake8 |57.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/partition_stats/ut/unittest >> PartitionStats::Collector [GOOD] >> test.py::test[join-mapjoin_early_rewrite_star--Results] [GOOD] >> test.py::test[join-mapjoin_with_empty_struct--ForceBlocks] |57.2%| [AR] {RESULT} $(B)/yt/yt/client/libyt-yt-client.a >> test.py::test[key_filter-lambda_with_null_filter--Results] [GOOD] >> test.py::test[key_filter-pushdown_keyextract_type_adjust-default.txt-Results] >> test.py::test[key_filter-between_with_key_filter--ForceBlocks] [GOOD] >> test.py::test[key_filter-between_with_key_filter--Results] |57.2%| [TS] {RESULT} ydb/tests/fq/restarts/import_test |57.2%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part1/py2_flake8 |57.2%| [LD] {RESULT} $(B)/ydb/library/backup/ut/ydb-library-backup-ut >> test.py::test[column_order-select_plain-default.txt-Results] [GOOD] >> test.py::test[count-count_all-default.txt-ForceBlocks] |57.2%| [LD] {RESULT} $(B)/ydb/library/yql/dq/runtime/ut/ydb-library-yql-dq-runtime-ut |57.2%| [TS] {RESULT} ydb/tests/datashard/dml/import_test |57.2%| [TS] {RESULT} ydb/tests/olap/oom/flake8 >> test.py::test[datetime-date_tz_table_sort_asc--ForceBlocks] [GOOD] >> test.py::test[datetime-date_tz_table_sort_asc--Results] |57.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part4/flake8 >> test.py::test[optimizers-yql-10737_lost_passthrough-default.txt-ForceBlocks] [GOOD] >> test.py::test[optimizers-yql-10737_lost_passthrough-default.txt-Results] |57.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_reboots/unittest >> test.py::test[aggregate-group_by_session_compact--ForceBlocks] [GOOD] |57.2%| [TS] {RESULT} ydb/tests/datashard/select/flake8 >> test.py::test[in-in_compact_distinct-empty-Results] [GOOD] >> test.py::test[aggregate-group_by_session_compact--Results] >> test.py::test[insert-append_sorted--ForceBlocks] >> test.py::test[simple_columns-simple_columns_join_coalesce_without_left_semi_2-default.txt-ForceBlocks] [GOOD] >> test.py::test[simple_columns-simple_columns_join_coalesce_without_left_semi_2-default.txt-Results] |57.2%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/import_test >> test.py::test[insert_monotonic-keep_meta-default.txt-ForceBlocks] [GOOD] >> test.py::test[insert_monotonic-keep_meta-default.txt-Results] >> test.py::test[action-subquery-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-booland-default.txt-Results] |57.2%| [LD] {RESULT} $(B)/ydb/library/yql/providers/dq/scheduler/ut/ydb-library-yql-providers-dq-scheduler-ut |57.2%| [LD] {RESULT} $(B)/ydb/core/config/validation/ut/ydb-core-config-validation-ut |57.2%| [LD] {RESULT} $(B)/ydb/core/config/validation/auth_config_validator_ut/core-config-validation-auth_config_validator_ut |57.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ext_index/ut/unittest 
>> test.py::test[window-full/aggregations_leadlag--Results] [GOOD] >> test.py::test[window-full/leadlag--Results] >> test.py::test[aggregate-compare_by_nulls-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggregate-compare_by_nulls-default.txt-Results] >> test.py::test[order_by-sort_decimals--ForceBlocks] [GOOD] >> test.py::test[order_by-sort_decimals--Results] >> test.py::test[sampling-yql-14664_deps-default.txt-ForceBlocks] [GOOD] >> test.py::test[sampling-yql-14664_deps-default.txt-Results] |57.2%| [TS] {RESULT} ydb/core/config/validation/auth_config_validator_ut/unittest |57.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ext_index/ut/unittest |57.3%| [TS] {RESULT} ydb/tests/stress/log/tests/import_test |57.3%| [TS] {RESULT} ydb/tests/functional/wardens/flake8 |57.3%| [LD] {RESULT} $(B)/ydb/core/kafka_proxy/ut/ydb-core-kafka_proxy-ut |57.3%| [TS] {RESULT} ydb/tests/datashard/vector_index/import_test >> test.py::test[distinct-distinct_and_join--Results] [GOOD] >> test.py::test[distinct-distinct_by_tuple-default.txt-ForceBlocks] |57.3%| [TS] {RESULT} ydb/core/fq/libs/test_connection/ut/unittest |57.3%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/py3test |57.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part13/py2_flake8 |57.3%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/anubis_osiris/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[key_filter-between_with_key_filter--Results] [GOOD] >> test.py::test[key_filter-contains_optional--ForceBlocks] |57.3%| [TS] {RESULT} ydb/core/formats/arrow/ut/unittest |57.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part9/flake8 |57.3%| [TS] {RESULT} ydb/tests/stress/mixedpy/import_test |57.3%| [TS] {RESULT} ydb/tests/functional/audit/flake8 >> test.py::test[join-group_compact_by--ForceBlocks] [GOOD] >> test.py::test[join-group_compact_by--Results] >> TBlobStorageHullFresh::SimpleBackwardEnd [GOOD] >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |57.3%| [TS] {RESULT} ydb/tests/fq/yds/flake8 |57.3%| [TS] {RESULT} ydb/tests/library/ut/flake8 >> test.py::test[aggregate-aggregate_by_column_lookup_in_const_dict-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggregate-aggregate_by_column_lookup_in_const_dict-default.txt-Results] |57.3%| [TS] {RESULT} ydb/core/config/validation/column_shard_config_validator_ut/unittest >> test.py::test[pg-tpcds-q33-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q33-default.txt-Results] >> test.py::test[datetime-date_tz_table_sort_asc--Results] [GOOD] >> test.py::test[expr-inline_call--ForceBlocks] >> test.py::test[join-simple_columns_partial--ForceBlocks] [GOOD] >> test.py::test[join-simple_columns_partial--Results] >> test.py::test[optimizers-yql-10737_lost_passthrough-default.txt-Results] [GOOD] >> test.py::test[order_by-literal_take_zero_sort--ForceBlocks] >> test.py::test[join-lookupjoin_inner--ForceBlocks] [GOOD] >> test.py::test[join-lookupjoin_inner--Results] |57.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::SimpleBackWardMiddle2Times [GOOD] |57.3%| [TS] {RESULT} ydb/core/kqp/ut/federated_query/common/clang_format |57.3%| [TS] {RESULT} ydb/tests/stress/oltp_workload/import_test |57.3%| [TS] {RESULT} ydb/core/io_formats/arrow/scheme/ut/unittest |57.3%| [TS] {RESULT} ydb/tests/stress/transfer/import_test >> test.py::test[weak_field-weak_field_join_where--ForceBlocks] [GOOD] >> test.py::test[schema-user_schema_directread-default.txt-ForceBlocks] [GOOD] >> 
test.py::test[weak_field-weak_field_join_where--Results] >> test.py::test[schema-user_schema_directread-default.txt-Results] >> test.py::test[blocks-date_greater--Results] [GOOD] >> test.py::test[blocks-date_less_scalar--ForceBlocks] |57.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part5/py2_flake8 >> test.py::test[join-pullup_context_dep-off-ForceBlocks] [GOOD] >> test.py::test[insert_monotonic-keep_meta-default.txt-Results] [GOOD] >> KqpErrors::ProposeResultLost_RwTx+UseSink |57.4%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/example_configs/unittest >> test.py::test[window-distinct_over_window--Results] [GOOD] >> test.py::test[window-full/noncompact_with_nulls--ForceBlocks] >> DataStreams::TestControlPlaneAndMeteringData >> test.py::test[aggregate-group_by_session_compact--Results] [GOOD] >> test.py::test[aggregate-group_compact_sorted_distinct_complex--ForceBlocks] >> test.py::test[join-bush_dis_in--ForceBlocks] >> test.py::test[join-pullup_context_dep-off-Results] [SKIPPED] >> test.py::test[join-split_to_list_as_key--ForceBlocks] >> test.py::test[aggr_factory-avg-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-avg-default.txt-Results] >> TBtreeIndexTPartLarge::BigKeys1GB [GOOD] >> TBtreeIndexTPartLarge::CutKeys |57.4%| [TS] {RESULT} ydb/library/yql/providers/dq/scheduler/ut/unittest |57.4%| [TS] {RESULT} ydb/tests/stress/mixedpy/flake8 |57.4%| [TA] {RESULT} $(B)/ydb/core/sys_view/service/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[limit-empty_sort_calc_after_limit-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q31-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q31-default.txt-Results] >> test.py::test[limit-empty_sort_calc_after_limit-default.txt-Results] |57.4%| [TS] {RESULT} ydb/tests/olap/flake8 >> test.py::test[select-match_clause--ForceBlocks] [GOOD] >> test.py::test[select-match_clause--Results] >> test.py::test[simple_columns-simple_columns_join_coalesce_without_left_semi_2-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q59-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q59-default.txt-Results] >> test.py::test[blocks-combine_all_count_filter_opt--ForceBlocks] [GOOD] >> test.py::test[blocks-combine_all_count_filter_opt--Results] >> test.py::test[blocks-combine_all_some_filter--ForceBlocks] [GOOD] >> test.py::test[blocks-combine_all_some_filter--Results] >> test.py::test[simple_columns-simple_columns_join_fail--ForceBlocks] >> DataStreams::TestPutRecordsOfAnauthorizedUser >> test.py::test[sampling-yql-14664_deps-default.txt-Results] [GOOD] >> test.py::test[schema-fake_column-default.txt-ForceBlocks] >> test.py::test[select-literal_negative-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-literal_negative-default.txt-Results] |57.4%| [TS] {RESULT} ydb/tests/functional/autoconfig/flake8 |57.4%| [TS] {RESULT} ydb/tests/stress/statistics_workload/flake8 |57.4%| [TS] {RESULT} ydb/tests/datashard/split_merge/import_test |57.4%| [TS] {RESULT} ydb/library/yql/providers/yt/actors/ut/unittest >> test.py::test[join-star_join_semionly_premap-off-ForceBlocks] [GOOD] |57.4%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/ydb-core-blobstorage-ut_vdisk2 >> test.py::test[join-star_join_semionly_premap-off-Results] >> test.py::test[join-star_join_semionly_premap-off-Results] [SKIPPED] >> test.py::test[join-yql-12022--ForceBlocks] >> test.py::test[aggregate-aggregate_by_column_lookup_in_const_dict-default.txt-Results] [GOOD] >> DataStreams::TestControlPlaneAndMeteringData 
[GOOD] >> test.py::test[order_by-order_by_expr_with_deps-default.txt-ForceBlocks] [GOOD] >> test.py::test[order_by-order_by_expr_with_deps-default.txt-Results] >> DataStreams::ChangeBetweenRetentionModes >> test.py::test[count-count_all-default.txt-ForceBlocks] [GOOD] >> test.py::test[join-mapjoin_with_empty_struct--ForceBlocks] [GOOD] >> test.py::test[join-mapjoin_with_empty_struct--Results] >> test.py::test[aggregate-aggregation_by_udf--ForceBlocks] >> test.py::test[order_by-sort_decimals--Results] [GOOD] >> test.py::test[schema-user_schema_directread-default.txt-Results] [GOOD] >> test.py::test[order_by-sort_simple--ForceBlocks] >> test.py::test[select-hits_count--ForceBlocks] >> test.py::test[count-count_all-default.txt-Results] >> test.py::test[join-lookupjoin_inner--Results] [GOOD] >> test.py::test[join-equi_join_by_expr--Results] [GOOD] >> test.py::test[join-equi_join_by_expr-off-Results] [SKIPPED] >> test.py::test[join-lookupjoin_semi_1o2o--ForceBlocks] |57.4%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part7/py2_flake8 |57.4%| [TS] {RESULT} ydb/tests/functional/ttl/import_test >> test.py::test[join-filter_joined--Results] >> test.py::test[pg-tpcds-q33-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q42-default.txt-ForceBlocks] |57.4%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part16/py2_flake8 >> test.py::test[join-group_compact_by--Results] [GOOD] >> test.py::test[join-inner_all--ForceBlocks] >> test.py::test[select-match_clause--Results] [GOOD] >> test.py::test[key_filter-pushdown_keyextract_type_adjust-default.txt-Results] [GOOD] >> test.py::test[key_filter-uuid--Results] >> test.py::test[select-one_labeled_column-default.txt-ForceBlocks] |57.4%| [TS] {RESULT} ydb/library/yql/providers/dq/provider/ut/unittest |57.4%| [TS] {RESULT} ydb/tests/functional/blobstorage/import_test |57.4%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/postgresql/flake8 >> test.py::test[select-literal_negative-default.txt-Results] [GOOD] >> test.py::test[select-optional_pull--ForceBlocks] >> DataStreams::ChangeBetweenRetentionModes [GOOD] >> DataStreams::TestCreateExistingStream >> test.py::test[pg-tpcds-q59-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-boolor-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q91-default.txt-ForceBlocks] >> test.py::test[aggr_factory-max_by-default.txt-Results] >> test.py::test[blocks-combine_all_some_filter--Results] [GOOD] >> test.py::test[blocks-combine_all_sum--ForceBlocks] |57.4%| [TS] {RESULT} ydb/tests/functional/tpc/large/import_test |57.4%| [TS] {RESULT} ydb/tests/functional/sqs/large/import_test |57.4%| [TS] {RESULT} ydb/core/blobstorage/crypto/ut/unittest >> test.py::test[simple_columns-simple_columns_join_fail--ForceBlocks] [GOOD] >> test.py::test[simple_columns-simple_columns_join_fail--Results] [GOOD] >> test.py::test[simple_columns-simple_columns_join_without_resolve_dublicates_mult-default.txt-ForceBlocks] |57.4%| [TS] {RESULT} ydb/mvp/oidc_proxy/ut/unittest |57.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/huge/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |57.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/incrhuge/ut/ydb-core-blobstorage-incrhuge-ut |57.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/ydb-core-fq-libs-compute-common-ut >> test.py::test[pg-tpcds-q31-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q66-default.txt-ForceBlocks] >> test.py::test[aggregate-compare_by_nulls-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_by_expr--ForceBlocks] >> test.py::test[limit-empty_sort_calc_after_limit-default.txt-Results] [GOOD] >> test.py::test[blocks-combine_all_count_filter_opt--Results] [GOOD] >> test.py::test[limit-limit--ForceBlocks] >> test.py::test[coalesce-coalesce--ForceBlocks] >> test.py::test[distinct-distinct_by_tuple-default.txt-ForceBlocks] [GOOD] >> DataStreams::TestPutRecordsOfAnauthorizedUser [GOOD] >> DataStreams::TestPutRecordsWithRead >> test.py::test[distinct-distinct_by_tuple-default.txt-Results] |57.5%| [TS] {RESULT} ydb/tests/olap/docs/generator/import_test |57.5%| [LD] {RESULT} $(B)/ydb/core/ymq/base/ut/ydb-core-ymq-base-ut >> test.py::test[count-count_all-default.txt-Results] [GOOD] >> test.py::test[count-count_by_nulls--ForceBlocks] |57.5%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/flake8 |57.5%| [TS] {RESULT} ydb/tests/functional/suite_tests/import_test |57.5%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part19/py2_flake8 |57.5%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part18/py2_flake8 |57.5%| [TS] {RESULT} ydb/tests/datashard/ttl/import_test >> test.py::test[expr-inline_call--ForceBlocks] [GOOD] >> test.py::test[weak_field-weak_field_join_where--Results] [GOOD] >> test.py::test[window-current/ansi_current_with_win--ForceBlocks] >> test.py::test[order_by-literal_take_zero_sort--ForceBlocks] [GOOD] >> test.py::test[order_by-literal_take_zero_sort--Results] >> test.py::test[expr-inline_call--Results] >> test.py::test[aggr_factory-avg_distinct_expr-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-avg_distinct_expr-default.txt-Results] |57.5%| [TS] {RESULT} ydb/tests/olap/common/flake8 >> KqpErrors::ProposeResultLost_RwTx+UseSink [GOOD] >> KqpErrors::ProposeResultLost_RwTx-UseSink >> test.py::test[join-mapjoin_with_empty_struct--Results] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_cross--ForceBlocks] |57.5%| [TS] {RESULT} ydb/core/fq/libs/hmac/ut/unittest |57.5%| [TS] {RESULT} ydb/tests/olap/ttl_tiering/flake8 |57.5%| [TM] {RESULT} ydb/library/yql/tests/sql/solomon/pytest |57.5%| [TS] {RESULT} ydb/tests/datashard/dml/flake8 >> DataStreams::TestCreateExistingStream [GOOD] >> DataStreams::ListStreamsValidation |57.5%| [TS] {RESULT} ydb/core/tx/long_tx_service/public/ut/unittest |57.5%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part16/flake8 |57.5%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/import_test |57.5%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/import_test |57.5%| [TS] {RESULT} ydb/library/backup/ut/unittest |57.5%| [TS] {RESULT} ydb/tests/tools/pq_read/test/flake8 |57.6%| [TS] {RESULT} ydb/tests/datashard/async_replication/flake8 |57.6%| [TS] {RESULT} ydb/core/tx/sequenceshard/public/ut/unittest |57.6%| [TS] {RESULT} ydb/tests/functional/tpc/medium/import_test |57.6%| [TS] {RESULT} ydb/services/persqueue_cluster_discovery/cluster_ordering/ut/unittest |57.6%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/oracle/flake8 |57.6%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/join/flake8 >> test.py::test[key_filter-contains_optional--ForceBlocks] [GOOD] >> 
test.py::test[key_filter-contains_optional--Results] |57.6%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part10/py2_flake8 |57.6%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part4/py2_flake8 |57.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part0/flake8 >> DataStreams::TestPutRecordsWithRead [GOOD] >> DataStreams::TestPutRecordsCornerCases |57.6%| [TS] {RESULT} ydb/public/tools/ydb_recipe/flake8 |57.6%| [TS] {RESULT} ydb/tests/fq/s3/flake8 |57.6%| [TS] {RESULT} ydb/tests/stress/olap_workload/tests/import_test |57.6%| [TS] {RESULT} ydb/tests/fq/streaming_optimize/flake8 |57.6%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part1/py2_flake8 |57.6%| [TS] {RESULT} ydb/library/yql/dq/state/ut/unittest |57.6%| [TS] {RESULT} ydb/library/yql/providers/s3/common/ut/unittest |57.6%| [TS] {RESULT} ydb/library/benchmarks/runner/runner/flake8 >> test.py::test[order_by-order_by_expr_with_deps-default.txt-Results] [GOOD] >> test.py::test[join-simple_columns_partial--Results] [GOOD] >> test.py::test[join-yql-10654_pullup_with_sys_columns--ForceBlocks] >> test.py::test[order_by-order_by_num_key_and_subkey_desc--ForceBlocks] |57.6%| [TS] {RESULT} ydb/tests/functional/limits/import_test |57.6%| [TS] {RESULT} ydb/apps/dstool/flake8 >> DataStreams::ListStreamsValidation [GOOD] >> test.py::test[expr-inline_call--Results] [GOOD] >> test.py::test[expr-tagged_runtime-default.txt-ForceBlocks] >> test.py::test[order_by-literal_take_zero_sort--Results] [GOOD] >> test.py::test[order_by-order_by_value_desc-default.txt-ForceBlocks] |57.6%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part7/flake8 |57.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/s3/provider/ut/ydb-library-yql-providers-s3-provider-ut |57.7%| [TS] {RESULT} ydb/tests/functional/encryption/import_test |57.7%| [TS] {RESULT} ydb/core/pgproxy/ut/unittest >> test.py::test[aggr_factory-avg-default.txt-Results] [GOOD] >> test.py::test[aggregate-aggrs_no_grouping--ForceBlocks] |57.7%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/unit/client/result/test-results/unittest/{meta.json ... 
results_accumulator.log} |57.7%| [TS] {RESULT} ydb/tests/olap/import_test |57.7%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/replay/import_test |57.7%| [LD] {RESULT} $(B)/ydb/core/external_sources/object_storage/inference/ut/external_sources-object_storage-inference-ut |57.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_pdiskfit/ut/ydb-core-blobstorage-ut_pdiskfit-ut |57.7%| [TS] {RESULT} ydb/tests/functional/postgresql/flake8 >> test.py::test[join-split_to_list_as_key--ForceBlocks] [GOOD] >> test.py::test[join-split_to_list_as_key--Results] >> test.py::test[aggregate-group_compact_sorted_distinct_complex--ForceBlocks] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::ListStreamsValidation [GOOD]
Test command err:
2025-06-03T10:23:29.126107Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666773711731851:2075];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:29.126134Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f40/r3tmp/tmpNZTQuI/pdisk_1.dat TServer::EnableGrpc on GrpcPort 17076, node 1
2025-06-03T10:23:29.220359Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:23:29.220531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:29.220540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:29.220542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:29.220581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8005 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response:
2025-06-03T10:23:29.226566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:29.226595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:29.228556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-03T10:23:29.235383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:29.271283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:8005
2025-06-03T10:23:29.286972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:29.500776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480
2025-06-03T10:23:29.577086Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037890:1][1:7511666773711733374:2369] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:6:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }
2025-06-03T10:23:29.600596Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-06-03T10:23:29.627541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-06-03T10:23:29.632876Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037894 not found
2025-06-03T10:23:29.632881Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037899 not found
2025-06-03T10:23:29.632882Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037896 not found
2025-06-03T10:23:29.632884Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037904 not found
2025-06-03T10:23:29.632885Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037902 not found
2025-06-03T10:23:29.632887Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037906 not found
2025-06-03T10:23:29.632888Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037905 not found
2025-06-03T10:23:29.632889Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found
2025-06-03T10:23:29.632890Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037907 not found
2025-06-03T10:23:29.632892Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037901 not found
2025-06-03T10:23:29.632893Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037908 not found
2025-06-03T10:23:29.632894Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037900 not found
2025-06-03T10:23:29.632896Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found
2025-06-03T10:23:29.632897Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037903 not found
2025-06-03T10:23:29.632898Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found
2025-06-03T10:23:29.633224Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037895 not found
2025-06-03T10:23:30.428239Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666776801480760:2075];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:30.428499Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f40/r3tmp/tmpBLm8qa/pdisk_1.dat
2025-06-03T10:23:30.463409Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22233, node 4
2025-06-03T10:23:30.474320Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:30.474330Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:30.474333Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:30.474373Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-03T10:23:30.528694Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:30.528728Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:30.530321Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:30.555201Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:30.589094Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:2809
2025-06-03T10:23:30.599513Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:30.687512Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-06-03T10:23:30.702384Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-06-03T10:23:30.715821Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-06-03T10:23:31.322637Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666778767227874:2076];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:31.322796Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f40/r3tmp/tmpAkCOPe/pdisk_1.dat
2025-06-03T10:23:31.342677Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8985, node 7
2025-06-03T10:23:31.356498Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:31.356519Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:31.356521Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:31.356575Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10779 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-03T10:23:31.422626Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:31.422675Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:31.424557Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:31.431503Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:31.448861Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:10779
2025-06-03T10:23:31.464100Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:31.512881Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511666778767229908:3396] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestCreateExistingStream\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp:345" severity: 1 }
2025-06-03T10:23:32.191352Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511666786493592920:2076];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:32.191757Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f40/r3tmp/tmpT4T7ox/pdisk_1.dat
2025-06-03T10:23:32.218116Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3342, node 10
2025-06-03T10:23:32.238188Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:32.238204Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:32.238206Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:32.238253Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13889 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-03T10:23:32.291450Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:32.291475Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:32.293147Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:32.303227Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting...
2025-06-03T10:23:32.318361Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:13889
2025-06-03T10:23:32.329502Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting...
|57.7%| [TM] {RESULT} ydb/library/yql/providers/pq/provider/ut/unittest |57.7%| [TS] {RESULT} ydb/tests/stability/tool/import_test |57.7%| [TS] {RESULT} ydb/tools/ydbd_slice/bin/import_test >> test.py::test[aggregate-group_compact_sorted_distinct_complex--Results] |57.7%| [TS] {RESULT} ydb/tests/stress/node_broker/tests/flake8 |57.7%| [TS] {RESULT} ydb/tests/functional/config/flake8 |57.7%| [TS] {RESULT} ydb/tests/stress/oltp_workload/flake8 |57.7%| [TS] {RESULT} ydb/tests/stress/log/tests/flake8 >> test.py::test[distinct-distinct_by_tuple-default.txt-Results] [GOOD] >> test.py::test[dq-blacklisted_pragmas1--ForceBlocks] |57.7%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part17/py2_flake8 |57.7%| [TS] {RESULT} ydb/tests/stress/statistics_workload/import_test |57.7%| [TS] {RESULT} ydb/library/yaml_config/static_validator/ut/unittest |57.7%| [TS] {RESULT} ydb/tests/fq/plans/flake8 |57.8%| [TS] {RESULT} ydb/tests/olap/scenario/flake8 |57.8%| [TS] {RESULT} ydb/tests/functional/encryption/flake8 |57.8%| [TS] {RESULT} ydb/library/yaml_config/ut_transform/flake8 >> test.py::test[key_filter-contains_optional--Results] [GOOD] >> test.py::test[key_filter-contains_tuples_no_keyfilter-default.txt-ForceBlocks] >> test.py::test[pg-tpcds-q42-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q42-default.txt-Results] >> test.py::test[schema-fake_column-default.txt-ForceBlocks] [GOOD] >> test.py::test[schema-fake_column-default.txt-Results] |57.8%| [TS] {RESULT} ydb/public/tools/lib/cmds/ut/import_test |57.8%| [TS] {RESULT} ydb/library/benchmarks/runner/runner/import_test >> test.py::test[select-one_labeled_column-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-one_labeled_column-default.txt-Results] >> test.py::test[aggregate-aggregation_by_udf--ForceBlocks] [GOOD] |57.8%| [TS] {RESULT} ydb/tests/olap/docs/generator/flake8 |57.8%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part11/flake8 |57.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part0/py2_flake8 |57.8%| [TS] {RESULT} ydb/library/benchmarks/runner/import_test |57.8%| [TS] {RESULT} ydb/core/blobstorage/base/ut/gtest |57.8%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part7/py2_flake8 >> test.py::test[join-yql-12022--ForceBlocks] [GOOD] >> test.py::test[join-yql-12022--Results] >> test.py::test[aggregate-aggregation_by_udf--Results] >> test.py::test[join-inner_all--ForceBlocks] [GOOD] >> test.py::test[join-inner_all--Results] >> test.py::test[join-lookupjoin_semi_1o2o--ForceBlocks] [GOOD] >> test.py::test[join-lookupjoin_semi_1o2o--Results] |57.8%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ms_sql_server/flake8 |57.8%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part4/py2_flake8 |57.8%| [TS] {RESULT} ydb/tests/fq/http_api/flake8 |57.8%| [TS] {RESULT} ydb/tests/stress/kv/tests/flake8 >> test.py::test[select-hits_count--ForceBlocks] [GOOD] >> test.py::test[select-hits_count--Results] >>
test.py::test[aggr_factory-avg_distinct_expr-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-bitand-default.txt-ForceBlocks] >> test.py::test[join-bush_dis_in--ForceBlocks] [GOOD] >> test.py::test[join-bush_dis_in--Results] >> DataStreams::TestGetRecordsStreamWithSingleShard |57.8%| [TS] {RESULT} ydb/tests/olap/s3_import/flake8 |57.8%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/ydb/flake8 |57.8%| [TS] {RESULT} ydb/tests/olap/column_family/compression/flake8 >> test.py::test[window-full/noncompact_with_nulls--ForceBlocks] [GOOD] >> test.py::test[window-full/noncompact_with_nulls--Results] >> test.py::test[coalesce-coalesce--ForceBlocks] [GOOD] >> test.py::test[coalesce-coalesce--Results] |57.8%| [TS] {RESULT} ydb/tests/functional/sqs/large/flake8 |57.8%| [TS] {RESULT} ydb/library/benchmarks/runner/run_tests/import_test |57.8%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/replay/flake8 |57.9%| [TS] {RESULT} ydb/tests/tools/pq_read/test/import_test |57.9%| [TS] {RESULT} ydb/tests/tools/ydb_serializable/flake8 |57.9%| [TS] {RESULT} ydb/tests/fq/multi_plane/flake8 >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] |57.9%| [TS] {RESULT} ydb/tests/functional/sqs/messaging/flake8 |57.9%| [TS] {RESULT} ydb/core/debug_tools/ut/unittest >> test.py::test[pg-tpcds-q91-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q91-default.txt-Results] |57.9%| [TS] {RESULT} ydb/tests/stress/simple_queue/flake8 >> test.py::test[limit-limit--ForceBlocks] [GOOD] >> test.py::test[limit-limit--Results] >> test.py::test[aggr_factory-booland-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-histogram-default.txt-Results] >> test.py::test[pg-tpcds-q42-default.txt-Results] [GOOD] |57.9%| [TS] {RESULT} ydb/tests/sql/flake8 |57.9%| [TS] {RESULT} ydb/tests/fq/common/import_test >> test.py::test[select-optional_pull--ForceBlocks] [GOOD] |57.9%| [TS] {RESULT} ydb/library/yaml_config/tools/simple_json_diff/import_test >> test.py::test[aggregate-group_by_expr--ForceBlocks] [GOOD] >> test.py::test[join-filter_joined--Results] [GOOD] >> test.py::test[select-optional_pull--Results] |57.9%| [TS] {RESULT} ydb/tests/datashard/vector_index/flake8 >> test.py::test[aggregate-group_by_expr--Results] >> test.py::test[select-one_labeled_column-default.txt-Results] [GOOD] >> test.py::test[join-flatten_columns2-off-Results] [SKIPPED] >> test.py::test[tpch-q7-default.txt-ForceBlocks] >> test.py::test[join-from_in_front_join--Results] >> test.py::test[pg-tpcds-q73-default.txt-ForceBlocks] >> test.py::test[count-count_by_nulls--ForceBlocks] [GOOD] >> test.py::test[count-count_by_nulls--Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp_errors/unittest >> KqpErrors::ProposeResultLost_RwTx-UseSink [GOOD] Test command err: 2025-06-03T10:23:29.823729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:707:2415], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:29.823778Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:29.823792Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:23:29.823998Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:704:2359], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:29.824037Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:29.824054Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fec/r3tmp/tmpqa8Nly/pdisk_1.dat 2025-06-03T10:23:29.919146Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:30.024605Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:30.112459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:30.112512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:30.113676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:30.113699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:30.125025Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:30.125199Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:30.125287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:30.372772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:30.857967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1578:2948], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:30.857994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1588:2953], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:30.858001Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:30.858814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:23:31.354897Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:1592:2956], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:23:31.494241Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1730:3034] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:23:31.543458Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:190: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Bootstrap done, become ReadyState 2025-06-03T10:23:31.543559Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:585: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 2, stages: 1 2025-06-03T10:23:31.543579Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-03T10:23:31.543609Z node 1 :KQP_EXECUTER TRACE: kqp_executer_impl.h:600: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got request, become WaitResolveState 2025-06-03T10:23:31.543685Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715660. Resolved key sets: 1 2025-06-03T10:23:31.543729Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715660. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 2 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 4 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-03T10:23:31.543762Z node 1 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2034: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] AST: ( (return (lambda '() (block '( (let $1 (Just (Uint32 '1))) (let $2 (Just (Uint32 '2))) (let $3 (Just (Uint32 '3))) (return (Iterator (AsList (AsStruct '('"key" $1) '('"value" $1)) (AsStruct '('"key" $2) '('"value" $2)) (AsStruct '('"key" $3) '('"value" $3))))) )))) ) 2025-06-03T10:23:31.543788Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:1476: ActorId: [1:1756:2946] TxId: 281474976715660. 
Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Stage [0,0] create compute task: 1 2025-06-03T10:23:31.543809Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:23:31.543818Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-03T10:23:31.543899Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1759:2946] 2025-06-03T10:23:31.543912Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [1:1759:2946], channels: 0 2025-06-03T10:23:31.543933Z node 1 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 1, readonly: 0, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-03T10:23:31.543939Z node 1 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2808: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-03T10:23:31.543945Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [1:1759:2946] 2025-06-03T10:23:31.543950Z node 1 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Sending channels info to compute actor: [1:1759:2946], channels: 0 2025-06-03T10:23:31.543966Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [1:1756:2946] TxId: 281474976715660. Ctx: { TraceId: 01jwtn2m1906mr6mr8380y3zq0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjg1OTM0YzMtYjA3Njc0ZTAtM2EzZTE1ZjMtOTJhZjk5MGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:1759:2946], 2025-06-03T10:23:31.543974Z node 1 :KQP_EXECUTER DEBUG: kqp_data_ex ... n/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CT 1, CA [3:1822:3088], 2025-06-03T10:23:34.524659Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [3:1822:3088], 2025-06-03T10:23:34.524814Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:768: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing task: 1 on compute actor: [4:1824:2472] 2025-06-03T10:23:34.524825Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:793: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Collect channels updates for task: 1 at actor [4:1824:2472] 2025-06-03T10:23:34.524833Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:829: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Task: 1, output channelId: 1, dst task: 2, at actor [3:1822:3088] 2025-06-03T10:23:34.524841Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [4:1824:2472], channels: 1 2025-06-03T10:23:34.524848Z node 3 :KQP_EXECUTER DEBUG: kqp_planner.cpp:785: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [3:1822:3088], channels: 1 2025-06-03T10:23:34.524867Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [4:1824:2472], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-03T10:23:34.524874Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2472], CA [3:1822:3088], 2025-06-03T10:23:34.524881Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1824:2472], CA [3:1822:3088], 2025-06-03T10:23:34.524960Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [4:1824:2472], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 143 Tasks { TaskId: 1 CpuTimeUs: 92 ComputeCpuTimeUs: 5 BuildCpuTimeUs: 87 HostName: "ghrun-pyvh3niaay" NodeId: 4 CreateTimeMs: 1748946214524 CurrentWaitInputTimeUs: 7 UpdateTimeMs: 1748946214524 } MaxMemoryUsage: 1048576 } 2025-06-03T10:23:34.524997Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2472], CA [3:1822:3088], 2025-06-03T10:23:34.525005Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [4:1824:2472], CA [3:1822:3088], 2025-06-03T10:23:34.526739Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:383: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1823:3088], finished: 0 2025-06-03T10:23:34.526776Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:386: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Send ack to channelId: 2, seqNo: 1, to: [3:1823:3088] 2025-06-03T10:23:34.528238Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:383: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got result, channelId: 2, shardId: 0, inputIndex: 0, from: [3:1823:3088], finished: 1 2025-06-03T10:23:34.528258Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:386: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send ack to channelId: 2, seqNo: 2, to: [3:1823:3088] 2025-06-03T10:23:34.528588Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [3:1822:3088], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 410 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 249 FinishTimeMs: 1748946214528 InputRows: 3 InputBytes: 12 OutputRows: 3 OutputBytes: 12 ResultRows: 3 ResultBytes: 12 ComputeCpuTimeUs: 55 BuildCpuTimeUs: 194 HostName: "ghrun-pyvh3niaay" NodeId: 3 CreateTimeMs: 1748946214523 UpdateTimeMs: 1748946214528 } MaxMemoryUsage: 1048576 } 2025-06-03T10:23:34.528614Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [3:1822:3088] 2025-06-03T10:23:34.528630Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [4:1824:2472], 2025-06-03T10:23:34.528641Z node 3 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [4:1824:2472], 2025-06-03T10:23:34.528724Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: ExecuteState, got execution state from compute actor: [4:1824:2472], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 386 DurationUs: 2000 Tasks { TaskId: 1 CpuTimeUs: 131 FinishTimeMs: 1748946214528 OutputRows: 3 OutputBytes: 12 Tables { TablePath: "/Root/table-1" ReadRows: 3 ReadBytes: 24 AffectedPartitions: 4 } IngressRows: 3 ComputeCpuTimeUs: 44 BuildCpuTimeUs: 87 WaitInputTimeUs: 1673 HostName: "ghrun-pyvh3niaay" NodeId: 4 StartTimeMs: 1748946214526 CreateTimeMs: 1748946214524 UpdateTimeMs: 1748946214528 } MaxMemoryUsage: 1048576 } 2025-06-03T10:23:34.528736Z node 3 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [4:1824:2472] 2025-06-03T10:23:34.528788Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:23:34.528796Z node 3 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2165: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-03T10:23:34.528807Z node 3 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [3:1814:3088] TxId: 281474976715663. Ctx: { TraceId: 01jwtn2qk368skzch5x946bn64, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NGYzMThhZjMtYmZmNWU1MjAtNzMxOTdmYmItOWJhYTU4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000796s ReadRows: 3 ReadBytes: 24 ru: 3 rate limiter was not found force flag: 1 { items { uint32_value: 1 } items { uint32_value: 1 } }, { items { uint32_value: 2 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 3 } } >> test.py::test[blocks-combine_all_sum--ForceBlocks] [GOOD] >> test.py::test[schema-fake_column-default.txt-Results] [GOOD] >> test.py::test[window-full/leadlag--Results] [GOOD] >> test.py::test[window-generic/aggregations_mixed--Results] >> test.py::test[blocks-combine_all_sum--Results] >> test.py::test[aggregate-group_compact_sorted_distinct_complex--Results] [GOOD] >> test.py::test[binding-table_range_strict_binding-default.txt-ForceBlocks] |57.9%| [TS] {RESULT} ydb/tests/datashard/split_merge/flake8 |57.9%| [TS] {RESULT} ydb/public/tools/ydb_recipe/import_test >> test.py::test[schema-select_all-read_schema-ForceBlocks] >> test.py::test[simple_columns-simple_columns_join_without_resolve_dublicates_mult-default.txt-ForceBlocks] [GOOD] >> test.py::test[simple_columns-simple_columns_join_without_resolve_dublicates_mult-default.txt-Results] >> test.py::test[aggregate-aggregation_by_udf--Results] [GOOD] |57.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/hulldb/cache_block/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |57.9%| [TS] {RESULT} ydb/tests/postgres_integrations/go-libpq/import_test |57.9%| [TS] {RESULT} ydb/public/lib/ydb_cli/common/yql_parser/ut/unittest |57.9%| [TS] {RESULT} ydb/core/config/ut/unittest >> test.py::test[aggregate-avg_interval-default.txt-ForceBlocks] |57.9%| [TS] {RESULT} ydb/library/yql/tests/sql/solomon/py2_flake8 |57.9%| [TS] {RESULT} ydb/core/viewer/tests/import_test |57.9%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator_checks/unittest >> test.py::test[join-split_to_list_as_key--Results] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_cross--ForceBlocks] [GOOD] >> test.py::test[join-star_join-off-ForceBlocks] >> test.py::test[order_by-sort_simple--ForceBlocks] [GOOD] >> test.py::test[join-inner_all--Results] [GOOD] >> test.py::test[pg-tpcds-q66-default.txt-ForceBlocks] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_cross--Results] >> test.py::test[join-join_comp_common_table--ForceBlocks] >> test.py::test[pg-tpcds-q66-default.txt-Results] |57.9%| [TS] {RESULT} ydb/library/yaml_config/validator/ut/validator/unittest |58.0%| [TS] {RESULT} ydb/library/yql/providers/s3/credentials/ut/unittest |58.0%| [TS] {RESULT} ydb/tests/functional/sqs/merge_split_common_table/std/flake8 |58.0%| [TS] {RESULT} ydb/library/protobuf_printer/ut/unittest |58.0%| [TS] {RESULT} ydb/tests/functional/tpc/medium/flake8 >> test.py::test[order_by-sort_simple--Results] >> test.py::test[coalesce-coalesce--Results] [GOOD] >> test.py::test[column_group-hint_anon-disable-ForceBlocks] >> test.py::test[join-yql-12022--Results] [GOOD] >> test.py::test[join-yql-14829_left--ForceBlocks] >> DataStreams::TestGetRecordsStreamWithSingleShard [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneByTS >> test.py::test[column_group-hint_anon-disable-ForceBlocks] [SKIPPED] >> test.py::test[column_group-hint_anon-disable-Results] [SKIPPED] >> test.py::test[column_group-publish-perusage-ForceBlocks] [SKIPPED] >> test.py::test[column_group-publish-perusage-Results] [SKIPPED] >> test.py::test[join-lookupjoin_semi_1o2o--Results] [GOOD] >> test.py::test[join-lookupjoin_with_cache--ForceBlocks] >> test.py::test[limit-limit--Results] [GOOD] >> test.py::test[lineage-if_struct-default.txt-ForceBlocks] [SKIPPED] >> test.py::test[lineage-if_struct-default.txt-Results] [SKIPPED] |58.0%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/synclog/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.0%| [TS] {RESULT} ydb/tests/stress/node_broker/import_test |58.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part19/flake8 |58.0%| [TS] {RESULT} ydb/library/benchmarks/runner/flake8 >> test.py::test[pg-tpcds-q91-default.txt-Results] [GOOD] >> test.py::test[pg-tpch-q04-default.txt-ForceBlocks] >> test.py::test[column_order-align_publish_native--ForceBlocks] >> test.py::test[optimizers-combinebykey_fields_subset_range--ForceBlocks] >> test.py::test[order_by-order_by_num_key_and_subkey_desc--ForceBlocks] [GOOD] >> test.py::test[order_by-order_by_num_key_and_subkey_desc--Results] >> DataStreams::TestStreamStorageRetention >> test.py::test[order_by-order_by_value_desc-default.txt-ForceBlocks] [GOOD] >> test.py::test[order_by-order_by_value_desc-default.txt-Results] |58.0%| [TS] {RESULT} ydb/library/workload/tpcc/ut/unittest |58.0%| [TS] {RESULT} ydb/tests/postgres_integrations/library/ut/py3test >> test.py::test[select-hits_count--Results] [GOOD] >> test.py::test[join-yql-10654_pullup_with_sys_columns--ForceBlocks] [GOOD] >> test.py::test[select-trivial_order_by-default.txt-ForceBlocks] >> test.py::test[join-yql-10654_pullup_with_sys_columns--Results] |58.0%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part6/flake8 |58.0%| [TS] {RESULT} ydb/tests/functional/serverless/flake8 |58.0%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/flake8 |58.0%| [TS] {RESULT} ydb/tests/functional/serverless/import_test |58.0%| [TS] {RESULT} ydb/tests/functional/autoconfig/import_test |58.0%| [TM] {RESULT} ydb/core/blob_depot/ut/unittest >> test.py::test[expr-tagged_runtime-default.txt-ForceBlocks] [GOOD] >> test.py::test[expr-tagged_runtime-default.txt-Results] >> test.py::test[count-count_by_nulls--Results] [GOOD] >> test.py::test[distinct-distinct_columns-default.txt-ForceBlocks] >> test.py::test[select-optional_pull--Results] [GOOD] >> test.py::test[select-select_all_filtered-default.txt-ForceBlocks] |58.0%| [TS] {RESULT} ydb/tests/stress/transfer/tests/flake8 |58.0%| [TS] {RESULT} ydb/tests/stress/simple_queue/tests/import_test |58.0%| [TS] {RESULT} ydb/tests/functional/benchmarks_init/import_test |58.0%| [TS] {RESULT} ydb/tests/tools/kqprun/tests/flake8 |58.0%| [TS] {RESULT} ydb/tests/tools/nemesis/ut/flake8 |58.1%| [TS] {RESULT} ydb/tests/functional/restarts/flake8 >> test.py::test[blocks-combine_all_sum--Results] [GOOD] >> test.py::test[blocks-distinct_opt_state_keys--ForceBlocks] >> test.py::test[aggregate-group_by_expr--Results] [GOOD] >> test.py::test[aggregate-group_by_expr_mul_col--ForceBlocks] |58.1%| [TS] {RESULT} ydb/tests/functional/scheme_shard/import_test >> DataStreams::TestPutRecordsCornerCases [GOOD] >> DataStreams::TestPutRecords >> test.py::test[dq-blacklisted_pragmas1--ForceBlocks] [GOOD] >> test.py::test[dq-blacklisted_pragmas1--Results] [SKIPPED] >> test.py::test[hor_join-group_ranges--ForceBlocks] >> DataStreams::TestStreamStorageRetention [GOOD] >> DataStreams::TestStreamPagination |58.1%| [TS] {RESULT} ydb/tests/functional/audit/import_test |58.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part0/py2_flake8 |58.1%| [TS] {RESULT} ydb/core/backup/common/ut/unittest |58.1%| [TS] {RESULT} ydb/tests/datashard/parametrized_queries/flake8 |58.1%| [TS] {RESULT} ydb/core/tx/columnshard/tools/visualize_portions/flake8 |58.1%| [TS] {RESULT} ydb/tests/functional/sqs/multinode/flake8 |58.1%| [TS] {RESULT} ydb/tests/compatibility/flake8 >> test.py::test[aggr_factory-max_by-default.txt-Results] [GOOD] >> test.py::test[aggr_factory-min-default.txt-Results] |58.1%| [TS] 
{RESULT} ydb/tests/functional/wardens/import_test |58.1%| [TS] {RESULT} ydb/tests/stress/transfer/flake8 |58.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part2/flake8 |58.1%| [TS] {RESULT} ydb/library/benchmarks/runner/result_convert/import_test >> test.py::test[window-current/ansi_current_with_win--ForceBlocks] [GOOD] >> test.py::test[key_filter-uuid--Results] [GOOD] >> test.py::test[key_filter-yql_5895_or-default.txt-Results] |58.1%| [TS] {RESULT} ydb/tests/functional/ydb_cli/flake8 |58.1%| [TS] {RESULT} ydb/tools/tstool/flake8 >> test.py::test[window-current/ansi_current_with_win--Results] >> test.py::test[order_by-order_by_value_desc-default.txt-Results] [GOOD] >> test.py::test[pg-doubles_search_path-default.txt-ForceBlocks] |58.1%| [TS] {RESULT} ydb/tests/fq/generic/analytics/flake8 |58.1%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part8/py2_flake8 |58.1%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/mysql/flake8 |58.1%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part5/py2_flake8 >> test.py::test[insert-append_sorted--ForceBlocks] [GOOD] >> test.py::test[insert-append_sorted--Results] >> test.py::test[simple_columns-simple_columns_join_without_resolve_dublicates_mult-default.txt-Results] [GOOD] >> test.py::test[simple_columns-simple_columns_tablerow-default.txt-ForceBlocks] >> test.py::test[join-bush_dis_in--Results] [GOOD] |58.1%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part5/flake8 |58.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part15/flake8 >> DataStreams::TestPutRecords [GOOD] >> test.py::test[join-bush_dis_in-off-ForceBlocks] >> test.py::test[order_by-order_by_num_key_and_subkey_desc--Results] [GOOD] >> test.py::test[order_by-sort_simple--Results] [GOOD] >> test.py::test[pg-in_mixed--ForceBlocks] >> test.py::test[window-full/noncompact_with_nulls--Results] [GOOD] >> test.py::test[window-leading/aggregations--ForceBlocks] >> test.py::test[pg-join_using_tables1-default.txt-ForceBlocks] |58.2%| [TS] {RESULT} ydb/library/yql/tests/sql/hybrid_file/part10/py2_flake8 |58.2%| [TS] {RESULT} ydb/tests/fq/generic/analytics/black |58.2%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/import_test >> test.py::test[join-mergejoin_saves_output_sort_cross--Results] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_nested--ForceBlocks] >> test.py::test[pg-tpcds-q66-default.txt-Results] [GOOD] >> test.py::test[join-yql-10654_pullup_with_sys_columns--Results] [GOOD] >> test.py::test[pg-tpcds-q71-default.txt-ForceBlocks] >> test.py::test[expr-tagged_runtime-default.txt-Results] [GOOD] >> test.py::test[hor_join-yield_off--ForceBlocks] [SKIPPED] >> test.py::test[join-yql-14847-off-ForceBlocks] >> test.py::test[hor_join-yield_off--Results] [SKIPPED] >> test.py::test[schema-select_all-read_schema-ForceBlocks] [GOOD] >> test.py::test[schema-select_all-read_schema-Results] |58.2%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_file/part1/flake8 |58.2%| [TS] {RESULT} ydb/tests/functional/cms/flake8 |58.2%| [TS] {RESULT} ydb/tests/functional/kqp/plan2svg/flake8 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestPutRecords [GOOD] Test command err: 2025-06-03T10:23:29.905396Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666774027012967:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:29.905432Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f3e/r3tmp/tmpm4bpMa/pdisk_1.dat 2025-06-03T10:23:30.006861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:30.006895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:30.008824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 64329, node 1 2025-06-03T10:23:30.025047Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:30.025823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:30.025835Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:30.025837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:30.025892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18645 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:30.047775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:30.101234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:18645 2025-06-03T10:23:30.111906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:31.753748Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666779279983159:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:31.753767Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f3e/r3tmp/tmp81dnzA/pdisk_1.dat 2025-06-03T10:23:31.785223Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5015, node 4 2025-06-03T10:23:31.803598Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:31.803618Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:31.803620Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:31.803681Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8507 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:31.854323Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:31.854359Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:31.856081Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:31.859771Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:31.890796Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:8507 2025-06-03T10:23:31.907347Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:31.947439Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480 encryption_type: NONE sequence_number: "0" shard_id: "shard-000000" encryption_type: NONE records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } 2025-06-03T10:23:31.957516Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:23:31.972391Z :INFO: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] Starting read session 2025-06-03T10:23:31.972414Z :DEBUG: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] Starting session to cluster null (localhost:5015) 2025-06-03T10:23:31.972902Z :DEBUG: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:23:31.972914Z :DEBUG: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:23:31.972930Z :DEBUG: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] [null] Reconnecting session to cluster null in 0.000000s 2025-06-03T10:23:31.975128Z :DEBUG: [/Root/] [/Root/] [506f9e1d-b474e925-75ee9e7a-5c8d2ee9] [null] Successfully connected. 
Initializing session 2025-06-03T10:23:31.977583Z node 4 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:111: new grpc connection 2025-06-03T10:23:31.977598Z node 4 :PQ_READ_PROXY DEBUG: grpc_pq_read.h:133: new session created cookie 1 2025-06-03T10:23:31.979569Z node 4 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer session grpc read done: success# 1, data# { init_request { topics_read_settings { topic: "/Root/stream_TestPutRecordsWithRead" } read_only_original: true consumer: "user1" read_params { max_read_size: 104857600 } } } 2025-06-03T10:23:31.979652Z node 4 :PQ_READ_PROXY INFO: read_session_actor.cpp:916: session cookie 1 consumer user1 session user1_4_1_9596113827501220571_v1 read init: from# ipv6:[::1]:36718, request# { init_request { topics_read_settings { topic: "/Root/stream_TestPutRecordsWithRead" } read_only_original: true consumer: "user1" read_params { max_read_size: 104857600 } } } 2025-06-03T10:23:31.979711Z node 4 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer user1 session user1_4_1_9596113827501220571_v1 auth for : user1 2025-06-03T10:23:31.980324Z node 4 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer user1 session user1_4_1_9596113827501220571_v1 Handle describ ... null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:23:36.877133Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 4 (0-1) 2025-06-03T10:23:36.877180Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 0} (0-0) 2025-06-03T10:23:36.877190Z :DEBUG: [/Root/] Take Data. Partition 4. Read: {0, 1} (1-1) 2025-06-03T10:23:36.877200Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2025-06-03T10:23:36.898664Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:23:36.898768Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 0 (0-1) 2025-06-03T10:23:36.898778Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (0-2) 2025-06-03T10:23:36.898828Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 3 (3-3) 2025-06-03T10:23:36.898833Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (0-1) 2025-06-03T10:23:36.898835Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-03T10:23:36.898852Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (3-3) 2025-06-03T10:23:36.898893Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-03T10:23:36.898915Z :DEBUG: [/Root/] Take Data. Partition 0. Read: {0, 1} (1-1) 2025-06-03T10:23:36.898937Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] The application data is transferred to the client. Number of messages 2, size 0 bytes 2025-06-03T10:23:36.898952Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (4-4) 2025-06-03T10:23:36.898958Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (5-5) 2025-06-03T10:23:36.898962Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (7-7) 2025-06-03T10:23:36.898962Z :DEBUG: [/Root/] Decompression task done. Partition/PartitionSessionId: 1 (6-6) 2025-06-03T10:23:36.898966Z :DEBUG: [/Root/] Decompression task done. 
Partition/PartitionSessionId: 1 (8-8) 2025-06-03T10:23:36.911314Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {0, 0} (0-0) 2025-06-03T10:23:36.911325Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {0, 1} (1-1) 2025-06-03T10:23:36.911497Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {1, 0} (2-2) 2025-06-03T10:23:36.911506Z :DEBUG: [/Root/] Take Data. Partition 3. Read: {2, 0} (3-3) 2025-06-03T10:23:36.911534Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] The application data is transferred to the client. Number of messages 4, size 1049088 bytes 2025-06-03T10:23:36.911624Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {0, 0} (0-0) 2025-06-03T10:23:36.911769Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {1, 0} (1-1) 2025-06-03T10:23:36.912055Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {1, 1} (2-2) 2025-06-03T10:23:36.912221Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {2, 0} (3-3) 2025-06-03T10:23:36.912771Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {2, 1} (4-4) 2025-06-03T10:23:36.912936Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 0} (5-5) 2025-06-03T10:23:36.913081Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 1} (6-6) 2025-06-03T10:23:36.913255Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 2} (7-7) 2025-06-03T10:23:36.914355Z :DEBUG: [/Root/] Take Data. Partition 1. Read: {3, 3} (8-8) 2025-06-03T10:23:36.914377Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] The application data is transferred to the client. Number of messages 9, size 8388611 bytes 2025-06-03T10:23:36.921013Z :INFO: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] Closing read session. Close timeout: 0.000000s 2025-06-03T10:23:36.921038Z :INFO: [/Root/] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:stream_TestPutRecordsCornerCases:1:5:8:0 null:stream_TestPutRecordsCornerCases:3:4:3:0 null:stream_TestPutRecordsCornerCases:2:3:0:0 null:stream_TestPutRecordsCornerCases:0:2:1:0 null:stream_TestPutRecordsCornerCases:4:1:1:0 2025-06-03T10:23:36.921047Z :INFO: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] Counters: { Errors: 0 CurrentSessionLifetimeMs: 49 BytesRead: 9437699 MessagesRead: 17 BytesReadCompressed: 9437699 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:23:36.921067Z :NOTICE: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Close with zero timeout " } 2025-06-03T10:23:36.921076Z :DEBUG: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] [null] Abort session to cluster 2025-06-03T10:23:36.921337Z :NOTICE: [/Root/] [/Root/] [85f60a51-cf62bb0a-d249a3a3-ee0ffb38] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Aborted " } 2025-06-03T10:23:36.921725Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer user1 session user1_7_1_3472655401503782670_v1 grpc read failed 2025-06-03T10:23:36.921770Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer user1 session user1_7_1_3472655401503782670_v1 grpc closed 2025-06-03T10:23:36.921799Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer user1 session user1_7_1_3472655401503782670_v1 is DEAD 2025-06-03T10:23:37.631726Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511666804712805711:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:37.631754Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f3e/r3tmp/tmpdVteO2/pdisk_1.dat 2025-06-03T10:23:37.647489Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30749, node 10 2025-06-03T10:23:37.659813Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:37.659826Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:37.659828Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:37.659865Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12020 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:37.731959Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:37.732000Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:37.733574Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:37.736197Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:37.757644Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:12020 2025-06-03T10:23:37.767576Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:37.800045Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480
<main>: Error: Access for stream /Root/stream_TestPutRecords is denied for subject user2@builtin, code: 500018 2025-06-03T10:23:37.802890Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 PutRecordsResponse = encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000003" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000003" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } PutRecord response = encryption_type: NONE sequence_number: "7" shard_id: "shard-000004" |58.2%| [TS] {RESULT} ydb/tests/functional/large_serializable/import_test >> test.py::test[in-in_immediate_subquery-default.txt-ForceBlocks] >> test.py::test[pg-tpcds-q73-default.txt-ForceBlocks] [GOOD] |58.2%| [TS] {RESULT} ydb/library/yaml_config/ut_transform/import_test |58.2%| [TS] {RESULT} ydb/mvp/meta/ut/unittest |58.2%| [TS] {RESULT} ydb/core/log_backend/ut/unittest >> test.py::test[key_filter-contains_tuples_no_keyfilter-default.txt-ForceBlocks] [GOOD] >> test.py::test[key_filter-contains_tuples_no_keyfilter-default.txt-Results] >> test.py::test[pg-tpcds-q73-default.txt-Results] |58.2%| [TM] {RESULT} ydb/core/tablet_flat/ut_util/unittest |58.2%| [TS] {RESULT} ydb/library/benchmarks/runner/result_compare/flake8 |58.2%| [TS] {RESULT} ydb/library/yaml_config/tools/simple_json_diff/flake8 >> DataStreams::TestStreamPagination [GOOD] >> DataStreams::TestShardPagination |58.2%| [TS] {RESULT} ydb/library/yaml_config/ut/unittest |58.2%| [TS] {RESULT} ydb/tests/datashard/copy_table/flake8 |58.2%| [TS] {RESULT} ydb/core/fq/libs/http_api_client/flake8 |58.2%| [TS] {RESULT} ydb/tests/fq/restarts/flake8 |58.2%| [TS] {RESULT} ydb/tests/functional/query_cache/import_test |58.2%| [TS] {RESULT} ydb/tests/olap/lib/flake8 |58.3%| [TS] {RESULT} ydb/tests/functional/ydb_cli/import_test |58.3%| [TS] {RESULT} 
ydb/tests/fq/mem_alloc/flake8 |58.3%| [TS] {RESULT} ydb/tests/functional/tpc/large/flake8 >> test.py::test[join-from_in_front_join--Results] [GOOD] >> test.py::test[join-from_in_front_join-off-Results] [SKIPPED] >> test.py::test[join-full_join-off-Results] [SKIPPED] >> test.py::test[join-grace_join1-grace-Results] [SKIPPED] >> test.py::test[join-group_compact_by--Results] |58.3%| [TM] {RESULT} ydb/core/blobstorage/backpressure/ut_client/unittest |58.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part12/py2_flake8 |58.3%| [TS] {RESULT} ydb/tests/functional/canonical/flake8 |58.3%| [TS] {RESULT} ydb/tests/tools/kqprun/recipe/import_test |58.3%| [TS] {RESULT} ydb/tests/functional/large_serializable/flake8 >> test.py::test[insert-append_sorted--Results] [GOOD] >> test.py::test[insert-keepmeta_nonstrict_fail--ForceBlocks] >> test.py::test[aggregate-avg_interval-default.txt-ForceBlocks] [GOOD] >> test.py::test[schema-select_all-read_schema-Results] [GOOD] >> test.py::test[schema-select_all_inferschema2--ForceBlocks] >> test.py::test[aggregate-avg_interval-default.txt-Results] |58.3%| [TS] {RESULT} ydb/tests/functional/script_execution/import_test |58.3%| [TS] {RESULT} ydb/tests/sql/lib/flake8 >> test.py::test[aggr_factory-bitand-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggr_factory-bitand-default.txt-Results] >> DataStreams::TestNonChargeableUser >> DataStreams::TestShardPagination [GOOD] |58.3%| [TS] {RESULT} ydb/tests/olap/load/flake8 |58.3%| [TS] {RESULT} ydb/core/config/init/ut/unittest |58.3%| [TS] {RESULT} ydb/library/yql/tests/sql/dq_file/part2/py2_flake8 |58.3%| [TS] {RESULT} ydb/tests/stress/olap_workload/flake8 |58.3%| [TS] {RESULT} ydb/core/tx/scheme_board/ut_double_indexed/unittest |58.3%| [TS] {RESULT} ydb/tests/fq/yt/kqp_yt_import/import_test |58.3%| [TA] {RESULT} $(B)/ydb/core/fq/libs/compute/common/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |58.3%| [TS] {RESULT} ydb/core/external_sources/object_storage/inference/ut/gtest >> test.py::test[select-trivial_order_by-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-trivial_order_by-default.txt-Results] >> test.py::test[pg-tpcds-q73-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q77-default.txt-ForceBlocks] >> test.py::test[select-select_all_filtered-default.txt-ForceBlocks] [GOOD] |58.4%| [TS] {RESULT} ydb/library/yql/providers/generic/connector/tests/datasource/clickhouse/flake8 |58.4%| [TS] {RESULT} ydb/tests/fq/http_api/import_test |58.4%| [TS] {RESULT} ydb/tests/olap/load/import_test |58.4%| [LD] {RESULT} $(B)/ydb/library/yaml_config/ut/ydb-library-yaml_config-ut >> test.py::test[select-select_all_filtered-default.txt-Results] |58.4%| [TS] {RESULT} ydb/tests/library/ut/import_test >> test.py::test[window-current/ansi_current_with_win--Results] [GOOD] >> test.py::test[window-generic/aggregations_mixed_leadlag--ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestShardPagination [GOOD] Test command err: 2025-06-03T10:23:36.762265Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666803681671539:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:36.762287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f36/r3tmp/tmpURYS0Z/pdisk_1.dat 2025-06-03T10:23:36.853201Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:36.862890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:36.862928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:36.864388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61562, node 1 2025-06-03T10:23:36.871603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:36.871619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:36.871622Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:36.871675Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65388 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:36.929901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:36.954482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:65388 2025-06-03T10:23:36.970852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 40960, code: 500080 2025-06-03T10:23:37.032439Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511666807976640934:3455] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/stream_TestStreamStorageRetention\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 2], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_pq.cpp:345" severity: 1 } 2025-06-03T10:23:37.817061Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666804504691814:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:37.817086Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f36/r3tmp/tmp6xRgOF/pdisk_1.dat 2025-06-03T10:23:37.838637Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4152, node 4 2025-06-03T10:23:37.852098Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:37.852116Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:37.852118Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:37.852169Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3918 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:23:37.917570Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:37.917628Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:37.919341Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:37.926856Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:37.944917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:3918 2025-06-03T10:23:37.973562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:39.508261Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666813209006134:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:39.508286Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f36/r3tmp/tmpoLD6uS/pdisk_1.dat 2025-06-03T10:23:39.545664Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21692, node 7 2025-06-03T10:23:39.561329Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:39.561348Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:39.561350Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:39.561411Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28529 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:23:39.608815Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:39.608851Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:39.610498Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:39.623746Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:39.638826Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:28529 2025-06-03T10:23:39.652278Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... >> test.py::test[distinct-distinct_columns-default.txt-ForceBlocks] [GOOD] >> test.py::test[distinct-distinct_columns-default.txt-Results] >> test.py::test[aggregate-group_by_expr_mul_col--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_expr_mul_col--Results] >> test.py::test[key_filter-contains_tuples_no_keyfilter-default.txt-Results] [GOOD] >> test.py::test[key_filter-is_null_multi_key--ForceBlocks] >> test.py::test[key_filter-yql_5895_or-default.txt-Results] [GOOD] >> test.py::test[like-like_clause-default.txt-Results] >> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD] >> test.py::test[aggr_factory-histogram-default.txt-Results] [GOOD] >> test.py::test[aggregate-GroupByTwoFields--Results] >> test.py::test[optimizers-combinebykey_fields_subset_range--ForceBlocks] [GOOD] >> test.py::test[optimizers-combinebykey_fields_subset_range--Results] >> test.py::test[join-yql-14829_left--ForceBlocks] [GOOD] >> test.py::test[join-yql-14829_left--Results] >> test.py::test[hor_join-group_ranges--ForceBlocks] [GOOD] >> test.py::test[hor_join-group_ranges--Results] >> test.py::test[insert-keepmeta_nonstrict_fail--ForceBlocks] [GOOD] >> test.py::test[binding-table_range_strict_binding-default.txt-ForceBlocks] [GOOD] >> test.py::test[binding-table_range_strict_binding-default.txt-Results] >> test.py::test[insert-keepmeta_nonstrict_fail--Results] [GOOD] >> test.py::test[insert-keepmeta_proto_fail--ForceBlocks] >> test.py::test[tpch-q7-default.txt-ForceBlocks] [GOOD] >> test.py::test[tpch-q7-default.txt-Results] >> test.py::test[pg-doubles_search_path-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-doubles_search_path-default.txt-Results] >> test.py::test[aggregate-avg_interval-default.txt-Results] [GOOD] >> test.py::test[aggregate-count_distinct_with_filter--ForceBlocks] >> test.py::test[join-lookupjoin_with_cache--ForceBlocks] [GOOD] >> test.py::test[join-lookupjoin_with_cache--Results] >> test.py::test[select-trivial_order_by-default.txt-Results] [GOOD] >> test.py::test[type_v3-mergejoin_with_sort--ForceBlocks] |58.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_parts [GOOD] >> 
test.py::test[pg-join_using_tables1-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-join_using_tables1-default.txt-Results] >> DataStreams::TestNonChargeableUser [GOOD] >> DataStreams::TestPutEmptyMessage >> test.py::test[column_order-align_publish_native--ForceBlocks] [GOOD] >> test.py::test[column_order-align_publish_native--Results] >> DataStreams::TestReservedResourcesMetering >> test.py::test[select-select_all_filtered-default.txt-Results] [GOOD] >> test.py::test[select-select_all_group_by_column--ForceBlocks] >> test.py::test[join-star_join-off-ForceBlocks] [GOOD] >> test.py::test[join-star_join-off-Results] >> test.py::test[pg-in_mixed--ForceBlocks] [GOOD] >> test.py::test[join-star_join-off-Results] [SKIPPED] >> test.py::test[pg-in_mixed--Results] >> test.py::test[join-yql-14847--ForceBlocks] >> DataStreams::TestPutEmptyMessage [GOOD] >> DataStreams::TestListStreamConsumers >> test.py::test[aggr_factory-bitand-default.txt-Results] [GOOD] >> test.py::test[aggregate-agg_filter_pushdown--ForceBlocks] >> test.py::test[distinct-distinct_columns-default.txt-Results] [GOOD] >> test.py::test[distinct-distinct_window-default.txt-ForceBlocks] >> DataStreams::TestUpdateStorage >> test.py::test[pg-tpcds-q71-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q71-default.txt-Results] >> test.py::test[insert-keepmeta_proto_fail--ForceBlocks] [GOOD] >> test.py::test[insert-keepmeta_proto_fail--Results] [GOOD] >> test.py::test[insert-keepmeta_view_fail--ForceBlocks] >> test.py::test[optimizers-combinebykey_fields_subset_range--Results] [GOOD] >> test.py::test[aggregate-group_by_expr_mul_col--Results] [GOOD] >> test.py::test[aggregate-group_by_expr_with_join--ForceBlocks] >> test.py::test[binding-table_range_strict_binding-default.txt-Results] [GOOD] >> test.py::test[blocks-combine_all_sum_filter_opt--ForceBlocks] >> test.py::test[optimizers-flatmap_with_non_struct_out--ForceBlocks] [SKIPPED] >> test.py::test[hor_join-group_ranges--Results] [GOOD] >> test.py::test[optimizers-flatmap_with_non_struct_out--Results] [SKIPPED] >> test.py::test[optimizers-yql-7532_wrong_field_subset_for_calcoverwindow-default.txt-ForceBlocks] >> test.py::test[hor_join-skip_yamr--ForceBlocks] >> test.py::test[simple_columns-simple_columns_tablerow-default.txt-ForceBlocks] [GOOD] >> test.py::test[simple_columns-simple_columns_tablerow-default.txt-Results] >> test.py::test[pg-doubles_search_path-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q32-default.txt-ForceBlocks] >> DataStreams::TestListStreamConsumers [GOOD] >> DataStreams::TestListShards1Shard >> test.py::test[join-bush_dis_in-off-ForceBlocks] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_nested--ForceBlocks] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_nested--Results] >> test.py::test[join-yql-14847-off-ForceBlocks] [GOOD] >> test.py::test[join-bush_dis_in-off-Results] [SKIPPED] >> test.py::test[join-yql-14847-off-Results] [SKIPPED] >> test.py::test[join-bush_in_in-off-ForceBlocks] >> test.py::test[in-in_immediate_subquery-default.txt-ForceBlocks] [GOOD] >> DataStreams::TestUpdateStorage [GOOD] >> DataStreams::TestStreamTimeRetention >> test.py::test[schema-select_all_inferschema2--ForceBlocks] [GOOD] >> test.py::test[join-yql-8131-off-ForceBlocks] [SKIPPED] >> test.py::test[join-yql-8131-off-Results] >> test.py::test[schema-select_all_inferschema2--Results] >> test.py::test[column_order-align_publish_native--Results] [GOOD] >> test.py::test[count-count_const_no_grouping-default.txt-ForceBlocks] >> 
test.py::test[join-yql-8131-off-Results] [SKIPPED] >> test.py::test[join-yql-8980--ForceBlocks] >> test.py::test[pg-tpch-q04-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpch-q04-default.txt-Results] >> test.py::test[blocks-distinct_opt_state_keys--ForceBlocks] [GOOD] >> test.py::test[blocks-distinct_opt_state_keys--Results] >> test.py::test[pg-in_mixed--Results] [GOOD] >> test.py::test[pg-select_yql_type--ForceBlocks] >> test.py::test[pg-join_using_tables1-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q03-default.txt-ForceBlocks] >> DataStreams::TestListShards1Shard [GOOD] >> test.py::test[aggregate-aggrs_no_grouping--ForceBlocks] [GOOD] >> test.py::test[join-lookupjoin_with_cache--Results] [GOOD] >> test.py::test[join-mapjoin_early_rewrite_sequence--ForceBlocks] >> test.py::test[aggregate-aggrs_no_grouping--Results] >> test.py::test[insert-keepmeta_view_fail--ForceBlocks] [GOOD] >> test.py::test[insert-keepmeta_view_fail--Results] >> test.py::test[pg-tpcds-q71-default.txt-Results] [GOOD] >> test.py::test[insert-keepmeta_view_fail--Results] [GOOD] >> test.py::test[insert-override_view_fail--ForceBlocks] >> test.py::test[pg-tpcds-q88-default.txt-ForceBlocks] >> test.py::test[blocks-date_less_scalar--ForceBlocks] [GOOD] >> test.py::test[blocks-date_less_scalar--Results] >> test.py::test[like-like_clause-default.txt-Results] [GOOD] >> test.py::test[like-like_clause_no_pattern-default.txt-Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestListShards1Shard [GOOD] Test command err: 2025-06-03T10:23:40.319555Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666817807001789:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:40.319580Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f2e/r3tmp/tmpLbV5ut/pdisk_1.dat 2025-06-03T10:23:40.392141Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11542, node 1 2025-06-03T10:23:40.403671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:40.403688Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:40.403690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:40.403746Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:40.420285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:40.420324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:40.422065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19350 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:40.455768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:40.517345Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:19350 2025-06-03T10:23:40.539416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:40.624875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: 
"shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } 2025-06-03T10:23:40.629644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:23:40.645124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:23:40.649585Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:23:40.649601Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:23:40.649604Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-03T10:23:40.652204Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-03T10:23:40.652229Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-03T10:23:40.652236Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1748946220618-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1748946220618-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1748946220640-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1748946220640-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037890-1748946220640-5","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037890","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037890-1748946220640-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037890","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1748946220618-1","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":0,"reserved_storage_bytes":90596966400},"usage":{"quantity":0,"unit":"second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"used_storage-root-72075186224037888-1748946220618-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946220,"finish":1748946220},"labels":{"datastreams_stream_name":"stream_TestNonChargeableUser","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946220}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestNonChargeableUser","id":"reserved_resources-root-72075186224037888-1748946220640-3","schema":"yds.resources.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consu ... pp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:43.416425Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:43.427241Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:43.442218Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:19780 2025-06-03T10:23:43.453281Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR E0000 00:00:1748946223.485856 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.485929 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.486999 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.487022 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.487566 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.487576 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.488020 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.488028 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-03T10:23:43.489873Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:43.498978Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 E0000 00:00:1748946223.512794 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.512828 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-03T10:23:43.514424Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480 E0000 00:00:1748946223.526927 89706 
message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.526971 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-03T10:23:43.528666Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480 E0000 00:00:1748946223.542866 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.542904 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.545124 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.545163 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.562946 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946223.562980 89706 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn 2025-06-03T10:23:43.550482Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:23:43.558758Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-06-03T10:23:43.559080Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-03T10:23:43.559181Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-03T10:23:43.559409Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-03T10:23:43.559413Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-03T10:23:43.559417Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-03T10:23:44.199555Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511666837728406606:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:44.199581Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f2e/r3tmp/tmpNjkvtt/pdisk_1.dat 2025-06-03T10:23:44.234318Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10236, node 10 2025-06-03T10:23:44.249133Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:44.249152Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:44.249154Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:44.249218Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:44.300214Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:44.300253Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:44.301955Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:44.315360Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:44.328612Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:14368 2025-06-03T10:23:44.344096Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
E0000 00:00:1748946224.421148 90356 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946224.422317 90356 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946224.423389 90356 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946224.424080 90356 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn E0000 00:00:1748946224.425051 90356 message_lite.cc:131] Can't parse message of type "NKikimrPQ.TYdsNextToken" because it is missing required fields: CreationTimestamp, MaxResults, AlreadyRead, StreamArn >> test.py::test[aggr_factory-min-default.txt-Results] [GOOD] >> test.py::test[key_filter-is_null_multi_key--ForceBlocks] [GOOD] >> test.py::test[aggr_factory-some-default.txt-Results] >> test.py::test[tpch-q7-default.txt-Results] [GOOD] >> test.py::test[udf-regexp_udf--ForceBlocks] >> test.py::test[window-generic/aggregations_mixed--Results] [GOOD] >> test.py::test[key_filter-is_null_multi_key--Results] >> test.py::test[window-lagging/aggregations_leadlag--Results] >> test.py::test[simple_columns-simple_columns_tablerow-default.txt-Results] [GOOD] >> test.py::test[table_range-tablepath_with_non_existing--ForceBlocks] >> test.py::test[window-leading/aggregations--ForceBlocks] [GOOD] >> test.py::test[window-leading/aggregations--Results] |58.6%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/pytest >> test.py::test[in-in_immediate_subquery-default.txt-ForceBlocks] [GOOD] >> test.py::test[join-group_compact_by--Results] [GOOD] >> test.py::test[join-inner_with_order--Results] >> test.py::test[schema-select_all_inferschema2--Results] [GOOD] >> test.py::test[schema-select_reordered-default.txt-ForceBlocks] >> test.py::test[select-select_all_group_by_column--ForceBlocks] [GOOD] >> test.py::test[type_v3-mergejoin_with_sort--ForceBlocks] [GOOD] >> test.py::test[select-select_all_group_by_column--Results] >> test.py::test[type_v3-mergejoin_with_sort--Results] >> test.py::test[pg-tpcds-q77-default.txt-ForceBlocks] [GOOD] >> test.py::test[join-yql-14829_left--Results] [GOOD] >> test.py::test[join-yql_465--ForceBlocks] >> test.py::test[insert-override_view_fail--ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q77-default.txt-Results] >> test.py::test[insert-override_view_fail--Results] [GOOD] >> test.py::test[join-anyjoin_common_dup--ForceBlocks] >> DataStreams::TestGetShardIterator >> test.py::test[aggregate-count_distinct_with_filter--ForceBlocks] [GOOD] >> test.py::test[aggregate-count_distinct_with_filter--Results] >> DataStreams::TestDeleteStream >> test.py::test[aggregate-GroupByTwoFields--Results] [GOOD] >> test.py::test[join-mergejoin_saves_output_sort_nested--Results] [GOOD] >> test.py::test[key_filter-is_null_multi_key--Results] [GOOD] >> test.py::test[key_filter-part_key_over_dynamic--ForceBlocks] >> test.py::test[aggregate-aggregate_distinct_list-default.txt-Results] >> test.py::test[join-premap_common_multiparents-off-ForceBlocks] >> TBtreeIndexTPartLarge::CutKeys [GOOD] >> TBtreeIndexTPartLarge::Group >> 
test.py::test[hor_join-skip_yamr--ForceBlocks] [GOOD] >> DataStreams::TestGetShardIterator [GOOD] >> DataStreams::TestGetRecordsWithoutPermission >> test.py::test[join-yql-14847--ForceBlocks] [GOOD] >> test.py::test[join-yql-14847--Results] >> test.py::test[blocks-combine_all_sum_filter_opt--ForceBlocks] [GOOD] >> test.py::test[blocks-combine_all_sum_filter_opt--Results] >> test.py::test[select-select_all_group_by_column--Results] [GOOD] >> test.py::test[aggregate-agg_filter_pushdown--ForceBlocks] [GOOD] >> test.py::test[aggregate-agg_filter_pushdown--Results] >> DataStreams::TestDeleteStream [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag >> test.py::test[select-shift_columns-default.txt-ForceBlocks] >> test.py::test[pg-tpcds-q03-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q03-default.txt-Results] >> test.py::test[pg-tpcds-q32-default.txt-ForceBlocks] [GOOD] >> test.py::test[count-count_const_no_grouping-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q32-default.txt-Results] >> test.py::test[aggregate-count_distinct_with_filter--Results] [GOOD] >> test.py::test[count-count_const_no_grouping-default.txt-Results] >> DataStreams::TestGetRecordsWithoutPermission [GOOD] >> DataStreams::TestGetRecordsWithCount >> test.py::test[aggregate-group_by_gs_and_having-default.txt-ForceBlocks] >> test.py::test[pg-tpcds-q77-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_by_expr_with_join--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_expr_with_join--Results] >> test.py::test[type_v3-mergejoin_with_sort--Results] [GOOD] >> test.py::test[type_v3-split--ForceBlocks] [SKIPPED] >> test.py::test[type_v3-split--Results] >> test.py::test[window-generic/aggregations_mixed_leadlag--ForceBlocks] [GOOD] >> test.py::test[pg-tpch-q08-default.txt-ForceBlocks] >> DataStreams::TestReservedResourcesMetering [GOOD] >> DataStreams::TestReservedStorageMetering >> test.py::test[type_v3-split--Results] [SKIPPED] >> test.py::test[type_v3-uuid--ForceBlocks] >> test.py::test[window-generic/aggregations_mixed_leadlag--Results] >> test.py::test[join-yql-8980--ForceBlocks] [GOOD] >> test.py::test[join-yql-8980--Results] |58.6%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[hor_join-skip_yamr--ForceBlocks] [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlag [GOOD] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse >> test.py::test[pg-select_yql_type--ForceBlocks] [GOOD] >> test.py::test[pg-select_yql_type--Results] >> test.py::test[join-bush_in_in-off-ForceBlocks] [GOOD] >> test.py::test[join-bush_in_in-off-Results] [SKIPPED] >> test.py::test[join-count_bans-off-ForceBlocks] >> test.py::test[like-like_clause_no_pattern-default.txt-Results] [GOOD] >> test.py::test[limit-empty_sort_after_limit-default.txt-Results] >> test.py::test[udf-regexp_udf--ForceBlocks] [GOOD] >> test.py::test[udf-regexp_udf--Results] >> test.py::test[join-join_comp_common_table--ForceBlocks] [GOOD] >> test.py::test[join-join_comp_common_table--Results] >> test.py::test[window-leading/aggregations--Results] [GOOD] >> test.py::test[window-rank/opt--ForceBlocks] >> test.py::test[blocks-combine_all_sum_filter_opt--Results] [GOOD] >> test.py::test[blocks-distinct_pure_all--ForceBlocks] >> test.py::test[optimizers-yql-7532_wrong_field_subset_for_calcoverwindow-default.txt-ForceBlocks] [GOOD] >> test.py::test[optimizers-yql-7532_wrong_field_subset_for_calcoverwindow-default.txt-Results] >> test.py::test[pg-tpcds-q03-default.txt-Results] [GOOD] 
>> test.py::test[pg-tpcds-q14-default.txt-ForceBlocks] >> test.py::test[distinct-distinct_window-default.txt-ForceBlocks] [GOOD] >> test.py::test[distinct-distinct_window-default.txt-Results] >> test.py::test[schema-select_reordered-default.txt-ForceBlocks] [GOOD] >> test.py::test[count-count_const_no_grouping-default.txt-Results] [GOOD] >> test.py::test[schema-select_reordered-default.txt-Results] >> test.py::test[join-mapjoin_early_rewrite_sequence--ForceBlocks] [GOOD] >> test.py::test[join-mapjoin_early_rewrite_sequence--Results] >> test.py::test[pg-tpch-q04-default.txt-Results] [GOOD] >> test.py::test[pragma-file-default.txt-ForceBlocks] >> test.py::test[table_range-tablepath_with_non_existing--ForceBlocks] [GOOD] >> test.py::test[table_range-tablepath_with_non_existing--Results] >> DataStreams::TestDeleteStreamWithEnforceFlagFalse [GOOD] >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo >> test.py::test[blocks-distinct_opt_state_keys--Results] [GOOD] >> test.py::test[blocks-filter_by_column_with_drop--ForceBlocks] >> DataStreams::TestUpdateStream >> test.py::test[pg-tpcds-q32-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q36-default.txt-ForceBlocks] >> test.py::test[join-yql-14847--Results] [GOOD] >> test.py::test[join-yql-8131--ForceBlocks] [SKIPPED] >> test.py::test[aggregate-agg_filter_pushdown--Results] [GOOD] >> test.py::test[aggregate-aggregate_with_default_yson_options-default.txt-ForceBlocks] >> test.py::test[join-yql-8131--Results] [SKIPPED] >> test.py::test[key_filter-contains_tuples-default.txt-ForceBlocks] >> test.py::test[udf-regexp_udf--Results] [GOOD] >> test.py::test[union_all-union_all_multiin--ForceBlocks] >> test.py::test[blocks-date_less_scalar--Results] [GOOD] >> test.py::test[blocks-date_not_equals_scalar--ForceBlocks] >> test.py::test[schema-select_reordered-default.txt-Results] [GOOD] >> test.py::test[join-inner_with_order--Results] [GOOD] >> test.py::test[join-inner_with_order-off-Results] [SKIPPED] |58.6%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part17/pytest >> test.py::test[count-count_const_no_grouping-default.txt-Results] [GOOD] >> test.py::test[aggregate-aggrs_no_grouping--Results] [GOOD] >> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-ForceBlocks] >> DataStreams::TestStreamTimeRetention [GOOD] >> DataStreams::TestUnsupported >> test.py::test[join-yql_465--ForceBlocks] [GOOD] >> test.py::test[join-yql_465--Results] >> test.py::test[pg-tpcds-q88-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q88-default.txt-Results] >> test.py::test[pg-select_yql_type--Results] [GOOD] >> test.py::test[pg-tpcds-q02-default.txt-ForceBlocks] >> test.py::test[table_range-tablepath_with_non_existing--Results] [GOOD] >> test.py::test[tpch-q4-default.txt-ForceBlocks] >> test.py::test[aggregate-group_by_expr_with_join--Results] [GOOD] >> test.py::test[aggregate-group_by_hop_only_start--ForceBlocks] >> DataStreams::TestUpdateStream [GOOD] >> DataStreams::Test_AutoPartitioning_Describe >> test.py::test[select-shift_columns-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-shift_columns-default.txt-Results] |58.6%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[schema-select_reordered-default.txt-Results] [GOOD] >> test.py::test[key_filter-part_key_over_dynamic--ForceBlocks] [GOOD] >> test.py::test[key_filter-part_key_over_dynamic--Results] >> 
test.py::test[optimizers-yql-7532_wrong_field_subset_for_calcoverwindow-default.txt-Results] [GOOD] >> test.py::test[order_by-assume_over_input_desc--ForceBlocks] >> DataStreams::TestUnsupported [GOOD] >> test.py::test[join-mapjoin_early_rewrite_sequence--Results] [GOOD] >> test.py::test[join-mapjoin_early_rewrite_sequence-off-ForceBlocks] |58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part2/pytest >> test.py::test[join-inner_with_order-off-Results] [SKIPPED] >> test.py::test[window-generic/aggregations_mixed_leadlag--Results] [GOOD] >> test.py::test[join-yql_465--Results] [GOOD] >> test.py::test[window-win_func_aggr_4func_sort_desc--ForceBlocks] >> test.py::test[limit-insert_with_limit-dynamic-ForceBlocks] >> test.py::test[join-anyjoin_common_dup--ForceBlocks] [GOOD] >> test.py::test[join-anyjoin_common_dup--Results] |58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> test.py::test[join-yql-8980--Results] [GOOD] >> test.py::test[json-jsondocument/insert--ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestUnsupported [GOOD] Test command err: 2025-06-03T10:23:43.605117Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666834522632239:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:43.605547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f25/r3tmp/tmpY7F46d/pdisk_1.dat 2025-06-03T10:23:43.705867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:43.705904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 18907, node 1 2025-06-03T10:23:43.708197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:43.718955Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:43.719152Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:43.719164Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:43.719165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:43.719192Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1626 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:43.739464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:43.764230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:1626
2025-06-03T10:23:43.776216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:43.854077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480
2025-06-03T10:23:44.475798Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666837844745299:2075];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:44.475833Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f25/r3tmp/tmpM1Vmlc/pdisk_1.dat
2025-06-03T10:23:44.499419Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 14139, node 4
2025-06-03T10:23:44.512745Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:44.512761Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:44.512763Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:44.512826Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:31719
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:44.576516Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:44.576555Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:44.578247Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:44.585676Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:44.600569Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:31719
2025-06-03T10:23:44.611088Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:44.685545Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:44.694999Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000000" } records { sequence_number: "12" shard_id: "shard-000000" } records { sequence_number: "13" shard_id: "shard-000000" } records { sequence_number: "14" shard_id: "shard-000000" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "16" shard_id: "shard-000000" } records { sequence_number: "17" shard_id: "shard-000000" } records { sequence_number: "18" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000000" } records { sequence_number: "20" shard_id: "shard-000000" } records { sequence_number: "21" shard_id: "shard-000000" } records { sequence_number: "22" shard_id: "shard-000000" } records { sequence_number: "23" shard_id: "shard-000000" } records { sequence_number: "24" shard_id: "shard-000000" } records { sequence_number: "25" shard_id: "shard-000000" } records { sequence_number: "26" shard_id: "shard-000000" } records { sequence_number: "27" shard_id: "shard-000000" } records { sequence_number: "28" shard_id: "shard-000000" } records { sequence_number: "29" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "30" shard_id: "shard-000000" } records { sequence_number: "31" shard_id: "shard-000000" } records { sequence_number: "32" shard_id: "shard-000000" } records { sequence_number: "33" shard_id: "shard-000000" } records { sequence_number: "34" shard_id: "shard-000000" } records { sequence_number: "35" shard_id: "shard-000000" } records { sequence_number: "36" shard_id: "shard-000000" } records { sequence_number: "37" shard_id: "shard-000000" } records { sequence_number: "38" shard_id: "shard-000000" } records { sequence_number: "39" shard_id: "shard-000000" } records { sequence_number: "40" shard_id: "shard-000000" } records { sequence_number: "41" shard_id: "shard-000000" } records { sequence_number: "42" shard_id: "shard-000000" } records { sequence_number: "43" shard_id: "shard-000000" } records { sequence_number: "44" shard_id: "shard-000000" } records { sequence_number: "45" shard_id: "shard-000000" } records { sequence_number: "46" shard_id: "shard-000000" } records { sequence_number: "47" shard_id: "shard-000000" } records { sequence_number: "48" shard_id: "shard-000000" } records { sequence_number: "49" shard_id: "shard-000000" } records { sequence_number: "50" 
shard_id: "shard-000000" } records { sequence_number: "51" shard_id: "shard-000000" } records { sequence_number: "52" shard_id: "shard-000000" } records { sequence_number: "53" shard_id: "shard-000000" } records { sequence_number: "54" shard_id: "shard-000000" } records { sequence_number: "55" shard_id: "shard-000000" } records { sequence_number: "56" shard_id: "shard-000000" } records { sequence_number: "57" shard_id: "shard-000000" } records { sequence_number: "58" shard_id: "shard-000000" } records { sequence_number: "59" shard_id: "shard-00 ... rd-000000" } records { sequence_number: "74" shard_id: "shard-000000" } records { sequence_number: "75" shard_id: "shard-000000" } records { sequence_number: "76" shard_id: "shard-000000" } records { sequence_number: "77" shard_id: "shard-000000" } records { sequence_number: "78" shard_id: "shard-000000" } records { sequence_number: "79" shard_id: "shard-000000" } records { sequence_number: "80" shard_id: "shard-000000" } records { sequence_number: "81" shard_id: "shard-000000" } records { sequence_number: "82" shard_id: "shard-000000" } records { sequence_number: "83" shard_id: "shard-000000" } records { sequence_number: "84" shard_id: "shard-000000" } records { sequence_number: "85" shard_id: "shard-000000" } records { sequence_number: "86" shard_id: "shard-000000" } records { sequence_number: "87" shard_id: "shard-000000" } records { sequence_number: "88" shard_id: "shard-000000" } records { sequence_number: "89" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "90" shard_id: "shard-000000" } records { sequence_number: "91" shard_id: "shard-000000" } records { sequence_number: "92" shard_id: "shard-000000" } records { sequence_number: "93" shard_id: "shard-000000" } records { sequence_number: "94" shard_id: "shard-000000" } records { sequence_number: "95" shard_id: "shard-000000" } records { sequence_number: "96" shard_id: "shard-000000" } records { sequence_number: "97" shard_id: "shard-000000" } records { sequence_number: "98" shard_id: "shard-000000" } records { sequence_number: "99" shard_id: "shard-000000" } records { sequence_number: "100" shard_id: "shard-000000" } records { sequence_number: "101" shard_id: "shard-000000" } records { sequence_number: "102" shard_id: "shard-000000" } records { sequence_number: "103" shard_id: "shard-000000" } records { sequence_number: "104" shard_id: "shard-000000" } records { sequence_number: "105" shard_id: "shard-000000" } records { sequence_number: "106" shard_id: "shard-000000" } records { sequence_number: "107" shard_id: "shard-000000" } records { sequence_number: "108" shard_id: "shard-000000" } records { sequence_number: "109" shard_id: "shard-000000" } records { sequence_number: "110" shard_id: "shard-000000" } records { sequence_number: "111" shard_id: "shard-000000" } records { sequence_number: "112" shard_id: "shard-000000" } records { sequence_number: "113" shard_id: "shard-000000" } records { sequence_number: "114" shard_id: "shard-000000" } records { sequence_number: "115" shard_id: "shard-000000" } records { sequence_number: "116" shard_id: "shard-000000" } records { sequence_number: "117" shard_id: "shard-000000" } records { sequence_number: "118" shard_id: "shard-000000" } records { sequence_number: "119" shard_id: "shard-000000" } encryption_type: NONE records { sequence_number: "120" shard_id: "shard-000000" } records { sequence_number: "121" shard_id: "shard-000000" } records { sequence_number: "122" shard_id: "shard-000000" } records { sequence_number: 
"123" shard_id: "shard-000000" } records { sequence_number: "124" shard_id: "shard-000000" } records { sequence_number: "125" shard_id: "shard-000000" } records { sequence_number: "126" shard_id: "shard-000000" } records { sequence_number: "127" shard_id: "shard-000000" } records { sequence_number: "128" shard_id: "shard-000000" } records { sequence_number: "129" shard_id: "shard-000000" } records { sequence_number: "130" shard_id: "shard-000000" } records { sequence_number: "131" shard_id: "shard-000000" } records { sequence_number: "132" shard_id: "shard-000000" } records { sequence_number: "133" shard_id: "shard-000000" } records { sequence_number: "134" shard_id: "shard-000000" } records { sequence_number: "135" shard_id: "shard-000000" } records { sequence_number: "136" shard_id: "shard-000000" } records { sequence_number: "137" shard_id: "shard-000000" } records { sequence_number: "138" shard_id: "shard-000000" } records { sequence_number: "139" shard_id: "shard-000000" } records { sequence_number: "140" shard_id: "shard-000000" } records { sequence_number: "141" shard_id: "shard-000000" } records { sequence_number: "142" shard_id: "shard-000000" } records { sequence_number: "143" shard_id: "shard-000000" } records { sequence_number: "144" shard_id: "shard-000000" } records { sequence_number: "145" shard_id: "shard-000000" } records { sequence_number: "146" shard_id: "shard-000000" } records { sequence_number: "147" shard_id: "shard-000000" } records { sequence_number: "148" shard_id: "shard-000000" } records { sequence_number: "149" shard_id: "shard-000000" } 2025-06-03T10:23:49.476208Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7511666837844745299:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:49.476262Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946224680-2","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946224,"finish":1748946224},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946224}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946224691-3","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946224,"finish":1748946224},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946224}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946224706-4","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946224,"finish":1748946225},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946225}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946225713-5","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946225,"finish":1748946226},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946226}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946226716-6","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946226,"finish":1748946227},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946227}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestStreamTimeRetention","id":"used_storage-root-72075186224037888-1748946227720-7","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946227,"finish":1748946228},"labels":{"datastreams_stream_name":"stream_TestStreamTimeRetention","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946228}'
2025-06-03T10:23:51.330718Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666868824448128:2075];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:51.330970Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f25/r3tmp/tmpYgk39Q/pdisk_1.dat
2025-06-03T10:23:51.351563Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 1158, node 7
2025-06-03T10:23:51.368700Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:51.368712Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:51.368714Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:51.368760Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:13948
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:51.431186Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:51.431220Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:51.432842Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:51.437347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:51.460365Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:13948
2025-06-03T10:23:51.473255Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
>> test.py::test[join-premap_common_multiparents-off-ForceBlocks] [GOOD]
>> test.py::test[join-premap_common_multiparents-off-Results] [SKIPPED]
>> test.py::test[join-premap_common_semi--ForceBlocks]
>> test.py::test[limit-empty_sort_after_limit-default.txt-Results] [GOOD]
>> test.py::test[limit-insert_with_limit--Results]
>> test.py::test[aggr_factory-some-default.txt-Results] [GOOD]
>> test.py::test[aggr_factory-stddev-default.txt-Results]
>> test.py::test[pg-tpcds-q88-default.txt-Results] [GOOD]
>> test.py::test[pg-tpcds-q97-default.txt-ForceBlocks]
>> test.py::test[select-shift_columns-default.txt-Results] [GOOD]
>> test.py::test[select-table_content_with_tmp_folder--ForceBlocks]
>> test.py::test[type_v3-uuid--ForceBlocks] [GOOD]
>> test.py::test[type_v3-uuid--Results]
>> test.py::test[distinct-distinct_window-default.txt-Results] [GOOD]
>> test.py::test[expr-evaluate_parse_inf_nan--ForceBlocks]
>> test.py::test[expr-evaluate_parse_inf_nan--ForceBlocks] [SKIPPED]
>> test.py::test[expr-evaluate_parse_inf_nan--Results] [SKIPPED]
>> test.py::test[key_filter-part_key_over_dynamic--Results] [GOOD]
>> test.py::test[key_filter-yql-8117-table_key_filter--ForceBlocks]
>> test.py::test[pragma-file-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pragma-file-default.txt-Results]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[blocks-distinct_pure_all--ForceBlocks] [GOOD]
>> test.py::test[blocks-distinct_pure_all--Results]
>> test.py::test[aggregate-group_by_gs_and_having-default.txt-ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_by_gs_and_having-default.txt-Results]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[blocks-filter_by_column_with_drop--ForceBlocks] [GOOD]
>> TraverseColumnShard::TraverseColumnTableRebootColumnshard
|58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[expr-evaluate_parse_inf_nan--Results] [SKIPPED]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[blocks-filter_by_column_with_drop--Results]
>> test.py::test[pragma-file-default.txt-Results] [GOOD]
>> test.py::test[produce-reduce_all_field_subset--ForceBlocks]
>> test.py::test[type_v3-uuid--Results] [GOOD]
>> test.py::test[window-lagging/aggregations_leadlag--Results] [GOOD]
>> test.py::test[window-mixed/aggregations--Results]
>> test.py::test[aggregate-aggregate_with_default_yson_options-default.txt-ForceBlocks] [GOOD]
>> test.py::test[aggregate-aggregate_with_default_yson_options-default.txt-Results]
>> AnalyzeDatashard::AnalyzeOneTable
>> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD]
>> test.py::test[pg-tpcds-q02-default.txt-ForceBlocks] [GOOD]
>> test.py::test[union_all-union_all_multiin--ForceBlocks] [GOOD]
>> test.py::test[union_all-union_all_multiin--Results]
>> test.py::test[pg-tpcds-q02-default.txt-Results]
>> test.py::test[join-count_bans-off-ForceBlocks] [GOOD]
>> test.py::test[join-count_bans-off-Results] [SKIPPED]
>> test.py::test[join-flatten_columns2--ForceBlocks]
>> DataStreams::Test_AutoPartitioning_Describe [GOOD]
>> DataStreams::Test_Crreate_AutoPartitioning_Disabled
>> AnalyzeColumnshard::AnalyzeRebootColumnShard
>> test.py::test[blocks-distinct_pure_all--Results] [GOOD]
>> test.py::test[blocks-div_uint64--ForceBlocks]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[blocks-filter_by_column_with_drop--Results] [GOOD]
>> test.py::test[blocks-filter_direct_col--ForceBlocks]
>> test.py::test[aggregate-group_by_hop_only_start--ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_by_hop_only_start--Results] [SKIPPED]
>> test.py::test[aggregate-group_by_mul_ru_ru--ForceBlocks]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_state [GOOD]
|58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[type_v3-uuid--Results] [GOOD]
>> test.py::test[window-rank/opt--ForceBlocks] [GOOD]
>> test.py::test[window-rank/opt--Results]
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[tpch-q4-default.txt-ForceBlocks] [GOOD]
>> test.py::test[tpch-q4-default.txt-Results]
>> test.py::test[limit-insert_with_limit--Results] [GOOD]
>> test.py::test[limit-limit--Results]
>> test.py::test[limit-insert_with_limit-dynamic-ForceBlocks] [GOOD]
>> test.py::test[limit-insert_with_limit-dynamic-Results]
>> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD]
>> test.py::test[aggregate-aggregate_with_default_yson_options-default.txt-Results] [GOOD]
>> test.py::test[aggregate-group_by_expr_only_join--ForceBlocks]
>> test.py::test[union_all-union_all_multiin--Results] [GOOD]
>> test.py::test[weak_field-optimize_weak_fields_combine--ForceBlocks]
>> test.py::test[pg-tpcds-q02-default.txt-Results] [GOOD]
>> test.py::test[pg-tpcds-q06-default.txt-ForceBlocks]
>> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-Results]
>> test.py::test[order_by-assume_over_input_desc--ForceBlocks] [GOOD]
>> test.py::test[order_by-assume_over_input_desc--Results]
>> test.py::test[join-mapjoin_early_rewrite_sequence-off-ForceBlocks] [GOOD]
>> test.py::test[join-mapjoin_early_rewrite_sequence-off-Results] [SKIPPED]
>> test.py::test[join-mergejoin_saves_output_sort--ForceBlocks]
>> test.py::test[json-jsondocument/insert--ForceBlocks] [GOOD]
>> test.py::test[json-jsondocument/insert--Results]
>> test.py::test[aggregate-group_by_gs_and_having-default.txt-Results] [GOOD]
>> test.py::test[aggregate-group_by_gs_subselect_asterisk-default.txt-ForceBlocks]
>> TraverseColumnShard::TraverseColumnTable
>> test.py::test[join-premap_common_semi--ForceBlocks] [GOOD]
>> test.py::test[pg-tpch-q08-default.txt-ForceBlocks] [GOOD]
>> test.py::test[join-premap_common_semi--Results]
>> DataStreams::TestReservedStorageMetering [GOOD]
>> DataStreams::TestReservedConsumersMetering
>> test.py::test[pg-tpch-q08-default.txt-Results]
>> AnalyzeDatashard::DropTableNavigateError
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::Test_Crreate_AutoPartitioning_Disabled [GOOD]
Test command err:
2025-06-03T10:23:50.798415Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666863381227797:2075];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:50.798625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f16/r3tmp/tmp3ToxpH/pdisk_1.dat
2025-06-03T10:23:50.904261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:50.904292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:50.906478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:50.914209Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 5644, node 1
2025-06-03T10:23:50.928416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:50.928431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:50.928435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:50.928502Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:27310
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:50.967518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:50.988708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:27310
2025-06-03T10:23:51.004568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:51.101169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710661:0, at schemeshard: 72057594046644480
2025-06-03T10:23:51.132819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-06-03T10:23:51.867506Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666864709597872:2080];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:51.867591Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f16/r3tmp/tmpTs9TLG/pdisk_1.dat
2025-06-03T10:23:51.893555Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 26543, node 4
2025-06-03T10:23:51.913497Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:51.913511Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:51.913514Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:51.913563Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:12995
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:51.967372Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:51.967420Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:51.969181Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:51.972891Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:52.002102Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:12995
2025-06-03T10:23:52.019326Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "0" shard_id: "shard-000002" } records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000002" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000002" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "6" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "7" shard_id: "shard-000002" } records { sequence_number: "8" shard_id: "shard-000002" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000002" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "10" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000000" } records { sequence_number: "11" shard_id: "shard-000002" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "8" shard_id: "shard-000000" } records { sequence_number: "9" shard_id: "shard-000000" }
ALTER_SCHEME: { Name: "test-topic" Split { Partition: 1 SplitBoundary: "a" } }
2025-06-03T10:23:53.065699Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 107:0, at schemeshard: 72057594046644480
2025-06-03T10:23:54.147209Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-06-03T10:23:54.176794Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-06-03T10:23:54.210332Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-06-03T10:23:54.266601Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715666:0, at schemeshard: 72057594046644480
2025-06-03T10:23:54.968495Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666880696479475:2074];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:23:54.968619Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f16/r3tmp/tmpwgGJma/pdisk_1.dat
2025-06-03T10:23:54.999223Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 4870, node 7
2025-06-03T10:23:55.049113Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:55.049129Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:55.049132Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:55.049197Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:8881
WaitRootIsUp 'Root'...
TClient::Ls request: Root
2025-06-03T10:23:55.072194Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:55.072233Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:55.074651Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:23:55.078778Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:55.102374Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480
TClient is connected to server localhost:8881
2025-06-03T10:23:55.122841Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:23:55.133951Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
>> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes
|58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[select-table_content_with_tmp_folder--ForceBlocks] [GOOD]
>> test.py::test[select-table_content_with_tmp_folder--Results]
>> test.py::test[key_filter-contains_tuples-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q14-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q14-default.txt-Results]
>> test.py::test[pg-tpcds-q36-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q36-default.txt-Results]
>> test.py::test[pg-tpcds-q97-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q97-default.txt-Results]
>> test.py::test[json-jsondocument/insert--Results] [GOOD]
>> test.py::test[key_filter-dependent_value-default.txt-ForceBlocks]
>> test.py::test[limit-insert_with_limit-dynamic-Results] [GOOD]
>> test.py::test[limit-many_top_sorts-default.txt-ForceBlocks]
>> test.py::test[order_by-assume_over_input_desc--Results] [GOOD]
>> test.py::test[key_filter-yql-8117-table_key_filter--ForceBlocks] [GOOD]
>> test.py::test[key_filter-yql-8117-table_key_filter--Results]
>> test.py::test[window-win_func_aggr_4func_sort_desc--ForceBlocks] [GOOD]
>> test.py::test[window-win_func_aggr_4func_sort_desc--Results]
>> test.py::test[tpch-q4-default.txt-Results] [GOOD]
>> test.py::test[type_v3-append_diff_layout1--ForceBlocks]
>> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs
>> TraverseColumnShard::TraverseServerlessColumnTable
|58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/pytest >> test.py::test[key_filter-contains_tuples-default.txt-ForceBlocks] [GOOD]
>> test.py::test[join-anyjoin_common_dup--Results] [GOOD]
>> test.py::test[join-anyjoin_common_dup-off-ForceBlocks]
>> test.py::test[join-premap_common_semi--Results] [GOOD]
>> test.py::test[join-premap_common_semi-off-ForceBlocks]
>> test.py::test[produce-reduce_all_field_subset--ForceBlocks] [GOOD]
>> test.py::test[produce-reduce_all_field_subset--Results]
>> test.py::test[limit-limit--Results] [GOOD]
|58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[order_by-assume_over_input_desc--Results] [GOOD]
>> test.py::test[lineage-select_field-default.txt-Results] [SKIPPED]
>> test.py::test[lineage-window_many-default.txt-Results] [SKIPPED]
>> test.py::test[lineage-with_inline-default.txt-Results] [SKIPPED]
>> test.py::test[multicluster-basic-default.txt-Results] [SKIPPED]
>> test.py::test[pg-tpcds-q97-default.txt-Results] [GOOD]
>> test.py::test[pg-tpch-q02-default.txt-ForceBlocks]
>> test.py::test[multicluster-partition_by_key_force--Results] [SKIPPED]
>> test.py::test[join-flatten_columns2--ForceBlocks] [GOOD]
>> test.py::test[join-flatten_columns2--Results]
>> test.py::test[select-table_content_with_tmp_folder--Results] [GOOD]
>> test.py::test[select-trivial_group_by-default.txt-ForceBlocks]
>> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-Results] [GOOD]
>> test.py::test[aggregate-group_by_rollup_column_ref--ForceBlocks]
>> test.py::test[blocks-filter_direct_col--ForceBlocks] [GOOD]
>> test.py::test[blocks-filter_direct_col--Results]
>> test.py::test[blocks-div_uint64--ForceBlocks] [GOOD]
>> test.py::test[blocks-div_uint64--Results]
>> test.py::test[window-rank/opt--Results] [GOOD]
>> test.py::test[window-win_by_all_percentile_interval-default.txt-ForceBlocks]
>> AnalyzeColumnshard::AnalyzeDeadline
>> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD]
>> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD]
>> test.py::test[key_filter-yql-8117-table_key_filter--Results] [GOOD]
>> test.py::test[lambda-lambda_with_tie-default.txt-ForceBlocks]
>> test.py::test[pg-tpcds-q06-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q06-default.txt-Results]
|58.7%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part2/pytest >> test.py::test[multicluster-partition_by_key_force--Results] [SKIPPED]
>> test.py::test[join-join_comp_common_table--Results] [GOOD]
>> test.py::test[join-join_without_column-off-ForceBlocks]
>> test.py::test[aggregate-aggregate_distinct_list-default.txt-Results] [GOOD]
>> test.py::test[aggregate-aggregate_with_deep_aggregated_column--Results]
>> test.py::test[pg-tpcds-q36-default.txt-Results] [GOOD]
>> test.py::test[pg-tpcds-q65-default.txt-ForceBlocks]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk/unittest >> TBsVDiskManyPutGetCheckSize::ManyPutGetCheckSize [GOOD]
Test command err:
2025-06-03T10:23:59.280962Z :BS_VDISK_GET CRIT: query_base.h:102: PDiskId# 1 VDISK[0:_:0:0:0]: (0) TEvVGetResult: Result message is too large; size# 67108001 orig# {ExtrQuery# [5000:1:0:0:0:100000:1] sh# 257 sz# 99743 c# 0}{ExtrQuery# [5000:1:1:0:0:100000:1] sh# 257 sz# 99743 c# 1}{ExtrQuery# [5000:1:2:0:0:100000:1] sh# 257 sz# 99743 c# 2}{ExtrQuery# [5000:1:3:0:0:100000:1] sh# 257 sz# 99743 c# 3}{ExtrQuery# [5000:1:4:0:0:100000:1] sh# 257 sz# 99743 c# 4}{ExtrQuery# [5000:1:5:0:0:100000:1] sh# 257 sz# 99743 c# 5}{ExtrQuery# [5000:1:6:0:0:100000:1] sh# 257 sz# 99743 c# 6}{ExtrQuery# [5000:1:7:0:0:100000:1] sh# 257 sz# 99743 c# 7}{ExtrQuery# [5000:1:8:0:0:100000:1] sh# 257 sz# 99743 c# 8}{ExtrQuery# [5000:1:9:0:0:100000:1] sh# 257 sz# 99743 c# 9}{ExtrQuery# [5000:1:10:0:0:100000:1] sh# 257 sz# 99743 c# 10}{ExtrQuery# [5000:1:11:0:0:100000:1] sh# 257 sz# 99743 c# 11}{ExtrQuery# [5000:1:12:0:0:100000:1] sh# 257 sz# 99743 c# 12}{ExtrQuery# [5000:1:13:0:0:100000:1] sh# 257 sz# 99743 c# 13}{ExtrQuery# [5000:1:14:0:0:100000:1] sh# 257 sz# 99743 c# 14}{ExtrQuery# [5000:1:15:0:0:100000:1] sh# 257 sz# 99743 c# 15}{ExtrQuery# [5000:1:16:0:0:100000:1] sh# 257 sz# 99743 c# 16}{ExtrQuery# [5000:1:17:0:0:100000:1] sh# 257 sz# 99743 c# 17}{ExtrQuery# [5000:1:18:0:0:100000:1] sh# 257 sz# 99743 c# 18}{ExtrQuery# [5000:1:19:0:0:100000:1] sh# 257 sz# 99743 c# 19}{ExtrQuery# [5000:1:20:0:0:100000:1] sh# 257 sz# 99743 c# 20}{ExtrQuery# [5000:1:21:0:0:100000:1] sh# 257 sz# 99743 c# 21}{ExtrQuery# [5000:1:22:0:0:100000:1] sh# 257 sz# 99743 c# 22}{ExtrQuery# [5000:1:23:0:0:100000:1] sh# 257 sz# 99743 c# 23}{ExtrQuery# [5000:1:24:0:0:100000:1] sh# 257 sz# 99743 c# 
24}{ExtrQuery# [5000:1:25:0:0:100000:1] sh# 257 sz# 99743 c# 25}{ExtrQuery# [5000:1:26:0:0:100000:1] sh# 257 sz# 99743 c# 26}{ExtrQuery# [5000:1:27:0:0:100000:1] sh# 257 sz# 99743 c# 27}{ExtrQuery# [5000:1:28:0:0:100000:1] sh# 257 sz# 99743 c# 28}{ExtrQuery# [5000:1:29:0:0:100000:1] sh# 257 sz# 99743 c# 29}{ExtrQuery# [5000:1:30:0:0:100000:1] sh# 257 sz# 99743 c# 30}{ExtrQuery# [5000:1:31:0:0:100000:1] sh# 257 sz# 99743 c# 31}{ExtrQuery# [5000:1:32:0:0:100000:1] sh# 257 sz# 99743 c# 32}{ExtrQuery# [5000:1:33:0:0:100000:1] sh# 257 sz# 99743 c# 33}{ExtrQuery# [5000:1:34:0:0:100000:1] sh# 257 sz# 99743 c# 34}{ExtrQuery# [5000:1:35:0:0:100000:1] sh# 257 sz# 99743 c# 35}{ExtrQuery# [5000:1:36:0:0:100000:1] sh# 257 sz# 99743 c# 36}{ExtrQuery# [5000:1:37:0:0:100000:1] sh# 257 sz# 99743 c# 37}{ExtrQuery# [5000:1:38:0:0:100000:1] sh# 257 sz# 99743 c# 38}{ExtrQuery# [5000:1:39:0:0:100000:1] sh# 257 sz# 99743 c# 39}{ExtrQuery# [5000:1:40:0:0:100000:1] sh# 257 sz# 99743 c# 40}{ExtrQuery# [5000:1:41:0:0:100000:1] sh# 257 sz# 99743 c# 41}{ExtrQuery# [5000:1:42:0:0:100000:1] sh# 257 sz# 99743 c# 42}{ExtrQuery# [5000:1:43:0:0:100000:1] sh# 257 sz# 99743 c# 43}{ExtrQuery# [5000:1:44:0:0:100000:1] sh# 257 sz# 99743 c# 44}{ExtrQuery# [5000:1:45:0:0:100000:1] sh# 257 sz# 99743 c# 45}{ExtrQuery# [5000:1:46:0:0:100000:1] sh# 257 sz# 99743 c# 46}{ExtrQuery# [5000:1:47:0:0:100000:1] sh# 257 sz# 99743 c# 47}{ExtrQuery# [5000:1:48:0:0:100000:1] sh# 257 sz# 99743 c# 48}{ExtrQuery# [5000:1:49:0:0:100000:1] sh# 257 sz# 99743 c# 49}{ExtrQuery# [5000:1:50:0:0:100000:1] sh# 257 sz# 99743 c# 50}{ExtrQuery# [5000:1:51:0:0:100000:1] sh# 257 sz# 99743 c# 51}{ExtrQuery# [5000:1:52:0:0:100000:1] sh# 257 sz# 99743 c# 52}{ExtrQuery# [5000:1:53:0:0:100000:1] sh# 257 sz# 99743 c# 53}{ExtrQuery# [5000:1:54:0:0:100000:1] sh# 257 sz# 99743 c# 54}{ExtrQuery# [5000:1:55:0:0:100000:1] sh# 257 sz# 99743 c# 55}{ExtrQuery# [5000:1:56:0:0:100000:1] sh# 257 sz# 99743 c# 56}{ExtrQuery# [5000:1:57:0:0:100000:1] sh# 257 sz# 99743 c# 57}{ExtrQuery# [5000:1:58:0:0:100000:1] sh# 257 sz# 99743 c# 58}{ExtrQuery# [5000:1:59:0:0:100000:1] sh# 257 sz# 99743 c# 59}{ExtrQuery# [5000:1:60:0:0:100000:1] sh# 257 sz# 99743 c# 60}{ExtrQuery# [5000:1:61:0:0:100000:1] sh# 257 sz# 99743 c# 61}{ExtrQuery# [5000:1:62:0:0:100000:1] sh# 257 sz# 99743 c# 62}{ExtrQuery# [5000:1:63:0:0:100000:1] sh# 257 sz# 99743 c# 63}{ExtrQuery# [5000:1:64:0:0:100000:1] sh# 257 sz# 99743 c# 64}{ExtrQuery# [5000:1:65:0:0:100000:1] sh# 257 sz# 99743 c# 65}{ExtrQuery# [5000:1:66:0:0:100000:1] sh# 257 sz# 99743 c# 66}{ExtrQuery# [5000:1:67:0:0:100000:1] sh# 257 sz# 99743 c# 67}{ExtrQuery# [5000:1:68:0:0:100000:1] sh# 257 sz# 99743 c# 68}{ExtrQuery# [5000:1:69:0:0:100000:1] sh# 257 sz# 99743 c# 69}{ExtrQuery# [5000:1:70:0:0:100000:1] sh# 257 sz# 99743 c# 70}{ExtrQuery# [5000:1:71:0:0:100000:1] sh# 257 sz# 99743 c# 71}{ExtrQuery# [5000:1:72:0:0:100000:1] sh# 257 sz# 99743 c# 72}{ExtrQuery# [5000:1:73:0:0:100000:1] sh# 257 sz# 99743 c# 73}{ExtrQuery# [5000:1:74:0:0:100000:1] sh# 257 sz# 99743 c# 74}{ExtrQuery# [5000:1:75:0:0:100000:1] sh# 257 sz# 99743 c# 75}{ExtrQuery# [5000:1:76:0:0:100000:1] sh# 257 sz# 99743 c# 76}{ExtrQuery# [5000:1:77:0:0:100000:1] sh# 257 sz# 99743 c# 77}{ExtrQuery# [5000:1:78:0:0:100000:1] sh# 257 sz# 99743 c# 78}{ExtrQuery# [5000:1:79:0:0:100000:1] sh# 257 sz# 99743 c# 79}{ExtrQuery# [5000:1:80:0:0:100000:1] sh# 257 sz# 99743 c# 80}{ExtrQuery# [5000:1:81:0:0:100000:1] sh# 257 sz# 99743 c# 81}{ExtrQuery# [5000:1:82:0:0:100000:1] sh# 257 sz# 99743 c# 82}{ExtrQuery# 
[5000:1:83:0:0:100000:1] sh# 257 sz# 99743 c# 83}{... ExtrQuery# entries c# 84 through c# 671 elided: each is [5000:1:<c>:0:0:100000:1] sh# 257 sz# 99743, and the original log was itself truncated between c# 191 and c# 484 ...}{ExtrQuery# [5000:1:672:0:0:17027:1] sh# 257 sz# 16770 c# 672} {MsgQoS} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0}; VDISK CAN NOT REPLY ON TEvVGet REQUEST
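The message above is a single oversized TEvVGet request: apparently 672 ExtrQuery entries of roughly 100 KB each, on the order of 67 MB asked for in one round trip, which the VDisk refuses to answer. When triaging such failures it is easier to condense the dump than to read it; a minimal sketch in Python, where the log path is hypothetical and the [Tablet:Generation:Step:Channel:Cookie:Size:PartId] reading of the blob id is an assumption rather than something this log states:

import re
from collections import Counter

# Matches entries shaped like:
#   ExtrQuery# [5000:1:83:0:0:100000:1] sh# 257 sz# 99743 c# 83
ENTRY = re.compile(r"ExtrQuery#\s*\[([\d:]+)\]\s*sh#\s*(\d+)\s*sz#\s*(\d+)\s*c#\s*(\d+)")

def summarize(dump):
    counters = []
    sizes = Counter()
    for _blob, _sh, sz, c in ENTRY.findall(dump):
        counters.append(int(c))
        sizes[int(sz)] += 1
    if not counters:
        return "no ExtrQuery entries"
    total_bytes = sum(size * n for size, n in sizes.items())
    return "%d entries, c# %d..%d, %.1f MB requested, sz histogram %s" % (
        len(counters), min(counters), max(counters), total_bytes / 1e6, dict(sizes))

with open("ya_log.txt") as f:  # hypothetical path to a saved copy of this log
    print(summarize(f.read()))

Run over the dump above, this would report the entry count, the c# range and the size histogram (two sizes here: 99743 for the full blobs and 16770 for the final short one).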
>> test.py::test[window-win_func_aggr_4func_sort_desc--Results] [GOOD] >> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--ForceBlocks] >> test.py::test[pg-tpcds-q14-default.txt-Results] [GOOD] >> AnalyzeColumnshard::AnalyzeStatus >> test.py::test[pg-tpcds-q25-default.txt-ForceBlocks] >> test.py::test[pg-tpch-q08-default.txt-Results] [GOOD] >> test.py::test[pg-wide_sort--ForceBlocks] |58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1 [GOOD] >> test.py::test[blocks-div_uint64--Results] [GOOD] >> test.py::test[weak_field-optimize_weak_fields_combine--ForceBlocks] [GOOD] >> test.py::test[weak_field-optimize_weak_fields_combine--Results] >> test.py::test[blocks-mod_uint64_opt2--ForceBlocks] >> test.py::test[aggr_factory-stddev-default.txt-Results] [GOOD] >> test.py::test[aggregate-aggregate_list_in_key-default.txt-Results] >> test.py::test[aggregate-group_by_expr_only_join--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_expr_only_join--Results] >> test.py::test[blocks-filter_direct_col--Results] [GOOD] >> test.py::test[blocks-interval_mul_scalar--ForceBlocks] >> test.py::test[produce-reduce_all_field_subset--Results] [GOOD] >> test.py::test[produce-reduce_all_multi_in-default.txt-ForceBlocks] >> test.py::test[produce-reduce_all_multi_in-default.txt-ForceBlocks] [SKIPPED] >> test.py::test[produce-reduce_all_multi_in-default.txt-Results] [SKIPPED] >> test.py::test[produce-reduce_multi_in_ref--ForceBlocks] |58.7%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[pg-tpcds-q06-default.txt-Results] [GOOD] >> test.py::test[join-flatten_columns2--Results] [GOOD] >> test.py::test[join-grace_join1-grace-ForceBlocks] >> test.py::test[aggregate-group_by_gs_subselect_asterisk-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_gs_subselect_asterisk-default.txt-Results] >> test.py::test[pg-tpcds-q11-default.txt-ForceBlocks] >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] >> test.py::test[key_filter-dependent_value-default.txt-ForceBlocks] [GOOD] >> test.py::test[key_filter-dependent_value-default.txt-Results] >> test.py::test[type_v3-append_diff_layout1--ForceBlocks] [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet >> test.py::test[limit-many_top_sorts-default.txt-ForceBlocks] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecords1MBMessagesOneByOneBySeqNo [GOOD] Test command err: 2025-06-03T10:23:47.796205Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666848817968256:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:47.796446Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f1e/r3tmp/tmpdpBO6Q/pdisk_1.dat TServer::EnableGrpc on GrpcPort 13157, node 1 2025-06-03T10:23:47.896954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:47.896987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:47.898613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:47.911978Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:47.912324Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:47.912335Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:47.912338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:47.912390Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17649 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:47.955797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:47.989706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:17649 2025-06-03T10:23:48.006688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:48.087146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:48.090382Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:23:48.090387Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:23:48.090389Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-03T10:23:48.090392Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-03T10:23:48.092408Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-03T10:23:48.092419Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-03T10:23:48.092424Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-03T10:23:48.092429Z node 1 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-03T10:23:48.706371Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666854663378420:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:48.706579Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f1e/r3tmp/tmpoihsMV/pdisk_1.dat 2025-06-03T10:23:48.737383Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4043, node 4 2025-06-03T10:23:48.748172Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:48.748183Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:48.748185Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:48.748220Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25125 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:48.806823Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:48.806853Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:48.808579Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:48.822619Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:48.852885Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:25125 2025-06-03T10:23:48.868970Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:48.949016Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:48.959458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:23:48.963569Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037889 not found 2025-06-03T10:23:48.963574Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037891 not found 2025-06-03T10:23:48.963576Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037890 not found 2025-06-03T10:23:48.963579Z node 4 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 4, TabletId: 72075186224037888 not found 2025-06-03T10:23:48.969490Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-03T10:23:48.969518Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-03T10:23:48.969523Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-03T10:23:48.969528Z node 4 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-03T10:23:49.579077Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666859429114968:2273];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:49.579126Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f1e/r3tmp/tmpO6QJgG/pdisk_1.dat 2025-06-03T10:23:49.615731Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31682, node 7 2025-06-03T10:23:49.633010Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:49.633030Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:49.633033Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:49.633091Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23010 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:49.678775Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:49.678806Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:49.680355Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:49.699425Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:49.713456Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:23010 2025-06-03T10:23:49.723821Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:49.758787Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:49.774778Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:23:49.785783Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:23:49.789866Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037890 not found 2025-06-03T10:23:49.789871Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037888 not found 2025-06-03T10:23:49.790090Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037889 not found 2025-06-03T10:23:49.790380Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037891 not found 2025-06-03T10:23:49.793877Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,2) wasn't found 2025-06-03T10:23:49.793890Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,4) wasn't found 2025-06-03T10:23:49.793894Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,3) wasn't found 2025-06-03T10:23:49.793897Z node 7 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found 2025-06-03T10:23:50.482714Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511666861954350433:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:50.482747Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f1e/r3tmp/tmpo0Zd1N/pdisk_1.dat TServer::EnableGrpc on GrpcPort 23019, node 10 2025-06-03T10:23:50.518776Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:50.519432Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:50.519443Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:50.519445Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:50.519497Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15229 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:50.583470Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:50.583508Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:50.585165Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:50.596320Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:50.616317Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:15229 2025-06-03T10:23:50.630899Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
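Each of the four node bring-ups in this test (nodes 1, 4, 7 and 10) prints the same HIVE, NET_CLASSIFIER and METADATA_PROVIDER warnings, differing only in timestamp, node id and ports. When scanning a long run it is quicker to collapse such lines into signatures and count them; a rough sketch, assuming the log has been saved to a file (path hypothetical):

import re
from collections import Counter

# Drop the "2025-...Z node N :" prefix so the same warning from different
# nodes and times collapses into a single signature.
PREFIX = re.compile(r"^\S+ node \d+ :")

def top_warnings(lines, n=10):
    sigs = Counter(
        PREFIX.sub("", line).strip()
        for line in lines
        if " WARN: " in line or " ERROR: " in line
    )
    return sigs.most_common(n)

with open("ya_log.txt") as f:  # hypothetical path to a saved copy of this log
    for sig, count in top_warnings(f):
        print(count, sig)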
2025-06-03T10:23:55.485740Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7511666861954350433:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:55.485808Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; >> test.py::test[limit-many_top_sorts-default.txt-Results] >> test.py::test[weak_field-optimize_weak_fields_combine--Results] [GOOD] >> test.py::test[window-distinct_over_window_full_frames--ForceBlocks] |58.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest |58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[type_v3-append_diff_layout1--ForceBlocks] [GOOD] >> test.py::test[join-premap_common_semi-off-ForceBlocks] [GOOD] >> test.py::test[join-premap_common_semi-off-Results] [SKIPPED] >> test.py::test[join-pullup_extend--ForceBlocks] >> DataStreams::TestReservedConsumersMetering [GOOD] >> test.py::test[select-trivial_group_by-default.txt-ForceBlocks] [GOOD] >> test.py::test[select-trivial_group_by-default.txt-Results] >> test.py::test[key_filter-dependent_value-default.txt-Results] [GOOD] >> test.py::test[key_filter-key_double_opt_suffix--ForceBlocks] [SKIPPED] >> test.py::test[key_filter-key_double_opt_suffix--Results] [SKIPPED] >> test.py::test[blocks-date_not_equals_scalar--ForceBlocks] [GOOD] >> test.py::test[blocks-date_not_equals_scalar--Results] >> test.py::test[lambda-lambda_with_tie-default.txt-ForceBlocks] [GOOD] >> test.py::test[lambda-lambda_with_tie-default.txt-Results] |58.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |58.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut |58.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk/test-results/unittest/{meta.json ... results_accumulator.log} |58.8%| [LD] {RESULT} $(B)/ydb/core/driver_lib/run/ut/ydb-core-driver_lib-run-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestReservedConsumersMetering [GOOD] Test command err: 2025-06-03T10:23:42.491551Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666826648882019:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:42.491579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f2c/r3tmp/tmppRBaRS/pdisk_1.dat TServer::EnableGrpc on GrpcPort 23730, node 1 TClient is connected to server localhost:16918 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:23:42.591913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:42.591940Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:42.593831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:42.596867Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:42.597095Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:42.597105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:42.597106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:42.597151Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:42.637547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:42.668718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:16918 2025-06-03T10:23:42.685624Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
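Later in this test's output, every record written to the metering file is echoed back ('Got line from metering file data: ...'), one JSON object per record with schema, usage {quantity, unit, start, finish}, tags and labels fields. A minimal sketch for totalling usage per schema, assuming the records are available as plain JSON lines (file name hypothetical):

import json
from collections import defaultdict

def usage_per_schema(lines):
    # One JSON object per line, shaped like the records echoed below.
    totals = defaultdict(lambda: defaultdict(int))
    for line in lines:
        rec = json.loads(line)
        usage = rec["usage"]
        totals[rec["schema"]][usage["unit"]] += usage["quantity"]
    return {schema: dict(units) for schema, units in totals.items()}

with open("metering.jsonl") as f:  # hypothetical export, one record per line
    print(usage_per_schema(f))

On the records echoed below this would sum, per schema, units such as put_events, second, mbyte*second and byte*second.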
2025-06-03T10:23:42.732604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715661:0, at schemeshard: 72057594046644480 encryption_type: NONE records { sequence_number: "0" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000004" } records { sequence_number: "0" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000004" } records { sequence_number: "1" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000001" } records { sequence_number: "1" shard_id: "shard-000009" } records { sequence_number: "0" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000001" } records { sequence_number: "0" shard_id: "shard-000007" } records { sequence_number: "1" shard_id: "shard-000007" } records { sequence_number: "0" shard_id: "shard-000000" } records { sequence_number: "2" shard_id: "shard-000007" } records { sequence_number: "3" shard_id: "shard-000004" } records { sequence_number: "2" shard_id: "shard-000005" } records { sequence_number: "0" shard_id: "shard-000003" } records { sequence_number: "2" shard_id: "shard-000009" } records { sequence_number: "1" shard_id: "shard-000008" } records { sequence_number: "1" shard_id: "shard-000000" } records { sequence_number: "1" shard_id: "shard-000006" } records { sequence_number: "2" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000001" } records { sequence_number: "4" shard_id: "shard-000009" } records { sequence_number: "4" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000000" } records { sequence_number: "4" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "5" shard_id: "shard-000001" } records { sequence_number: "5" shard_id: "shard-000009" } records { sequence_number: "5" shard_id: "shard-000004" } records { sequence_number: "3" shard_id: "shard-000005" } records { sequence_number: "2" shard_id: "shard-000008" } records { sequence_number: "6" shard_id: "shard-000004" } records { sequence_number: "7" shard_id: "shard-000004" } records { sequence_number: "4" shard_id: "shard-000005" } records { sequence_number: "6" shard_id: "shard-000001" } records { sequence_number: "6" shard_id: "shard-000009" } records { sequence_number: "2" shard_id: "shard-000006" } records { sequence_number: "7" shard_id: "shard-000001" } records { sequence_number: "3" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000007" } records { sequence_number: "4" shard_id: "shard-000000" } records { sequence_number: "5" shard_id: "shard-000007" } records { sequence_number: "8" shard_id: "shard-000004" } records { sequence_number: "5" shard_id: "shard-000005" } records { sequence_number: "1" shard_id: "shard-000003" } records { sequence_number: "7" shard_id: "shard-000009" } records { sequence_number: "3" shard_id: "shard-000008" } records { sequence_number: "5" shard_id: "shard-000000" } records { sequence_number: "3" shard_id: "shard-000006" } records { sequence_number: "6" shard_id: "shard-000000" } records { sequence_number: "8" shard_id: "shard-000009" } records { sequence_number: "8" shard_id: "shard-000001" 
} (... intervening PutRecords responses elided: records with sequence_number 9 through 18, spread over the same shards and in the same shape as the batches above ...) records { sequence_number: "18" shard_id: 
"shard-000001" } records { sequence_number: "19" shard_id: "shard-000009" } records { sequence_number: "19" shard_id: "shard-000004" } records { sequence_number: "15" shard_id: "shard-000000" } records { sequence_number: "19" shard_id: "shard-000001" } encryption_type: NONE records { sequence_number: "20" shard_id: "shard-000001" } records { sequence_number: "20" shard_id: "shard-000009" } records { sequence_number: "20" shard_id: "shard-000004" } records { sequence_number: "12" shard_id: "shard-000005" } records { sequence_number: "8" shard_id: "shard-000008" } records { sequence_number: "21" shard_id: "shard-000004" } records { sequence_number: "22" shard_id: "shard-000004" } recor ... older_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946236613-170","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":0,"unit":"second","start":1748946236,"finish":1748946236},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946236}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946236613-171","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":0,"unit":"mbyte*second","start":1748946236,"finish":1748946236},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946236}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946236613-172","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":0,"unit":"byte*second","start":1748946236,"finish":1748946236},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946236}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1748946236636-173","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1748946236,"finish":1748946237},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946237}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946236636-174","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1748946236,"finish":1748946237},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946237}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946236636-175","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1748946236,"finish":1748946237},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946237}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946236636-176","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946236,"finish":1748946237},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946237}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1748946237653-177","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1748946237,"finish":1748946238},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946238}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946237653-178","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1748946237,"finish":1748946238},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946238}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946237653-179","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1748946237,"finish":1748946238},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946238}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946237653-180","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946237,"finish":1748946238},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946238}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1748946238665-181","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1748946238,"finish":1748946239},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946239}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946238665-182","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1748946238,"finish":1748946239},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946239}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946238665-183","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1748946238,"finish":1748946239},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946239}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946238665-184","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946238,"finish":1748946239},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946239}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1748946239698-185","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1748946239,"finish":1748946240},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946240}' Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946239698-186","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1748946239,"finish":1748946240},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946240}' Got line from metering file data: 
'{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946239698-187","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1748946239,"finish":1748946240},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946240}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946239698-188","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946239,"finish":1748946240},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946240}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"put_units-root-72075186224037888-1748946240718-189","schema":"yds.events.puts.v1","tags":{},"usage":{"quantity":1,"unit":"put_events","start":1748946240,"finish":1748946241},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946241}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946240718-190","schema":"yds.throughput.reserved.v1","tags":{"reserved_throughput_bps":1048576,"reserved_consumers_count":2},"usage":{"quantity":1,"unit":"second","start":1748946240,"finish":1748946241},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946241}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"yds.reserved_resources-root-72075186224037888-1748946240718-191","schema":"yds.storage.reserved.v1","tags":{},"usage":{"quantity":56320,"unit":"mbyte*second","start":1748946240,"finish":1748946241},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"v1","source_id":"72075186224037888","source_wt":1748946241}'
Got line from metering file data: '{"cloud_id":"somecloud","folder_id":"somefolder","resource_id":"/Root/stream_TestReservedConsumersMetering","id":"used_storage-root-72075186224037888-1748946240718-192","schema":"ydb.serverless.v1","tags":{"ydb_size":0},"usage":{"quantity":1,"unit":"byte*second","start":1748946240,"finish":1748946241},"labels":{"datastreams_stream_name":"stream_TestReservedConsumersMetering","ydb_database":"root"},"version":"1.0.0","source_id":"72075186224037888","source_wt":1748946241}'
|58.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller
|58.8%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller
>> AnalyzeColumnshard::AnalyzeTwoColumnTables
>> test.py::test[aggregate-group_by_expr_only_join--Results] [GOOD]
|58.8%| [LD] {BAZEL_UPLOAD, SKIPPED}
$(B)/ydb/core/mind/bscontroller/ut_bscontroller/ydb-core-mind-bscontroller-ut_bscontroller
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[key_filter-key_double_opt_suffix--Results] [SKIPPED]
>> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave
>> test.py::test[aggregate-group_by_gs_few_empty--ForceBlocks]
>> test.py::test[limit-many_top_sorts-default.txt-Results] [GOOD]
>> test.py::test[aggregate-group_by_gs_subselect_asterisk-default.txt-Results] [GOOD]
>> test.py::test[aggregate-group_by_mul_gb_ru--ForceBlocks]
>> test.py::test[pg-tpcds-q65-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q65-default.txt-Results]
>> test.py::test[pg-tpcds-q25-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q25-default.txt-Results]
>> test.py::test[limit-sort_calc_limit--ForceBlocks]
>> test.py::test[join-mergejoin_saves_output_sort--ForceBlocks] [GOOD]
>> test.py::test[join-mergejoin_saves_output_sort--Results]
>> TraverseDatashard::TraverseOneTable
|58.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut
|58.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut
|58.8%| [LD] {RESULT} $(B)/ydb/library/ncloud/impl/ut/ydb-library-ncloud-impl-ut
>> test.py::test[pg-wide_sort--ForceBlocks] [GOOD]
>> test.py::test[blocks-mod_uint64_opt2--ForceBlocks] [GOOD]
>> test.py::test[blocks-mod_uint64_opt2--Results]
>> test.py::test[aggregate-group_by_rollup_column_ref--ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_by_rollup_column_ref--Results]
>> DataStreams::TestGetRecords1MBMessagesOneByOneByTS [GOOD]
>> DataStreams::TestGetRecordsStreamWithMultipleShards
>> test.py::test[join-join_without_column-off-ForceBlocks] [GOOD]
>> test.py::test[lambda-lambda_with_tie-default.txt-Results] [GOOD]
>> test.py::test[lineage-select_field_filter-default.txt-ForceBlocks]
>> test.py::test[join-join_without_column-off-Results] [SKIPPED]
>> test.py::test[join-lookupjoin_semi_subq--ForceBlocks]
>> test.py::test[lineage-select_field_filter-default.txt-ForceBlocks] [SKIPPED]
>> test.py::test[lineage-select_field_filter-default.txt-Results] [SKIPPED]
>> test.py::test[lineage-window_many-default.txt-ForceBlocks] [SKIPPED]
>> test.py::test[lineage-window_many-default.txt-Results] [SKIPPED]
>> test.py::test[aggregate-aggregate_with_deep_aggregated_column--Results] [GOOD]
>> test.py::test[aggregate-aggregate_with_lambda--Results]
>> test.py::test[join-anyjoin_common_dup-off-ForceBlocks] [GOOD]
>> test.py::test[join-anyjoin_common_dup-off-Results] [SKIPPED]
>> test.py::test[select-trivial_group_by-default.txt-Results] [GOOD]
>> test.py::test[select-trivial_where-one-ForceBlocks]
>> test.py::test[window-win_by_all_percentile_interval-default.txt-ForceBlocks] [GOOD]
>> test.py::test[window-win_by_all_percentile_interval-default.txt-Results]
>> test.py::test[window-mixed/aggregations--Results] [GOOD]
>> test.py::test[window-rank/plain--Results]
>> TraverseDatashard::TraverseTwoTables
>> DataStreams::TestGetRecordsStreamWithMultipleShards [GOOD]
>> DataStreams::TestGetRecordsWithBigSeqno
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[join-anyjoin_common_dup-off-Results] [SKIPPED]
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part12/pytest >> test.py::test[lineage-window_many-default.txt-Results] [SKIPPED]
|58.8%| [TM]
{default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/pytest >> test.py::test[pg-wide_sort--ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q25-default.txt-Results] [GOOD]
|58.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest
>> test.py::test[pg-tpcds-q50-default.txt-ForceBlocks]
>> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD]
>> DataStreams::TestGetRecordsWithBigSeqno [GOOD]
>> test.py::test[pg-tpcds-q11-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q11-default.txt-Results]
>> test.py::test[pg-tpcds-q65-default.txt-Results] [GOOD]
>> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--ForceBlocks] [GOOD]
>> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--Results]
>> test.py::test[join-grace_join1-grace-ForceBlocks] [GOOD]
>> test.py::test[join-grace_join1-grace-Results] [SKIPPED]
>> test.py::test[aggregate-group_by_mul_ru_ru--ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_by_mul_ru_ru--Results]
>> test.py::test[blocks-mod_uint64_opt2--Results] [GOOD]
>> test.py::test[blocks-not--ForceBlocks]
>> test.py::test[produce-reduce_multi_in_ref--ForceBlocks] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestGetRecordsWithBigSeqno [GOOD]
Test command err: 2025-06-03T10:23:34.666195Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666794715813761:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:34.666229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f38/r3tmp/tmpWGIhW7/pdisk_1.dat 2025-06-03T10:23:34.738913Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27854, node 1 2025-06-03T10:23:34.759064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:34.759091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:34.759093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:34.759139Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:34.766556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:34.766592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:34.768353Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21050 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:34.793489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:34.823862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:21050 2025-06-03T10:23:34.838794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:34.931185Z node 1 :PERSQUEUE ERROR: partition_read.cpp:677: [PQ: 72075186224037888, Partition: 0, State: StateIdle] reading from too big offset - topic stream_TestGetRecordsStreamWithSingleShard partition 0 client $without_consumer EndOffset 30 offset 100000 2025-06-03T10:23:36.547375Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666802083008646:2272];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:36.547400Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f38/r3tmp/tmpqXLeMn/pdisk_1.dat 2025-06-03T10:23:36.562865Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11965, node 4 2025-06-03T10:23:36.574377Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:36.574390Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:36.574392Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:36.574424Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7775 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:36.647568Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:36.647595Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:36.649101Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:36.652233Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:36.668246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:7775 2025-06-03T10:23:36.683311Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:41.549324Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7511666802083008646:2272];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:41.549362Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:23:51.558888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:23:51.558905Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f38/r3tmp/tmp6g1ZDX/pdisk_1.dat 2025-06-03T10:24:05.698270Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666926397221008:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:05.698431Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:05.724482Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27029, node 7 2025-06-03T10:24:05.749902Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:05.749917Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:05.749921Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:05.749994Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28506 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:24:05.790610Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:05.790649Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:05.797651Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:05.806749Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:05.858802Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:28506 2025-06-03T10:24:05.879637Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:07.194295Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511666936504995593:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:07.194459Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f38/r3tmp/tmp7zbDY2/pdisk_1.dat 2025-06-03T10:24:07.381890Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5850, node 10 2025-06-03T10:24:07.469732Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:07.469745Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:07.469747Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:07.469801Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:07.497607Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:07.497635Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:5204 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:24:07.513866Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:07.818297Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:07.841836Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:24:07.936124Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:5204 2025-06-03T10:24:07.997172Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:08.021533Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
>> test.py::test[produce-reduce_multi_in_ref--Results]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTablesTwoServerlessDbs [GOOD]
Test command err: 2025-06-03T10:23:58.450837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:58.450921Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:58.450955Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a8a/r3tmp/tmpCH3hVO/pdisk_1.dat 2025-06-03T10:23:58.599653Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23120, node 1 2025-06-03T10:23:58.747596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:58.747622Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:58.747628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:58.747743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:58.748406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:58.849911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:58.849960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:58.864834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23313 2025-06-03T10:23:59.255160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:00.306875Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:00.346335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:00.346378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:00.418866Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:00.419572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:00.682151Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682341Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682540Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:00.682593Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682660Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682698Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682719Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682745Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.682765Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.846023Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:00.846079Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:00.858695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:00.935285Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:00.959859Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:00.959898Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:00.979645Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:00.979977Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:00.980008Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:00.980015Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:00.980023Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:00.980031Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:00.980038Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:00.980046Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:00.980476Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:01.000522Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:01.000562Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:01.001698Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:24:01.021931Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
2025-06-03T10:24:01.022053Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:24:01.022371Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:24:01.028167Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:01.028197Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:01.028210Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:24:01.031624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:01.033609Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:01.033656Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:01.178371Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:01.320122Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:01.417628Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:24:02.039066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:24:02.683751Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:02.801917Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:24:02.801950Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:02.801970Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2564:2936], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:02.802259Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2565:2937] 2025-06-03T10:24:02.802367Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2565:2937], schemeshard id = 72075186224037899 2025-06-03T10:24:03.678059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:24:04.178924Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:04.373356Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037905 2025-06-03T10:24:04.373389Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037905 2025-06-03T10:24:04.373405Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:3046:3134], at schemeshard: 72075186224037905, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037905 2025-06-03T10:24:04.373635Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:3047:3135] 2025-06-03T10:24:04.373698Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:3047:3135], schemeshard id = 72075186224037905 2025-06-03T10:24:05.502179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3164:3394], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.502229Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.506475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037899 2025-06-03T10:24:05.748921Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3473:3444], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.754193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.754927Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:3478:3448]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:05.754986Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:05.755035Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 18446744073709551615 ] 2025-06-03T10:24:05.755045Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:3481:3451] 2025-06-03T10:24:05.755056Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:3481:3451] 2025-06-03T10:24:05.755276Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:3482:3367] 2025-06-03T10:24:05.755347Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:3481:3451], server id = [2:3482:3367], tablet id = 72075186224037894, status = OK 2025-06-03T10:24:05.755439Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:3482:3367], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:24:05.755456Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:24:05.755515Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:24:05.755527Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:3478:3448], StatRequests.size() = 1 2025-06-03T10:24:05.758839Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3486:3455], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.758887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.758989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3491:3460], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.760804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480 2025-06-03T10:24:05.981557Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:24:05.981592Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:24:06.098632Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:3481:3451], schemeshard count = 1 2025-06-03T10:24:06.377742Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:3493:3462], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-03T10:24:06.554766Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:3613:3536] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:06.574193Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:3636:3552]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:06.574253Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:06.574260Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:3636:3552], StatRequests.size() = 1 2025-06-03T10:24:06.612339Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtn3p3mat72791ay8knqpaa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGUxZjFlNjgtYTI3YmZiZTAtYTgzZTExY2YtNjY5MGMyZjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:24:06.680383Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72075186224037905 2025-06-03T10:24:07.136164Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3992:3616]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:07.136220Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:24:07.136351Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 1, schemeshard count = 1, urgent = 0 2025-06-03T10:24:07.136361Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:24:07.136417Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:24:07.136427Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3992:3616], StatRequests.size() = 1 2025-06-03T10:24:07.169641Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:4001:3625]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:07.169694Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-03T10:24:07.169701Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:4001:3625], StatRequests.size() = 1 2025-06-03T10:24:07.184984Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. Ctx: { TraceId: 01jwtn3qek0g2gw4dfxkvnqsc6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2EwMjRjZjUtNGVmYjI5NWYtNDQzZWZjNTMtZTRiZDdlMDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:24:07.263321Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:4047:3629]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:07.263977Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:07.263987Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:24:07.264034Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:07.264042Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:24:07.264050Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:24:07.288142Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-03T10:24:07.288224Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-03T10:24:07.288352Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:4071:3641]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:07.288895Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:07.288902Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:24:07.288949Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:07.288957Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:24:07.288966Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:24:07.289429Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-03T10:24:07.289462Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2
>> test.py::test[blocks-date_not_equals_scalar--Results] [GOOD]
>> test.py::test[blocks-decimal_op_decimal--ForceBlocks]
>> test.py::test[pg-tpch-q02-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpch-q02-default.txt-Results]
>> test.py::test[aggregate-group_by_rollup_column_ref--Results] [GOOD]
>> test.py::test[aggregate-group_by_ru_partition_by_grouping-default.txt-ForceBlocks]
>> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part8/pytest >> test.py::test[pg-tpcds-q65-default.txt-Results] [GOOD]
>> AnalyzeColumnshard::AnalyzeServerless
>> test.py::test[window-win_by_all_percentile_interval-default.txt-Results] [GOOD]
>> test.py::test[window-win_func_aggr_stat--ForceBlocks]
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo}
ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[join-grace_join1-grace-Results] [SKIPPED]
>> AnalyzeColumnshard::AnalyzeSameOperationId
>> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns
>> test.py::test[pg-tpcds-q11-default.txt-Results] [GOOD]
>> test.py::test[blocks-interval_mul_scalar--ForceBlocks] [GOOD]
>> test.py::test[blocks-interval_mul_scalar--Results]
>> test.py::test[join-pullup_extend--ForceBlocks] [GOOD]
>> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate
>> AutoConfig::GetASPoolsWith3CPUs [GOOD]
>> TraverseDatashard::TraverseOneTable [GOOD]
>> test.py::test[join-mergejoin_saves_output_sort--Results] [GOOD]
>> test.py::test[join-mergejoin_sorts_output_for_sort_right--ForceBlocks] [SKIPPED]
>> test.py::test[join-mergejoin_sorts_output_for_sort_right--Results]
>> test.py::test[join-mergejoin_sorts_output_for_sort_right--Results] [SKIPPED]
>> test.py::test[join-mergejoin_with_different_key_names_nonsorted--ForceBlocks]
>> BsControllerConfig::OverlayMap
|58.8%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[pg-tpcds-q11-default.txt-Results] [GOOD]
>> BsControllerConfig::OverlayMap [GOOD]
|58.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/driver_lib/run/ut/unittest >> AutoConfig::GetASPoolsWith3CPUs [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseOneTable [GOOD]
Test command err: 2025-06-03T10:24:06.506815Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:06.506875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:06.506900Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a64/r3tmp/tmp8ztaQx/pdisk_1.dat 2025-06-03T10:24:06.813563Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16746, node 1 2025-06-03T10:24:07.128333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:07.128373Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:07.128378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:07.128496Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:07.130150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:07.253621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:07.253665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:07.287669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28314 2025-06-03T10:24:08.436911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:11.458706Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:11.524416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:11.524453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:11.610166Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:11.617650Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:12.475338Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.476289Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.476336Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:12.476361Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.476401Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.476417Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.476435Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.477142Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.477161Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.701872Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:12.701926Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:12.726008Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:13.236034Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:13.408888Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:13.408929Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:13.518661Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:13.520259Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:13.520286Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:13.520292Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:13.520297Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:13.520304Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:13.520310Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:13.520317Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:13.520959Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:13.608386Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:13.608422Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:13.618029Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1877:2607] 2025-06-03T10:24:13.620055Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2617] 
2025-06-03T10:24:13.620098Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2617], schemeshard id = 72075186224037897 2025-06-03T10:24:13.621564Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:13.644172Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:13.644192Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:13.644202Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:13.649613Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:13.686272Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:13.686335Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:14.126570Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:14.440155Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:14.489697Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:15.704232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2219:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:15.704281Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:15.741957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:16.564516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2523:3112], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:16.564583Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:16.565067Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2528:3116]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:16.565117Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:16.565128Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2530:3118] 2025-06-03T10:24:16.565140Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2530:3118] 2025-06-03T10:24:16.581622Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2531:2984] 2025-06-03T10:24:16.581774Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2530:3118], server id = [2:2531:2984], tablet id = 72075186224037894, status = OK 2025-06-03T10:24:16.581832Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2531:2984], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:24:16.581851Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:24:16.581914Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:24:16.581928Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2528:3116], StatRequests.size() = 1 2025-06-03T10:24:16.595225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2535:3122], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:16.595263Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:16.595343Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2540:3127], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:16.634868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:24:16.865481Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:24:16.865510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:24:16.941484Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2530:3118], schemeshard count = 1 2025-06-03T10:24:17.341968Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2542:3129], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:24:17.519535Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2653:3197] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:17.544830Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2676:3213]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:17.544879Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:17.544886Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2676:3213], StatRequests.size() = 1 2025-06-03T10:24:17.570470Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn40mv39qrc54g8m67tgtj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc4NTdmMmYtYTc3NWFkNWItOGM0NjEzMy1hMjU4NTkw, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:24:17.631828Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:2725:3032]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:17.632639Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:17.632649Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:24:17.632944Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:17.632955Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:24:17.632964Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:24:17.645828Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-03T10:24:17.645941Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 >> test.py::test[produce-reduce_multi_in_ref--Results] [GOOD] >> test.py::test[produce-reduce_with_assume_in_subquery--ForceBlocks] [SKIPPED] >> test.py::test[produce-reduce_with_assume_in_subquery--Results] [SKIPPED] >> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--Results] [GOOD] >> test.py::test[window-win_multiaggr_list-default.txt-ForceBlocks] |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMap [GOOD] |58.9%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[join-pullup_extend--ForceBlocks] [GOOD] >> TNebiusAccessServiceTest::Authorize [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authorize 
[GOOD] Test command err: 2025-06-03T10:24:22.461121Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [122dbfbfa5f0] Connect to grpc://localhost:5529 2025-06-03T10:24:22.462775Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [122dbfbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-03T10:24:22.465948Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [122dbfbfa5f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user_id" } } } } } 2025-06-03T10:24:22.466230Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [122dbfbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "path_id" } } iam_token: "**** (79225CA9)" } } } 2025-06-03T10:24:22.466910Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [122dbfbfa5f0] Status 7 Permission Denied 2025-06-03T10:24:22.467090Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [122dbfbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "denied" } resource_path { path { id: "path_id" } } iam_token: "**** (717F937C)" } } } 2025-06-03T10:24:22.467551Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [122dbfbfa5f0] Status 7 Permission Denied 2025-06-03T10:24:22.467721Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [122dbfbfa5f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "perm" } resource_path { path { id: "p" } } iam_token: "**** (717F937C)" } } } 2025-06-03T10:24:22.468105Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [122dbfbfa5f0] Status 7 Permission Denied >> TraverseDatashard::TraverseTwoTables [GOOD] >> test.py::test[window-distinct_over_window_full_frames--ForceBlocks] [GOOD] |58.9%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[produce-reduce_with_assume_in_subquery--Results] [SKIPPED] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseDatashard::TraverseTwoTables [GOOD] Test command err: 2025-06-03T10:24:10.317902Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:10.317961Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:10.317986Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a5e/r3tmp/tmpkN6dYD/pdisk_1.dat 2025-06-03T10:24:10.971151Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13490, node 1 2025-06-03T10:24:11.545610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:11.545635Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:11.545639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:11.545733Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:11.548456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:11.684921Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:11.684959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:11.710331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24551 2025-06-03T10:24:12.306348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:18.506272Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:18.518158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:18.518194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:18.586168Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:18.593537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:18.955220Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.955346Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.955464Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:18.955503Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.955549Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.955566Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.955581Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.956306Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:18.956323Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:19.161861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:19.161902Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:19.173773Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:19.371450Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:19.417676Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:19.417702Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:19.451180Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:19.453771Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:19.453802Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:19.453808Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:19.453813Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:19.453819Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:19.453825Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:19.453832Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:19.454765Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:19.482693Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:19.482724Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:19.484666Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:24:19.497866Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
2025-06-03T10:24:19.497928Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:19.499026Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:24:19.507816Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:19.507837Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:19.507847Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:19.513816Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:19.518354Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:19.518385Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:19.737795Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:19.876382Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:19.969786Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:20.724789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2211:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:20.724824Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:20.729110Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:21.097907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2516:3106], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:21.097963Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:21.098435Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2521:3110]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:21.098481Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:21.098493Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2523:3112] 2025-06-03T10:24:21.098502Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2523:3112] 2025-06-03T10:24:21.098686Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2524:2983] 2025-06-03T10:24:21.098744Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2523:3112], server id = [2:2524:2983], tablet id = 72075186224037894, status = OK 2025-06-03T10:24:21.098791Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2524:2983], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:24:21.098805Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:24:21.098859Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:24:21.098868Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2521:3110], StatRequests.size() = 1 2025-06-03T10:24:21.102686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2528:3116], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:21.102718Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:21.102794Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2533:3121], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:21.105195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:24:21.237991Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:24:21.238036Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:24:21.333589Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2523:3112], schemeshard count = 1 2025-06-03T10:24:21.657915Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2535:3123], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:24:21.822182Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2644:3191] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:21.824157Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2667:3207]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:21.824191Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:21.824198Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2667:3207], StatRequests.size() = 1 2025-06-03T10:24:21.839979Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn452we1k1s57txk0k3t9s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTA1M2E1YjgtMWNlZjgyNmItOTQ1YjJhODAtMjQyZTI0ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:24:21.876049Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72075186224037897 2025-06-03T10:24:22.235645Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:3017:3273]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:22.235699Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:24:22.235705Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:3017:3273], StatRequests.size() = 1 2025-06-03T10:24:22.246349Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [1:3026:3282]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:22.246396Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-03T10:24:22.246402Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [1:3026:3282], StatRequests.size() = 1 2025-06-03T10:24:22.256815Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtn466r5z1b8zjyhfgbwwq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWUyNDFmMDMtN2JiYzA3ODYtZDA1ZTY4MDItMjQwNTc0NWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:24:22.349276Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:3076:3247]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:22.352554Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:22.352572Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:24:22.352640Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:24:22.352649Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:24:22.352657Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:24:22.358831Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-03T10:24:22.358945Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-03T10:24:22.359026Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:3100:3259]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:24:22.359614Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:22.359628Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:24:22.359692Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:24:22.359703Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:24:22.359711Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 5] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:24:22.360192Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 2 ], RowsCount[ 0 ] 2025-06-03T10:24:22.360269Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> test.py::test[select-trivial_where-one-ForceBlocks] [GOOD] >> test.py::test[aggregate-aggregate_with_lambda--Results] [GOOD] >> test.py::test[aggregate-compare_by_tuple--Results] >> test.py::test[select-trivial_where-one-Results] |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> test.py::test[join-lookupjoin_semi_subq--ForceBlocks] [GOOD] >> test.py::test[join-lookupjoin_semi_subq--Results] >> test.py::test[pg-tpcds-q50-default.txt-ForceBlocks] [GOOD] >> test.py::test[pg-tpcds-q50-default.txt-Results] >> test.py::test[blocks-interval_mul_scalar--Results] 
[GOOD] >> test.py::test[blocks-lazy_nonstrict_nested--ForceBlocks] |58.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |58.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |58.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_backup/ydb-core-tx-datashard-ut_incremental_backup |58.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> test.py::test[limit-sort_calc_limit--ForceBlocks] [GOOD] >> test.py::test[limit-sort_calc_limit--Results] >> test.py::test[aggregate-group_by_mul_gb_ru--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_mul_gb_ru--Results] |58.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |58.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |58.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection_reboots/tx-schemeshard-ut_backup_collection_reboots |58.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> test.py::test[blocks-not--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_gs_few_empty--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_gs_few_empty--Results] |59.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut |59.0%| [LD] {RESULT} $(B)/ydb/core/tx/replication/ydb_proxy/ut/ydb-core-tx-replication-ydb_proxy-ut >> test.py::test[blocks-not--Results] |59.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |59.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |59.0%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/slow/ydb-core-persqueue-ut-slow |59.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> test.py::test[blocks-decimal_op_decimal--ForceBlocks] [GOOD] >> test.py::test[blocks-decimal_op_decimal--Results] |59.0%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part16/pytest >> test.py::test[window-distinct_over_window_full_frames--ForceBlocks] [GOOD] |59.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |59.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |59.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless_reboots/ydb-core-tx-schemeshard-ut_serverless_reboots |59.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots >> TNebiusAccessServiceTest::Authenticate [GOOD] |59.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |59.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain_reboots/ydb-core-tx-schemeshard-ut_subdomain_reboots |59.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order |59.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> BsControllerConfig::Basic |59.0%| [LD] {RESULT} 
$(B)/ydb/core/tx/datashard/ut_order/ydb-core-tx-datashard-ut_order >> TNebiusAccessServiceTest::PassRequestId [GOOD] >> test.py::test[select-trivial_where-one-Results] [GOOD] >> test.py::test[select-unlabeled--ForceBlocks] >> test.py::test[pg-tpcds-q50-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q69-default.txt-ForceBlocks] >> BsControllerConfig::PDiskCreate |59.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> BsControllerConfig::ReassignGroupDisk >> BsControllerConfig::OverlayMapCrossReferences ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-03T10:24:24.809798Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [50d47fbfa5f0] Connect to grpc://localhost:24459 2025-06-03T10:24:24.810582Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [50d47fbfa5f0] Request AuthenticateRequest { iam_token: "**** (3C4833B6)" } 2025-06-03T10:24:24.812907Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [50d47fbfa5f0] Status 7 Permission Denied 2025-06-03T10:24:24.813065Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [50d47fbfa5f0] Request AuthenticateRequest { iam_token: "**** (86DDB286)" } 2025-06-03T10:24:24.813675Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [50d47fbfa5f0] Response AuthenticateResponse { account { user_account { id: "1234" } } } >> test.py::test[blocks-not--Results] [GOOD] >> test.py::test[blocks-top_sort_two_mix--ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ncloud/impl/ut/unittest >> TNebiusAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-03T10:24:24.901405Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [72367fbfa5f0]{reqId} Connect to grpc://localhost:62410 2025-06-03T10:24:24.902271Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72367fbfa5f0]{reqId} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-03T10:24:24.908026Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [72367fbfa5f0]{reqId} Response AuthenticateResponse { account { user_account { id: "1234" } } } >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> BsControllerConfig::ExtendByCreatingSeparateBox >> test.py::test[join-lookupjoin_semi_subq--Results] [GOOD] >> test.py::test[join-mergejoin_big_primary-off-ForceBlocks] >> test.py::test[limit-sort_calc_limit--Results] [GOOD] >> test.py::test[lineage-list_literal4-default.txt-ForceBlocks] [SKIPPED] >> test.py::test[lineage-list_literal4-default.txt-Results] [SKIPPED] >> test.py::test[optimizers-yql-8041-fuse_with_desc_map--ForceBlocks] >> BsControllerConfig::SelectAllGroups >> test.py::test[blocks-decimal_op_decimal--Results] [GOOD] >> test.py::test[blocks-interval_div--ForceBlocks] |59.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::OverlayMapCrossReferences [GOOD] >> test.py::test[window-win_func_aggr_stat--ForceBlocks] [GOOD] >> test.py::test[window-win_func_aggr_stat--Results] >> BsControllerConfig::ManyPDisksRestarts >> test.py::test[join-mergejoin_with_different_key_names_nonsorted--ForceBlocks] [GOOD] >> test.py::test[join-mergejoin_with_different_key_names_nonsorted--Results] |59.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |59.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows |59.1%| [TA] $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> BsControllerConfig::MergeIntersectingBoxes |59.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_upload_rows/ydb-core-tx-datashard-ut_upload_rows >> test.py::test[aggregate-group_by_mul_ru_ru--Results] [GOOD] >> test.py::test[aggregate-group_by_session--ForceBlocks] >> test.py::test[window-win_multiaggr_list-default.txt-ForceBlocks] [GOOD] >> test.py::test[window-win_multiaggr_list-default.txt-Results] >> test.py::test[pg-tpch-q02-default.txt-Results] [GOOD] >> test.py::test[pg-tpch-q19-default.txt-ForceBlocks] |59.1%| [TA] {RESULT} $(B)/ydb/library/ncloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test.py::test[aggregate-group_by_gs_few_empty--Results] [GOOD] >> test.py::test[aggregate-group_by_gs_simp--ForceBlocks] |59.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/driver_lib/run/ut/unittest >> test.py::test[aggregate-group_by_mul_gb_ru--Results] [GOOD] >> test.py::test[aggregate-group_by_rollup_grouping_hum_bind--ForceBlocks] >> test.py::test[blocks-lazy_nonstrict_nested--ForceBlocks] [GOOD] >> test.py::test[blocks-lazy_nonstrict_nested--Results] |59.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |59.1%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut |59.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/describes_ut/ydb-services-persqueue_v1-ut-describes_ut >> BsControllerConfig::SelectAllGroups [GOOD] >> test.py::test[aggregate-group_by_ru_partition_by_grouping-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_ru_partition_by_grouping-default.txt-Results] >> BsControllerConfig::AddDriveSerial >> test.py::test[window-win_func_aggr_stat--Results] [GOOD] >> test.py::test[select-unlabeled--ForceBlocks] [GOOD] >> IncrementalBackup::SimpleBackup >> BsControllerConfig::PDiskCreate [GOOD] >> BsControllerConfig::Basic [GOOD] >> BsControllerConfig::DeleteStoragePool >> test.py::test[join-mergejoin_with_different_key_names_nonsorted--Results] [GOOD] >> test.py::test[window-win_multiaggr_list-default.txt-Results] [GOOD] >> test.py::test[ypath-direct_read_from_dynamic--ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::SelectAllGroups [GOOD] Test command err: 2025-06-03T10:24:26.039401Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:26.040385Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:26.040467Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:26.040838Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:26.040953Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:26.041008Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.041014Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle 
TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.041061Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:26.042187Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:26.042218Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:26.042259Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:26.042279Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.042292Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.042301Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion >> test.py::test[blocks-lazy_nonstrict_nested--Results] [GOOD] >> test.py::test[pg-tpcds-q69-default.txt-ForceBlocks] [GOOD] >> test.py::test[blocks-member--ForceBlocks] >> test.py::test[pg-tpcds-q69-default.txt-Results] >> BsControllerConfig::ReassignGroupDisk [GOOD] |59.1%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/pytest >> test.py::test[window-win_func_aggr_stat--Results] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::PDiskCreate [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] Leader for TabletID 72057594037932033 is [1:223:2078] sender: [1:225:2066] recipient: [1:189:2076] 2025-06-03T10:24:25.051810Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:25.052572Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:25.052662Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:25.053077Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:25.053190Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:25.053259Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:25.053266Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:25.053335Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:25.054514Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:25.054561Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:25.054604Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:25.054619Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 
2025-06-03T10:24:25.054629Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:25.054636Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:223:2078] sender: [1:248:2066] recipient: [1:20:2067] 2025-06-03T10:24:25.065140Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:25.065209Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:25.076984Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:25.077059Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:25.077077Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:25.077090Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:25.077122Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:25.077131Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:25.077137Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:25.077148Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:25.091437Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:25.091501Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:25.105570Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:25.105648Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:25.105887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:25.105894Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:25.105939Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:25.105953Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:25.109968Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } } } Command { QueryBaseConfig { } } } 2025-06-03T10:24:25.110155Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-03T10:24:25.110163Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-03T10:24:25.110168Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-03T10:24:25.110173Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-03T10:24:25.110178Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-03T10:24:25.110183Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-03T10:24:25.110188Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-03T10:24:25.110194Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-03T10:24:25.110199Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-03T10:24:25.110203Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-03T10:24:25.110209Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-03T10:24:25.110216Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-03T10:24:25.110225Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-03T10:24:25.110230Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-03T10:24:25.110235Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-03T10:24:25.110240Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-03T10:24:25.110244Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-03T10:24:25.110249Z node 1 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-03T10:24:25.110254Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-03T10:24:25.110259Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-03T10:24:25.110264Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-03T10:24:25.110269Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-03T10:24:25.110274Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-03T10:24:25.110279Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-03T10:24:25.110284Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-03T10:24:25.110289Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-03T10:24:25.110294Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-03T10:24:25.110306Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-03T10:24:25.110311Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-03T10:24:25.110316Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: [11:194:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: [11:194:2076] Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:226:2066] recipient: [11:194:2076] 2025-06-03T10:24:26.667148Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:26.667384Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:26.667436Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:26.667645Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:26.667723Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:26.667750Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.667755Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.667804Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:26.668523Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:26.668550Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:26.668576Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 
2025-06-03T10:24:26.668589Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.668599Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.668606Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:248:2066] recipient: [11:20:2067] 2025-06-03T10:24:26.679113Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:26.679178Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.689586Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.689655Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.689675Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.689690Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.689727Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.689737Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.689745Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.689759Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.700053Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.700087Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.710387Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.710448Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:26.710670Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:26.710679Z node 11 :BS_CONTROLLER DEBUG: 
{BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:26.710727Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:26.710737Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:26.711002Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } } } Command { QueryBaseConfig { } } } 2025-06-03T10:24:26.711138Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-03T10:24:26.711146Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-03T10:24:26.711152Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1002 Path# /dev/disk3 2025-06-03T10:24:26.711158Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk1 2025-06-03T10:24:26.711164Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1001 Path# /dev/disk2 2025-06-03T10:24:26.711171Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1002 Path# /dev/disk3 2025-06-03T10:24:26.711177Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk1 2025-06-03T10:24:26.711183Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1001 Path# /dev/disk2 2025-06-03T10:24:26.711189Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1002 Path# /dev/disk3 2025-06-03T10:24:26.711194Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk1 2025-06-03T10:24:26.711200Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1001 Path# /dev/disk2 2025-06-03T10:24:26.711210Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1002 Path# /dev/disk3 2025-06-03T10:24:26.711215Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk1 2025-06-03T10:24:26.711221Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1001 Path# /dev/disk2 2025-06-03T10:24:26.711226Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1002 Path# /dev/disk3 2025-06-03T10:24:26.711231Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 16:1000 Path# /dev/disk1 2025-06-03T10:24:26.711237Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1001 Path# /dev/disk2 2025-06-03T10:24:26.711242Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1002 Path# /dev/disk3 2025-06-03T10:24:26.711247Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk1 2025-06-03T10:24:26.711255Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1001 Path# /dev/disk2 2025-06-03T10:24:26.711261Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1002 Path# /dev/disk3 2025-06-03T10:24:26.711266Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk1 2025-06-03T10:24:26.711272Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1001 Path# /dev/disk2 2025-06-03T10:24:26.711281Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1002 Path# /dev/disk3 2025-06-03T10:24:26.711286Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk1 2025-06-03T10:24:26.711292Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1001 Path# /dev/disk2 2025-06-03T10:24:26.711298Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1002 Path# /dev/disk3 2025-06-03T10:24:26.711303Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk1 2025-06-03T10:24:26.711308Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1001 Path# /dev/disk2 2025-06-03T10:24:26.711313Z node 11 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1002 Path# /dev/disk3 >> test.py::test[window-rank/plain--Results] [GOOD] >> test.py::test[blocks-top_sort_two_mix--ForceBlocks] [GOOD] >> test.py::test[window-row_number_no_part_multi_input-default.txt-Results] >> test.py::test[blocks-top_sort_two_mix--Results] |59.1%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part12/pytest >> test.py::test[join-mergejoin_with_different_key_names_nonsorted--Results] [GOOD] |59.1%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part2/pytest >> test.py::test[select-unlabeled--ForceBlocks] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ReassignGroupDisk [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:279:2068] recipient: [1:245:2078] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:279:2068] recipient: [1:245:2078] Leader for TabletID 72057594037932033 is [1:288:2080] sender: [1:291:2068] recipient: [1:245:2078] 2025-06-03T10:24:25.126125Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:25.127315Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:25.127413Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction 
started 2025-06-03T10:24:25.127939Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:25.128015Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:25.128051Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:25.128057Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:25.128157Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:25.129452Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:25.129488Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:25.129545Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:25.129569Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:25.129585Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:25.129594Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:288:2080] sender: [1:314:2068] recipient: [1:22:2069] 2025-06-03T10:24:25.141597Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:25.141673Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:25.153571Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:25.153640Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:25.153660Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:25.153673Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:25.153709Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:25.153718Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:25.153725Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:25.153738Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from 
queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:25.164362Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:25.164430Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:25.174864Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:25.174939Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:25.175180Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:25.175189Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:25.175234Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:25.175248Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:25.178486Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:24:25.178729Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk 2025-06-03T10:24:25.178741Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk 2025-06-03T10:24:25.178747Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk 2025-06-03T10:24:25.178753Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk 2025-06-03T10:24:25.178759Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk 2025-06-03T10:24:25.178765Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk 2025-06-03T10:24:25.178771Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk 2025-06-03T10:24:25.178776Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 
Path# /dev/disk 2025-06-03T10:24:25.178781Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk 2025-06-03T10:24:25.178787Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk 2025-06-03T10:24:25.178801Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk 2025-06-03T10:24:25.178806Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 12:1000 Path# /dev/disk Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-03T10:24:25.192793Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { Success: true } Success: true ConfigTxSeqNo: 2 Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:279:2068] recipient: [13:245:2078] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [13:279:2068] recipient: [13:245:2078] Leader for TabletID 72057594037932033 is [13:291:2080] sender: [13:292:2068] recipient: [13:245:2078] 2025-06-03T10:24:27.323586Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:27.323773Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:27.323821Z node 13 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:27.323882Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:27.324037Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:27.324055Z node 13 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.324059Z node 13 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.324100Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:27.324768Z node 13 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:27.324786Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:27.324809Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:27.324821Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.324830Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.324838Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [13:291:2080] sender: [13:314:2068] recipient: [13:22:2069] 2025-06-03T10:24:27.335233Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:27.335285Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.345625Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.345676Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.345690Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.345700Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.345727Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.345733Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.345738Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.345744Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.356090Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.356152Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.366466Z node 13 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.366517Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:27.366701Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:27.366707Z node 13 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:27.366743Z node 13 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:27.366750Z node 13 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:27.366962Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 2 Drive { Path: "/dev/disk" } } } Command { DefineBox { BoxId: 1 Name: "box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 2 } Host { Key { Fqdn: "::1" 
IcPort: 12005 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 2 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 2 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 8 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:24:27.367058Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 13:1000 Path# /dev/disk 2025-06-03T10:24:27.367063Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 14:1000 Path# /dev/disk 2025-06-03T10:24:27.367070Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 15:1000 Path# /dev/disk 2025-06-03T10:24:27.367073Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 16:1000 Path# /dev/disk 2025-06-03T10:24:27.367076Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 17:1000 Path# /dev/disk 2025-06-03T10:24:27.367080Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 18:1000 Path# /dev/disk 2025-06-03T10:24:27.367083Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 19:1000 Path# /dev/disk 2025-06-03T10:24:27.367086Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 20:1000 Path# /dev/disk 2025-06-03T10:24:27.367089Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 21:1000 Path# /dev/disk 2025-06-03T10:24:27.367093Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 22:1000 Path# /dev/disk 2025-06-03T10:24:27.367096Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 23:1000 Path# /dev/disk 2025-06-03T10:24:27.367099Z node 13 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 24:1000 Path# /dev/disk Response# Status { Success: true } Status { Success: true } Status { Success: true } Success: true ConfigTxSeqNo: 1 2025-06-03T10:24:27.379373Z node 13 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { UpdateDriveStatus { HostKey { NodeId: 1 } Path: "/dev/disk" Status: INACTIVE } } } Response# Status { ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" FailReason: kHostNotFound FailParam { NodeId: 1 } } ErrorDescription: "Host not found NodeId# 1 HostKey# NodeId: 1\n incorrect" >> test.py::test[optimizers-yql-8041-fuse_with_desc_map--ForceBlocks] [GOOD] >> test.py::test[optimizers-yql-8041-fuse_with_desc_map--Results] >> TPQTestSlow::TestOnDiskStoredSourceIds >> test.py::test[blocks-interval_div--ForceBlocks] [GOOD] >> test.py::test[blocks-interval_div--Results] >> test.py::test[join-mergejoin_big_primary-off-ForceBlocks] [GOOD] >> TBlobStorageHullFreshSegment::PerfAppendix >> test.py::test[join-mergejoin_big_primary-off-Results] [SKIPPED] >> test.py::test[join-mergejoin_force_align1--ForceBlocks] [SKIPPED] >> test.py::test[join-mergejoin_force_align1--Results] [SKIPPED] >> 
test.py::test[join-mergejoin_with_reverse_key_order-off-ForceBlocks] >> TPQTestSlow::TestWriteVeryBigMessage >> test.py::test[aggregate-aggregate_list_in_key-default.txt-Results] [GOOD] >> test.py::test[aggregate-aggregate_with_const_yson_options-default.txt-Results] >> BsControllerConfig::AddDriveSerial [GOOD] >> BsControllerConfig::AddDriveSerialMassive >> test.py::test[pg-tpcds-q69-default.txt-Results] [GOOD] >> test.py::test[pg-tpcds-q76-default.txt-ForceBlocks] >> IncrementalBackup::SimpleBackup [GOOD] >> IncrementalBackup::MultiRestore >> TBlobStorageHullFresh::AppendixPerf >> test.py::test[blocks-top_sort_two_mix--Results] [GOOD] >> test.py::test[aggregate-group_by_ru_partition_by_grouping-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_compact_sorted--ForceBlocks] >> test.py::test[aggregate-group_by_session--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_session--Results] >> DataStreams::TestGetRecordsWithCount [GOOD] >> DataStreams::TestInvalidRetentionCombinations >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite |59.1%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part3/pytest >> test.py::test[blocks-top_sort_two_mix--Results] [GOOD] >> test.py::test[aggregate-compare_by_tuple--Results] [GOOD] >> test.py::test[aggregate-dedup_state_keys--Results] >> DataStreams::TestInvalidRetentionCombinations [GOOD] >> test.py::test[blocks-interval_div--Results] [GOOD] >> test.py::test[blocks-interval_sub_interval_scalar--ForceBlocks] >> test.py::test[pg-tpch-q19-default.txt-ForceBlocks] [GOOD] >> test.py::test[ypath-direct_read_from_dynamic--ForceBlocks] [GOOD] >> test.py::test[optimizers-yql-8041-fuse_with_desc_map--Results] [GOOD] >> test.py::test[ypath-direct_read_from_dynamic--Results] |59.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |59.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots |59.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_user_attributes_reboots/core-tx-schemeshard-ut_user_attributes_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/datastreams/ut/unittest >> DataStreams::TestInvalidRetentionCombinations [GOOD] Test command err: 2025-06-03T10:23:47.390189Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511666851009289994:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:47.390228Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f20/r3tmp/tmpWdfuNF/pdisk_1.dat 2025-06-03T10:23:47.453561Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61949, node 1 2025-06-03T10:23:47.471806Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:47.471821Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:47.471824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-03T10:23:47.471877Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4076 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:23:47.493336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:47.493367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:47.496418Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:47.523806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:47.546147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:4076 2025-06-03T10:23:47.562358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:48.295168Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511666855520627906:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:48.295263Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f20/r3tmp/tmpMIUEVa/pdisk_1.dat 2025-06-03T10:23:48.328832Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14215, node 4 2025-06-03T10:23:48.337057Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:48.337076Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:48.337078Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:48.337133Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:23:48.395562Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:48.395604Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:48.397330Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:48.411543Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:48.433562Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:9367 2025-06-03T10:23:48.447858Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:23:48.483260Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:23:48.485552Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7511666855520629108:2811], for# user2@builtin, access# DescribeSchema 2025-06-03T10:23:48.486161Z node 4 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [4:7511666855520629111:2812], for# user2@builtin, access# DescribeSchema 2025-06-03T10:23:48.486965Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:23:49.187638Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511666859999091132:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:49.187692Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f20/r3tmp/tmprGWQuy/pdisk_1.dat 2025-06-03T10:23:49.202232Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64214, node 7 2025-06-03T10:23:49.216023Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:49.216044Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:49.216047Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:49.216091Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23585 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:23:49.287969Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:49.287997Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:49.289521Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:49.292743Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:49.310009Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:23585 2025-06-03T10:23:49.324788Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:23:54.188645Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7511666859999091132:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:23:54.188685Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:24:04.197374Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:24:04.197412Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:33.846490Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667046676522615:2146];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f20/r3tmp/tmppFS1j6/pdisk_1.dat 2025-06-03T10:24:33.851489Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:33.866055Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24267, node 10 2025-06-03T10:24:33.879557Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:33.879573Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:33.879575Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:33.879627Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30467 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:33.946672Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:33.946710Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:33.948368Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:33.951584Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:34.003505Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:30467 2025-06-03T10:24:34.020827Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting...
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 168, storage 10, code: 500080
: Error: retention hours and storage megabytes must fit one of: { hours : [0, 24], storage : [0, 0]}, { hours : [0, 168], storage : [51200, 1048576]}, provided values: hours 144, storage 0, code: 500080
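[editor's sketch] The two retention errors above and the two write_speed errors that follow belong to the output of DataStreams::TestInvalidRetentionCombinations, which is marked [GOOD]: the test evidently submits out-of-range stream settings on purpose and checks that they are rejected. Below is a minimal sketch of the two validation rules being exercised, reconstructed only from the error messages themselves — the window bounds and the allowed-speed set are quoted verbatim, while the function and parameter names are hypothetical and not YDB's actual code:

    #include <cstdint>

    // Retention must fit one of the two (hours, storage-MB) windows quoted in
    // the errors: { hours: [0, 24], storage: [0, 0] } -- time-based retention
    // only -- or { hours: [0, 168], storage: [51200, 1048576] }, i.e. 50 GiB
    // to 1 TiB if the megabytes are binary.
    bool RetentionIsValid(int64_t hours, int64_t storageMb) {
        const bool timeOnly = hours >= 0 && hours <= 24 && storageMb == 0;
        const bool withStorage = hours >= 0 && hours <= 168 &&
                                 storageMb >= 51200 && storageMb <= 1048576;
        return timeOnly || withStorage;
    }

    // Per-partition write speed must be exactly one of the three quoted
    // values: 131072 (128 KiB/s), 524288 (512 KiB/s) or 1048576 (1 MiB/s).
    bool WriteSpeedIsValid(int64_t bytesPerSecond) {
        return bytesPerSecond == 131072 ||
               bytesPerSecond == 524288 ||
               bytesPerSecond == 1048576;
    }

Both rejected retention inputs, (hours 168, storage 10) and (hours 144, storage 0), fall outside each window, and the rejected speeds sit exactly 1 KiB outside the allowed set: 130048 = 131072 - 1024 and 1049600 = 1048576 + 1024.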
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 130048, code: 500080
: Error: write_speed per second in partition must have values from set {131072,524288,1048576}, got 1049600, code: 500080 >> test.py::test[blocks-member--ForceBlocks] [GOOD] >> test.py::test[blocks-member--Results] >> IncrementalBackup::MultiRestore [GOOD] >> IncrementalBackup::E2EBackupCollection >> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier >> TBlobStorageHullFreshSegment::PerfAppendix [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList >> DataShardOutOfOrder::TestImmediateQueueThenSplit+UseSink [GOOD] >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink |59.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |59.2%| [LD] {RESULT} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |59.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ldap_auth_provider/ut/ydb-core-security-ldap_auth_provider-ut |59.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut >> test.py::test[aggregate-group_by_gs_simp--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_gs_simp--Results] >> test.py::test[aggregate-group_by_session--Results] [GOOD] >> test.py::test[aggregate-group_by_session_distinct_compact--ForceBlocks] |59.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut |59.2%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/ydb-services-persqueue_v1-ut-new_schemecache_ut >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed+EvWrite [GOOD] >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite >> test.py::test[aggregate-group_by_rollup_grouping_hum_bind--ForceBlocks] [GOOD] >> BsControllerConfig::AddDriveSerialMassive [GOOD] |59.2%| [TA] $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[pg-tpch-q19-default.txt-ForceBlocks] [GOOD] >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD] |59.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part16/pytest >> test.py::test[optimizers-yql-8041-fuse_with_desc_map--Results] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::AddDriveSerialMassive [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] Leader for TabletID 72057594037932033 is [1:223:2078] sender: [1:225:2066] recipient: [1:189:2076] 2025-06-03T10:24:28.295237Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:28.296313Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:28.296385Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:28.296659Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:28.296831Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:28.296876Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:28.296880Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:28.296921Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:28.298035Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:28.298073Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:28.298127Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:28.298150Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:28.298168Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:28.298179Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:223:2078] sender: [1:247:2066] recipient: [1:20:2067] 2025-06-03T10:24:28.310014Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:28.310074Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:28.320630Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:28.320690Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:28.320708Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:28.320722Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:28.320754Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:28.320764Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:28.320772Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:28.320781Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:28.331121Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:28.331181Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:28.341569Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:28.341637Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:28.341878Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:28.341886Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:28.341928Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:28.341941Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:28.344590Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-03T10:24:28.345109Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } 2025-06-03T10:24:28.345224Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } } Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: [11:194:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: [11:194:2076] Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:226:2066] recipient: [11:194:2076] 2025-06-03T10:24:29.968942Z node 11 
:BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:24:29.969210Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:24:29.969275Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:24:29.974134Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:24:29.974334Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:24:29.974384Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:24:29.974392Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:24:29.974461Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:24:29.975786Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2025-06-03T10:24:29.975837Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2025-06-03T10:24:29.975873Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2025-06-03T10:24:29.975904Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:24:29.975919Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:24:29.975928Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:247:2066] recipient: [11:20:2067]
2025-06-03T10:24:29.989592Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2025-06-03T10:24:29.989664Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:24:30.001580Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:24:30.001646Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2025-06-03T10:24:30.001666Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2025-06-03T10:24:30.001681Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2025-06-03T10:24:30.001718Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2025-06-03T10:24:30.001728Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2025-06-03T10:24:30.001735Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2025-06-03T10:24:30.001748Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2025-06-03T10:24:30.013592Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2025-06-03T10:24:30.013659Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2025-06-03T10:24:30.025616Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2025-06-03T10:24:30.025680Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute
2025-06-03T10:24:30.025894Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete
2025-06-03T10:24:30.025902Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished
2025-06-03T10:24:30.025948Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started
2025-06-03T10:24:30.025957Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed
2025-06-03T10:24:30.026180Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_123" BoxId: 1 } } }
2025-06-03T10:24:30.026451Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# ...
ommand { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } }
2025-06-03T10:24:32.258688Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } }
2025-06-03T10:24:32.258758Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } }
2025-06-03T10:24:32.258824Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } }
2025-06-03T10:24:32.258897Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } }
2025-06-03T10:24:32.259170Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } }
2025-06-03T10:24:32.259242Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } }
2025-06-03T10:24:32.259327Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } }
2025-06-03T10:24:32.259398Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } }
2025-06-03T10:24:32.259688Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } }
2025-06-03T10:24:32.259762Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } }
2025-06-03T10:24:32.260032Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } }
2025-06-03T10:24:32.260332Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } }
2025-06-03T10:24:32.260410Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } }
2025-06-03T10:24:32.260485Z node 21 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } }
Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:215:2066] recipient: [31:193:2076]
IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [31:215:2066] recipient: [31:193:2076]
Leader for TabletID 72057594037932033 is [31:223:2078] sender: [31:227:2066] recipient: [31:193:2076]
2025-06-03T10:24:33.810537Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:24:33.810825Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:24:33.810890Z node 31 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:24:33.811107Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:24:33.811272Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:24:33.811325Z node 31 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:24:33.811333Z node 31 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:24:33.811387Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:24:33.812524Z node 31 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2025-06-03T10:24:33.812558Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2025-06-03T10:24:33.812591Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2025-06-03T10:24:33.812612Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:24:33.812626Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:24:33.812638Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
Leader for TabletID 72057594037932033 is [31:223:2078] sender: [31:247:2066] recipient: [31:20:2067]
2025-06-03T10:24:33.823071Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2025-06-03T10:24:33.823127Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:24:33.833500Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:24:33.833553Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2025-06-03T10:24:33.833571Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo
2025-06-03T10:24:33.833584Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2025-06-03T10:24:33.833614Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk
2025-06-03T10:24:33.833624Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2025-06-03T10:24:33.833631Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus
2025-06-03T10:24:33.833640Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2025-06-03T10:24:33.843974Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo
2025-06-03T10:24:33.844027Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2025-06-03T10:24:33.854349Z node 31 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2
2025-06-03T10:24:33.854399Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute
2025-06-03T10:24:33.854629Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete
2025-06-03T10:24:33.854638Z node 31 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished
2025-06-03T10:24:33.854679Z node 31 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started
2025-06-03T10:24:33.854688Z node 31 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed
2025-06-03T10:24:33.854845Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_0" BoxId: 1 } } }
2025-06-03T10:24:33.855133Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_1" BoxId: 1 } } }
2025-06-03T10:24:33.855224Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_2" BoxId: 1 } } }
2025-06-03T10:24:33.855313Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_3" BoxId: 1 } } }
2025-06-03T10:24:33.855400Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_4" BoxId: 1 } } }
2025-06-03T10:24:33.855494Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_5" BoxId: 1 } } }
2025-06-03T10:24:33.855579Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_6" BoxId: 1 } } }
2025-06-03T10:24:33.855663Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_7" BoxId: 1 } } }
2025-06-03T10:24:33.855741Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_8" BoxId: 1 } } }
2025-06-03T10:24:33.855827Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { AddDriveSerial { Serial: "SN_9" BoxId: 1 } } }
2025-06-03T10:24:33.855916Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_0" } } }
2025-06-03T10:24:33.856010Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_1" } } }
2025-06-03T10:24:33.856099Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_2" } } }
2025-06-03T10:24:33.856189Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_3" } } }
2025-06-03T10:24:33.856275Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_4" } } }
2025-06-03T10:24:33.856370Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_5" } } }
2025-06-03T10:24:33.856463Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_6" } } }
2025-06-03T10:24:33.856559Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_7" } } }
2025-06-03T10:24:33.856658Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_8" } } }
2025-06-03T10:24:33.856754Z node 31 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { RemoveDriveSerial { Serial: "SN_9" } } }
>> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow
>> test.py::test[ypath-direct_read_from_dynamic--Results] [GOOD]
|59.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFreshSegment::PerfSkipList [GOOD]
>> test.py::test[blocks-member--Results] [GOOD]
|59.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part9/pytest >> test.py::test[aggregate-group_by_rollup_grouping_hum_bind--ForceBlocks] [GOOD]
>> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD]
>> test.py::test[pg-tpcds-q76-default.txt-ForceBlocks] [GOOD]
>> test.py::test[pg-tpcds-q76-default.txt-Results]
>> test.py::test[join-mergejoin_with_reverse_key_order-off-ForceBlocks] [GOOD]
>> test.py::test[join-mergejoin_with_reverse_key_order-off-Results] [SKIPPED]
>> test.py::test[join-nested_semi_join-off-ForceBlocks]
>> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD]
>> TBtreeIndexTPartLarge::Group [GOOD]
>> TBtreeIndexTPartLarge::History
|59.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[blocks-member--Results] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestImmediateQueueThenSplit-UseSink [GOOD]
Test command err:
2025-06-03T10:24:34.218347Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-03T10:24:34.218451Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-03T10:24:34.218488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c78/r3tmp/tmpYbzoJj/pdisk_1.dat
2025-06-03T10:24:34.365264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-06-03T10:24:34.385443Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:24:34.386883Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946273788484 != 1748946273788488
2025-06-03T10:24:34.429408Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:24:34.429458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:24:34.440122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:24:34.514368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-06-03T10:24:34.533010Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot
2025-06-03T10:24:34.533281Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored
2025-06-03T10:24:34.533402Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568]
2025-06-03T10:24:34.533480Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute
2025-06-03T10:24:34.543483Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:24:34.543751Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete
2025-06-03T10:24:34.543791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute
2025-06-03T10:24:34.544008Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2025-06-03T10:24:34.544019Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888
2025-06-03T10:24:34.544026Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888
2025-06-03T10:24:34.544109Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete
2025-06-03T10:24:34.544134Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute
2025-06-03T10:24:34.544150Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1
2025-06-03T10:24:34.554550Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete
2025-06-03T10:24:34.559683Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888
2025-06-03T10:24:34.559788Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2025-06-03T10:24:34.559853Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578]
2025-06-03T10:24:34.559860Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888
2025-06-03T10:24:34.559865Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2025-06-03T10:24:34.559871Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2025-06-03T10:24:34.559965Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2025-06-03T10:24:34.559974Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction
2025-06-03T10:24:34.560107Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2025-06-03T10:24:34.560135Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2025-06-03T10:24:34.560271Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2025-06-03T10:24:34.560281Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2025-06-03T10:24:34.560292Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888
2025-06-03T10:24:34.560299Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations
2025-06-03T10:24:34.560303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888
2025-06-03T10:24:34.560310Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2025-06-03T10:24:34.560315Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2025-06-03T10:24:34.560330Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.560336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.560344Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0]
2025-06-03T10:24:34.560366Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572]
2025-06-03T10:24:34.560371Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction
2025-06-03T10:24:34.560395Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888
2025-06-03T10:24:34.560455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx
2025-06-03T10:24:34.560467Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1
2025-06-03T10:24:34.560486Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888
2025-06-03T10:24:34.560506Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts
2025-06-03T10:24:34.560511Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx
2025-06-03T10:24:34.560517Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx
2025-06-03T10:24:34.560521Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2025-06-03T10:24:34.560600Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts
2025-06-03T10:24:34.560605Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx
2025-06-03T10:24:34.560610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose
2025-06-03T10:24:34.560614Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose
2025-06-03T10:24:34.560626Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete
2025-06-03T10:24:34.560630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose
2025-06-03T10:24:34.560637Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan
2025-06-03T10:24:34.560641Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan
2025-06-03T10:24:34.560646Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan
2025-06-03T10:24:34.560926Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated
2025-06-03T10:24:34.560935Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-06-03T10:24:34.571328Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888
2025-06-03T10:24:34.571373Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2025-06-03T10:24:34.571383Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose
2025-06-03T10:24:34.571397Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED
2025-06-03T10:24:34.571430Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme
2025-06-03T10:24:34.716104Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.716139Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... : 0 ru: 1 rate limiter was not found force flag: 1
2025-06-03T10:24:37.026029Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715677. Resolved key sets: 0
2025-06-03T10:24:37.026042Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, ActorId: [2:880:2715], ActorState: ExecuteState, TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Create QueryResponse for error on request, msg:
2025-06-03T10:24:37.026078Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, ActorId: [2:883:2718], ActorState: ExecuteState, TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Create QueryResponse for error on request, msg:
2025-06-03T10:24:37.026232Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jwtn4mc9cwm23rm4nbd7caer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDNmY2MxMGQtZTBlN2E4N2YtZDhhMjc3OGUtZDZkYjUzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:24:37.026240Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715677. Ctx: { TraceId: 01jwtn4mc9cwm23rm4nbd7caer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDNmY2MxMGQtZTBlN2E4N2YtZDhhMjc3OGUtZDZkYjUzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0}
2025-06-03T10:24:37.026245Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [2:1133:2713] TxId: 281474976715677. Ctx: { TraceId: 01jwtn4mc9cwm23rm4nbd7caer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDNmY2MxMGQtZTBlN2E4N2YtZDhhMjc3OGUtZDZkYjUzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0
2025-06-03T10:24:37.026253Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1133:2713] TxId: 281474976715677. Ctx: { TraceId: 01jwtn4mc9cwm23rm4nbd7caer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDNmY2MxMGQtZTBlN2E4N2YtZDhhMjc3OGUtZDZkYjUzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.026259Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1133:2713] TxId: 281474976715677. Ctx: { TraceId: 01jwtn4mc9cwm23rm4nbd7caer, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZDNmY2MxMGQtZTBlN2E4N2YtZDhhMjc3OGUtZDZkYjUzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
2025-06-03T10:24:37.026297Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715678. Resolved key sets: 0
2025-06-03T10:24:37.026464Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. Ctx: { TraceId: 01jwtn4mca463gkd14ex8h98jc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFiNjdhOGItNGM3YzFmZWYtYzgzN2YxYy0xNmFmM2M2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:24:37.026471Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715678. Ctx: { TraceId: 01jwtn4mca463gkd14ex8h98jc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFiNjdhOGItNGM3YzFmZWYtYzgzN2YxYy0xNmFmM2M2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0}
2025-06-03T10:24:37.026478Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [2:1136:2720] TxId: 281474976715678. Ctx: { TraceId: 01jwtn4mca463gkd14ex8h98jc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFiNjdhOGItNGM3YzFmZWYtYzgzN2YxYy0xNmFmM2M2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0
2025-06-03T10:24:37.026497Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1136:2720] TxId: 281474976715678. Ctx: { TraceId: 01jwtn4mca463gkd14ex8h98jc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFiNjdhOGItNGM3YzFmZWYtYzgzN2YxYy0xNmFmM2M2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.026502Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1136:2720] TxId: 281474976715678. Ctx: { TraceId: 01jwtn4mca463gkd14ex8h98jc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzFiNjdhOGItNGM3YzFmZWYtYzgzN2YxYy0xNmFmM2M2Yg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
2025-06-03T10:24:37.026720Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715679. Resolved key sets: 0
2025-06-03T10:24:37.026819Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715680. Resolved key sets: 0
2025-06-03T10:24:37.026830Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jwtn4mca0418vddb9ff6pnq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI4NGM3NDMtY2E5Y2RmZjctNWMzZDhlMTgtMjM2NjVlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:24:37.026835Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715679. Ctx: { TraceId: 01jwtn4mca0418vddb9ff6pnq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI4NGM3NDMtY2E5Y2RmZjctNWMzZDhlMTgtMjM2NjVlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0}
2025-06-03T10:24:37.026840Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [2:1145:2724] TxId: 281474976715679. Ctx: { TraceId: 01jwtn4mca0418vddb9ff6pnq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI4NGM3NDMtY2E5Y2RmZjctNWMzZDhlMTgtMjM2NjVlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0
2025-06-03T10:24:37.026847Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1145:2724] TxId: 281474976715679. Ctx: { TraceId: 01jwtn4mca0418vddb9ff6pnq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI4NGM3NDMtY2E5Y2RmZjctNWMzZDhlMTgtMjM2NjVlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.026852Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1145:2724] TxId: 281474976715679. Ctx: { TraceId: 01jwtn4mca0418vddb9ff6pnq7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MWI4NGM3NDMtY2E5Y2RmZjctNWMzZDhlMTgtMjM2NjVlNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
2025-06-03T10:24:37.026874Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:24:37.026878Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715680. Ctx: { TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0}
2025-06-03T10:24:37.026883Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [2:1149:2715] TxId: 281474976715680. Ctx: { TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0
2025-06-03T10:24:37.026889Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1149:2715] TxId: 281474976715680. Ctx: { TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.026893Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1149:2715] TxId: 281474976715680. Ctx: { TraceId: 01jwtn4mc99qaezg5wqx0axpx7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzkyMjIwODctMmIwZjQ4MjAtZDVmZTQyMjQtNGMzY2NiNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
2025-06-03T10:24:37.026898Z node 2 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715681. Resolved key sets: 0
2025-06-03T10:24:37.027005Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:24:37.027011Z node 2 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715681. Ctx: { TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0}
2025-06-03T10:24:37.027015Z node 2 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [2:1150:2718] TxId: 281474976715681. Ctx: { TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0
2025-06-03T10:24:37.027021Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1150:2718] TxId: 281474976715681. Ctx: { TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.027028Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1150:2718] TxId: 281474976715681. Ctx: { TraceId: 01jwtn4mcaft6z1k3rj82nes0g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OGZhY2I3YjUtNDkwOTM4MDMtZjFkMGU3NGMtMzU3M2IzZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1
------- [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[ypath-direct_read_from_dynamic--Results] [GOOD]
Test command err:
127.0.0.1 - - [03/Jun/2025 10:23:05] "GET /foo.txt HTTP/1.1" 200 -
127.0.0.1 - - [03/Jun/2025 10:23:09] "GET /foo.txt HTTP/1.1" 200 -
127.0.0.1 - - [03/Jun/2025 10:23:13] "GET /foo.txt HTTP/1.1" 200 -
>> test.py::test[aggregate-group_compact_sorted--ForceBlocks] [GOOD]
>> test.py::test[aggregate-group_compact_sorted--Results]
>> TTopicApiDescribes::DescribeTopic
|59.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest
>> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflow [GOOD]
>> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestOutOfOrderReadOnlyAllowed-EvWrite [GOOD]
Test command err:
2025-06-03T10:24:34.479369Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-03T10:24:34.479468Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-03T10:24:34.479503Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c81/r3tmp/tmpZarUWT/pdisk_1.dat
2025-06-03T10:24:34.618206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-06-03T10:24:34.636669Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:24:34.637945Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946273902231 != 1748946273902235
2025-06-03T10:24:34.679799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:24:34.679837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:24:34.690390Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:24:34.764281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-06-03T10:24:34.783646Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot
2025-06-03T10:24:34.783947Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored
2025-06-03T10:24:34.784058Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568]
2025-06-03T10:24:34.784130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute
2025-06-03T10:24:34.795152Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:24:34.795382Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete
2025-06-03T10:24:34.795416Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute
2025-06-03T10:24:34.795614Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888
2025-06-03T10:24:34.795624Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888
2025-06-03T10:24:34.795631Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888
2025-06-03T10:24:34.795703Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete
2025-06-03T10:24:34.795722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute
2025-06-03T10:24:34.795736Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1
2025-06-03T10:24:34.806050Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete
2025-06-03T10:24:34.811375Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888
2025-06-03T10:24:34.811460Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params
2025-06-03T10:24:34.811501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578]
2025-06-03T10:24:34.811508Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888
2025-06-03T10:24:34.811514Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme
2025-06-03T10:24:34.811521Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888
2025-06-03T10:24:34.811600Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction
2025-06-03T10:24:34.811608Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction
2025-06-03T10:24:34.811723Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888
2025-06-03T10:24:34.811748Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888
2025-06-03T10:24:34.811859Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888
2025-06-03T10:24:34.811868Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0
2025-06-03T10:24:34.811876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888
2025-06-03T10:24:34.811882Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations
2025-06-03T10:24:34.811887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888
2025-06-03T10:24:34.811894Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0
2025-06-03T10:24:34.811899Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888
2025-06-03T10:24:34.811911Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.811919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.811926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0]
2025-06-03T10:24:34.811946Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572]
2025-06-03T10:24:34.811951Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction
2025-06-03T10:24:34.811973Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888
2025-06-03T10:24:34.812024Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx
2025-06-03T10:24:34.812039Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1
2025-06-03T10:24:34.812055Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888
2025-06-03T10:24:34.812074Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts
2025-06-03T10:24:34.812079Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx
2025-06-03T10:24:34.812086Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx
2025-06-03T10:24:34.812091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2025-06-03T10:24:34.812160Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts
2025-06-03T10:24:34.812164Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx
2025-06-03T10:24:34.812169Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose
2025-06-03T10:24:34.812173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose
2025-06-03T10:24:34.812183Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete
2025-06-03T10:24:34.812187Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose
2025-06-03T10:24:34.812191Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan
2025-06-03T10:24:34.812196Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan
2025-06-03T10:24:34.812201Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan
2025-06-03T10:24:34.812451Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated
2025-06-03T10:24:34.812460Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888
2025-06-03T10:24:34.822720Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888
2025-06-03T10:24:34.822740Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx
2025-06-03T10:24:34.822746Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose
2025-06-03T10:24:34.822757Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED
2025-06-03T10:24:34.822771Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme
2025-06-03T10:24:34.965665Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected
2025-06-03T10:24:34.965690Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... 6277650 UpdateTimeMs: 1748946277653 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.653871Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1029:2826]
2025-06-03T10:24:37.653882Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1031:2828], CA [2:1032:2829], CA [2:1033:2830], CA [2:1030:2827], CA [2:1034:2831],
2025-06-03T10:24:37.653892Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 5 compute actor(s) and 0 datashard(s): CA [2:1031:2828], CA [2:1032:2829], CA [2:1033:2830], CA [2:1030:2827], CA [2:1034:2831],
2025-06-03T10:24:37.654025Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1030:2827], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 413 DurationUs: 1000 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 355 FinishTimeMs: 1748946277653 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 341 BuildCpuTimeUs: 14 HostName: "ghrun-pyvh3niaay" NodeId: 2 StartTimeMs: 1748946277652 CreateTimeMs: 1748946277650 UpdateTimeMs: 1748946277653 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.654038Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1030:2827]
2025-06-03T10:24:37.654046Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1031:2828], CA [2:1032:2829], CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654053Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 4 compute actor(s) and 0 datashard(s): CA [2:1031:2828], CA [2:1032:2829], CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654099Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1031:2828], task: 4, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 252 DurationUs: 1000 Tasks { TaskId: 4 StageId: 3 CpuTimeUs: 203 FinishTimeMs: 1748946277653 InputRows: 1 InputBytes: 5 OutputRows: 1 OutputBytes: 5 ComputeCpuTimeUs: 189 BuildCpuTimeUs: 14 HostName: "ghrun-pyvh3niaay" NodeId: 2 StartTimeMs: 1748946277652 CreateTimeMs: 1748946277650 UpdateTimeMs: 1748946277653 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.654109Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1031:2828]
2025-06-03T10:24:37.654118Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1032:2829], CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654125Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 3 compute actor(s) and 0 datashard(s): CA [2:1032:2829], CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654147Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1032:2829], task: 5, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 380 Tasks { TaskId: 5 StageId: 4 CpuTimeUs: 317 FinishTimeMs: 1748946277653 InputRows: 2 InputBytes: 10 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 295 BuildCpuTimeUs: 22 HostName: "ghrun-pyvh3niaay" NodeId: 2 StartTimeMs: 1748946277653 CreateTimeMs: 1748946277650 UpdateTimeMs: 1748946277653 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.654157Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1032:2829]
2025-06-03T10:24:37.654164Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654170Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 2 compute actor(s) and 0 datashard(s): CA [2:1033:2830], CA [2:1034:2831],
2025-06-03T10:24:37.654213Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1033:2830], task: 6, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 110 Tasks { TaskId: 6 StageId: 5 CpuTimeUs: 59 FinishTimeMs: 1748946277654 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ComputeCpuTimeUs: 41 BuildCpuTimeUs: 18 HostName: "ghrun-pyvh3niaay" NodeId: 2 CreateTimeMs: 1748946277650 UpdateTimeMs: 1748946277654 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.654224Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1033:2830]
2025-06-03T10:24:37.654231Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [2:1034:2831],
2025-06-03T10:24:37.654237Z node 2 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, waiting for 1 compute actor(s) and 0 datashard(s): CA [2:1034:2831],
2025-06-03T10:24:37.654287Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [2:1034:2831], task: 7, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 126 DurationUs: 1000 Tasks { TaskId: 7 StageId: 6 CpuTimeUs: 79 FinishTimeMs: 1748946277654 InputRows: 2 InputBytes: 7 OutputRows: 2 OutputBytes: 7 ResultRows: 2 ResultBytes: 7 ComputeCpuTimeUs: 63 BuildCpuTimeUs: 16 HostName: "ghrun-pyvh3niaay" NodeId: 2 StartTimeMs: 1748946277653 CreateTimeMs: 1748946277650 UpdateTimeMs: 1748946277654 } MaxMemoryUsage: 1048576 }
2025-06-03T10:24:37.654296Z node 2 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [2:1034:2831]
2025-06-03T10:24:37.654357Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution.
2025-06-03T10:24:37.654370Z node 2 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [2:1022:2808] TxId: 281474976715665. Ctx: { TraceId: 01jwtn4n640gbnryesejmgkpwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTgzNWFjNGYtMmZjNGYxNmMtYmEwOWE5MDYtZjIyNGMzZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.001703s ReadRows: 2 ReadBytes: 16 ru: 2 rate limiter was not found force flag: 1
{ items { uint32_value: 3 } items { uint32_value: 2 } }, { items { uint32_value: 4 } items { uint32_value: 2 } }
|59.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest
|59.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest
>> test.py::test[pg-tpcds-q76-default.txt-Results] [GOOD]
>> test.py::test[aggregate-aggregate_with_const_yson_options-default.txt-Results] [GOOD]
>> test.py::test[aggregate-aggregation_by_udf--Results]
>> TTopicApiDescribes::GetLocalDescribe
>> IncrementalBackup::E2EBackupCollection [GOOD]
>> DataShardOutOfOrder::TestOutOfOrderRestartLocksReorderedWithoutBarrier [GOOD]
>> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink
>> test.py::test[aggregate-group_by_gs_simp--Results] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_backup/unittest >> IncrementalBackup::E2EBackupCollection [GOOD]
Test command err:
2025-06-03T10:24:29.294150Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:29.294247Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:29.294279Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c0f/r3tmp/tmpZNF5di/pdisk_1.dat 2025-06-03T10:24:29.421617Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:594:2519], Recipient [1:410:2404]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:29.421642Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:29.421647Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:24:29.421659Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:591:2517], Recipient [1:410:2404]: {TEvModifySchemeTransaction txid# 1 TabletId# 72057594046644480} 2025-06-03T10:24:29.421663Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:24:29.439230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-03T10:24:29.439311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:29.439442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:29.439451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439466Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:29.439705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:29.439737Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 
72057594046644480 2025-06-03T10:24:29.439743Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-03T10:24:29.439790Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [1:410:2404], Recipient [1:410:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.439798Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.439814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:29.439831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:24:29.439838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:24:29.439867Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:29.439941Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:29.439946Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-03T10:24:29.439964Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [1:410:2404], Recipient [1:410:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.439969Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.439975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:29.439986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:24:29.439996Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:29.440043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:29.440047Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 1:0 2025-06-03T10:24:29.440061Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [1:410:2404], Recipient [1:410:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.440066Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:24:29.440071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046644480 2025-06-03T10:24:29.440076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:29.440083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:24:29.440086Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:29.440094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:24:29.440793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:29.440938Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:29.440952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:24:29.441020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:29.441411Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877760, Sender [1:599:2524], Recipient [1:410:2404]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594046316545 Status: OK ServerId: [1:601:2525] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-03T10:24:29.441425Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4977: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-03T10:24:29.441433Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5708: Handle TEvClientConnected, tabletId: 72057594046316545, status: OK, at schemeshard: 72057594046644480 2025-06-03T10:24:29.441456Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269091328, Sender [1:406:2400], Recipient [1:410:2404]: NKikimrTx.TEvProposeTransactionStatus Status: 16 StepId: 500 TxId: 1 2025-06-03T10:24:29.441533Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:603:2527], Recipient [1:410:2404]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:29.441538Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:29.441543Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:24:29.441565Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [1:591:2517], Recipient [1:410:2404]: NKikimrScheme.TEvNotifyTxCompletion TxId: 1 2025-06-03T10:24:29.441570Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:24:29.441587Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:29.441594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-03T10:24:29.441600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:29.458203Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 273285138, Sender [1:43:2090], Recipient [1:410:2404]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true } 2025-06-03T10:24:29.458234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-03T10:24:29.458239Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-0 ... pp:1018: NTableState::TProposedWaitParts operationId# 281474976715668:1 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 1211 RawX2: 12884904810 } Origin: 72075186224037892 State: 2 TxId: 281474976715668 Step: 0 Generation: 1 2025-06-03T10:24:38.701917Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715668:1, shardIdx: 72057594046644480:5, datashard: 72075186224037892, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:38.701921Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-06-03T10:24:38.701926Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976715668:1, datashard: 72075186224037892, at schemeshard: 72057594046644480 2025-06-03T10:24:38.701934Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715668:1 129 -> 240 2025-06-03T10:24:38.701989Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_create_restore_incremental_backup.cpp:253: TRestoreMultipleIncrementalBackups TDone, operationId: 281474976715668:1 Constructed op# SrcTablePaths: "/Root/.backups/collections/MyCollection/19700101000002Z_incremental/Table" DstTablePath: "/Root/Table" SrcPathIds { OwnerId: 72057594046644480 LocalId: 15 } 2025-06-03T10:24:38.702009Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:38.702137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-06-03T10:24:38.702143Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:38.702148Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:1 2025-06-03T10:24:38.702161Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:630: Send to actor: [3:1211:2922] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-03T10:24:38.702190Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715668 datashard 72075186224037892 state Ready 2025-06-03T10:24:38.702197Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037892 Got TEvSchemaChangedResult from SS at 72075186224037892 2025-06-03T10:24:38.702236Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:410:2404], Recipient [3:410:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:24:38.702241Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:24:38.702248Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715668:1, at schemeshard: 72057594046644480 2025-06-03T10:24:38.702255Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_restore_incremental_backup.cpp:260: [72057594046644480] TRestoreMultipleIncrementalBackups TDone, operationId: 281474976715668:1 ProgressState 2025-06-03T10:24:38.702264Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:38.702270Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:1 progress is 1/2 2025-06-03T10:24:38.702275Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 1/2 2025-06-03T10:24:38.702282Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715668, done: 1, blocked: 1 2025-06-03T10:24:38.702296Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 281474976715668:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 281474976715668 Name: CopyTableBarrier }, at tablet# 72057594046644480 2025-06-03T10:24:38.702300Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715668:0 240 -> 240 2025-06-03T10:24:38.702316Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:1 progress is 1/2 2025-06-03T10:24:38.702320Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 1/2 2025-06-03T10:24:38.702325Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 1/2, is published: true 2025-06-03T10:24:38.702373Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:38.702377Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715668:0 2025-06-03T10:24:38.702391Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:410:2404], Recipient [3:410:2404]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 
2025-06-03T10:24:38.702395Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:24:38.702400Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:24:38.702404Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976715668:0 ProgressState 2025-06-03T10:24:38.702415Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:24:38.702418Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:0 progress is 2/2 2025-06-03T10:24:38.702422Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-06-03T10:24:38.702426Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715668:0 progress is 2/2 2025-06-03T10:24:38.702429Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-06-03T10:24:38.702433Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715668, ready parts: 2/2, is published: true 2025-06-03T10:24:38.702445Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:1418:3087] message: TxId: 281474976715668 2025-06-03T10:24:38.702451Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715668 ready parts: 2/2 2025-06-03T10:24:38.702458Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:0 2025-06-03T10:24:38.702463Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715668:0 2025-06-03T10:24:38.702506Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 4 2025-06-03T10:24:38.702510Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 3 2025-06-03T10:24:38.702516Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715668:1 2025-06-03T10:24:38.702519Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715668:1 2025-06-03T10:24:38.702525Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 3 2025-06-03T10:24:38.702529Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046644480, LocalPathId: 16] was 3 2025-06-03T10:24:38.702601Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:24:38.702612Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:630: Send to actor: [3:1418:3087] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715668 at schemeshard: 72057594046644480 2025-06-03T10:24:38.702767Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:1425:3093], Recipient [3:410:2404]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:38.702773Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:38.702777Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:24:38.715742Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:1532:3180], Recipient [3:410:2404]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:38.715777Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:38.715784Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:24:38.805588Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:410:2404]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:24:38.805629Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:24:38.805654Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [3:410:2404], Recipient [3:410:2404]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:24:38.805660Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:24:38.977568Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtn4ph641tcg449wxk5226k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ODNkMzZkMjItZDdlMGQ5NTUtZmU0ZTM5ZDgtZTUxYjUyM2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root { items { uint32_value: 2 } items { uint32_value: 200 } }, { items { uint32_value: 3 } items { uint32_value: 30 } } >> TTxDataShardUploadRows::ShouldRejectOnChangeQueueOverflowAndRetry [GOOD] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption >> TTopicApiDescribes::GetPartitionDescribe >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit >> test.py::test[window-row_number_no_part_multi_input-default.txt-Results] [GOOD] >> test.py::test[window-row_number_to_map-default.txt-Results] |59.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes_reboots/unittest |59.3%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part12/pytest >> test.py::test[pg-tpcds-q76-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_compact_sorted--Results] [GOOD] >> test.py::test[aggregate-histogram_cdf-default.txt-ForceBlocks] >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC |59.3%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part11/pytest >> test.py::test[aggregate-group_by_gs_simp--Results] [GOOD] >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError >> test.py::test[aggregate-dedup_state_keys--Results] [GOOD] >> test.py::test[aggregate-group_by_cube_grouping_and_expr-default.txt-Results] >> BsControllerConfig::ExtendByCreatingSeparateBox [GOOD] >> BsControllerConfig::ExtendBoxAndStoragePool >> test.py::test[blocks-interval_sub_interval_scalar--ForceBlocks] [GOOD] >> test.py::test[blocks-interval_sub_interval_scalar--Results] >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_order/unittest >> DataShardOutOfOrder::TestPlannedHalfOverloadedSplit+UseSink [GOOD] Test command err: 2025-06-03T10:24:36.365916Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:36.366007Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:36.366040Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c71/r3tmp/tmpq0W6zf/pdisk_1.dat 2025-06-03T10:24:36.526681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:36.546359Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:36.547561Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946275629053 != 1748946275629057 2025-06-03T10:24:36.598083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:36.598132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:36.609170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:36.694227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:36.726131Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:36.726462Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:36.726607Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:24:36.726705Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:24:36.739856Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:36.740154Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:24:36.740196Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:24:36.740415Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:24:36.740425Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:24:36.740434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:24:36.740520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:24:36.740547Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:24:36.740562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:24:36.752908Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:24:36.768846Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:24:36.768963Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:24:36.769014Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:24:36.769022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:24:36.769028Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:24:36.769034Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:24:36.769140Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:24:36.769150Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:24:36.769318Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:24:36.769354Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:24:36.769508Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:24:36.769520Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:24:36.769532Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:24:36.769538Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:24:36.769543Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:24:36.769551Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:24:36.769557Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:24:36.769577Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:36.769583Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:36.769592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:24:36.769618Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:24:36.769624Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:24:36.769654Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:24:36.769715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:24:36.769728Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:24:36.769753Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:24:36.769777Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:24:36.769782Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:24:36.769789Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:24:36.769794Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:24:36.769887Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:24:36.769893Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:24:36.769897Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:24:36.769901Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:24:36.769915Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:24:36.769920Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:24:36.769926Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:24:36.769929Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:24:36.769935Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:24:36.770278Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:24:36.770289Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:24:36.782435Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:24:36.782478Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:24:36.782504Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:24:36.782520Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:24:36.782547Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:24:36.955070Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:24:36.955099Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... EvClientDestroyed { TabletId: 72075186224037891 ClientId: [2:1162:2899] ServerId: [2:1166:2903] } 2025-06-03T10:24:41.430256Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3163: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:24:41.430269Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553157, Sender [2:1031:2809], Recipient [2:749:2628]: NKikimrTxDataShard.TEvSplitTransferSnapshotAck TabletId: 72075186224037893 OperationCookie: 281474976715665 2025-06-03T10:24:41.430275Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:461: 72075186224037889 Received snapshot Ack from dst 72075186224037893 for split OpId 281474976715665 2025-06-03T10:24:41.430324Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:1031:2809], Recipient [2:1031:2809]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:24:41.430328Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:24:41.430373Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877763, Sender [2:1164:2901], Recipient [2:749:2628]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037893 ClientId: [2:1164:2901] ServerId: [2:1169:2906] } 2025-06-03T10:24:41.430376Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3163: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:24:41.430408Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:1024:2805]: {TEvRegisterTabletResult TabletId# 72075186224037891 Entry# 2000} 2025-06-03T10:24:41.430413Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-03T10:24:41.430417Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037891 time 2000 2025-06-03T10:24:41.430422Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037891 2025-06-03T10:24:41.430434Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037891 2025-06-03T10:24:41.430440Z node 2 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037891 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:24:41.430447Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037891 2025-06-03T10:24:41.430452Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037891 has no attached operations 2025-06-03T10:24:41.430466Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037891 2025-06-03T10:24:41.430471Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-03T10:24:41.430476Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037891 2025-06-03T10:24:41.430482Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037893 2025-06-03T10:24:41.430485Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037893 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:24:41.430488Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037893 2025-06-03T10:24:41.430491Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037893 has no attached operations 2025-06-03T10:24:41.430494Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037893 2025-06-03T10:24:41.430497Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037893 TxInFly 0 2025-06-03T10:24:41.430500Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037893 2025-06-03T10:24:41.430554Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877764, Sender [2:1166:2903], Recipient [2:1024:2805]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:41.430559Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:41.430564Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [2:1162:2899], serverId# [2:1166:2903], sessionId# [0:0:0] 2025-06-03T10:24:41.430571Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270976, Sender [2:24:2071], Recipient [2:1031:2809]: {TEvRegisterTabletResult TabletId# 72075186224037893 Entry# 2000} 2025-06-03T10:24:41.430574Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-03T10:24:41.430577Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037893 time 2000 2025-06-03T10:24:41.430580Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037893 2025-06-03T10:24:41.430601Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877764, Sender [2:1169:2906], Recipient [2:1031:2809]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:41.430606Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3166: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:24:41.430611Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037893, 
clientId# [2:1164:2901], serverId# [2:1169:2906], sessionId# [0:0:0] 2025-06-03T10:24:41.430928Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:1024:2805]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-03T10:24:41.430934Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-03T10:24:41.430939Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037891 coordinator 72057594046316545 last step 0 next step 2000 2025-06-03T10:24:41.430947Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037891: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-03T10:24:41.430954Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037891 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-03T10:24:41.430976Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270978, Sender [2:24:2071], Recipient [2:1031:2809]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 2000 ReadStep# 2000 } 2025-06-03T10:24:41.430980Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-03T10:24:41.430983Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037893 coordinator 72057594046316545 last step 0 next step 2000 2025-06-03T10:24:41.430987Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:2812: CheckMediatorStateRestored at 72075186224037893: waitStep# 2000 readStep# 2000 observedStep# 2000 2025-06-03T10:24:41.430991Z node 2 :TX_DATASHARD TRACE: datashard.cpp:2846: CheckMediatorStateRestored at 72075186224037893 promoting UnprotectedReadEdge to v2000/18446744073709551615 2025-06-03T10:24:41.441901Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037888 ack split to schemeshard 281474976715664 2025-06-03T10:24:41.443095Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:485: 72075186224037889 ack split to schemeshard 281474976715665 2025-06-03T10:24:41.444709Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553158, Sender [2:410:2404], Recipient [2:671:2572] 2025-06-03T10:24:41.444732Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715664, at datashard: 72075186224037888, state: SplitSrcWaitForPartitioningChanged 2025-06-03T10:24:41.454464Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553158, Sender [2:410:2404], Recipient [2:756:2632] 2025-06-03T10:24:41.454495Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:565: Got TEvSplitPartitioningChanged: opId: 281474976715665, at datashard: 72075186224037889, state: SplitSrcWaitForPartitioningChanged 2025-06-03T10:24:41.455256Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037888 ack split partitioning changed to schemeshard 281474976715664 2025-06-03T10:24:41.455273Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 
siblings to be activated: wait to activation from: 2025-06-03T10:24:41.455695Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [2:655:2562], Recipient [2:664:2568]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-03T10:24:41.455754Z node 2 :TX_DATASHARD DEBUG: datashard_split_src.cpp:532: 72075186224037889 ack split partitioning changed to schemeshard 281474976715665 2025-06-03T10:24:41.455760Z node 2 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037889 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-03T10:24:41.455817Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [2:742:2623], Recipient [2:749:2628]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-03T10:24:41.881865Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 278003712, Sender [2:978:2670], Recipient [2:664:2568]: NKikimrDataEvents.TEvWrite Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 281474976715663 TxMode: MODE_VOLATILE_PREPARE Locks { SendingShards: 72075186224037888 SendingShards: 72075186224037889 ReceivingShards: 72075186224037888 ReceivingShards: 72075186224037889 Op: Commit } 2025-06-03T10:24:41.881899Z node 2 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-03T10:24:41.881936Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=281474976715663; 2025-06-03T10:24:41.881955Z node 2 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 281474976715663 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-03T10:24:41.882088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715664, at schemeshard: 72057594046644480 2025-06-03T10:24:41.882173Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_upload_rows/unittest >> TTxDataShardUploadRows::BulkUpsertDuringAddIndexRaceCorruption [GOOD] Test command err: 2025-06-03T10:24:37.568687Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:37.568784Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:37.568815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016f8/r3tmp/tmpZEoAXr/pdisk_1.dat 2025-06-03T10:24:37.714740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:37.732778Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:37.734274Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946276864705 != 1748946276864709 2025-06-03T10:24:37.777202Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:37.777251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:37.787884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:37.875540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:37.902017Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:24:37.902130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:24:37.914867Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:24:37.914970Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:24:37.916716Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:24:37.916774Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:24:37.916989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:24:37.917000Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:24:37.917009Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:24:37.917096Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:24:37.917131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:24:37.917149Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:24:37.917256Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:24:37.917278Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:24:37.917480Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:24:37.917493Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:24:37.917500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:24:37.917543Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:24:37.917563Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:24:37.917577Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:24:37.928391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:24:37.934210Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:24:37.934362Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:24:37.934405Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:24:37.934412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:24:37.934418Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:24:37.934426Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:24:37.934570Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:24:37.934580Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:24:37.934592Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:24:37.934601Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:24:37.934605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:24:37.934608Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:24:37.934612Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:24:37.934757Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:24:37.934790Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:24:37.934834Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:24:37.934843Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:24:37.934855Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:24:37.934861Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:24:37.934868Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:24:37.934879Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:24:37.934911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:24:37.934917Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:24:37.934921Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:24:37.934925Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:24:37.934929Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:24:37.935083Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:24:37.935156Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:24:37.935181Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:24:37.935301Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:24:37.935352Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:24:37.935378Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:24:37.935391Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:24:37.935757Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:24:37.935773Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:24:37.946748Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:24:37.946813Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:24:37.947061Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:24:37.947072Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:24:38.114347Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:24:38.114574Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:24:38.115663Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:24:38.115686Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:24:38.115793Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:24:38.115804Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... t finished with status SCHEME_ERROR 2025-06-03T10:24:43.497207Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:59:2106] Handle TEvExecuteKqpTransaction 2025-06-03T10:24:43.497248Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:59:2106] TxId# 281474976715662 ProcessProposeKqpTransaction 2025-06-03T10:24:43.497836Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn4ty38ej1ryjahrwfnynz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzYzNDY3Y2MtYzRjMDllM2UtNzZhMTgyNDQtZTY0ZDE1MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:24:43.498772Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [3:1100:2893], Recipient [3:665:2569]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-03T10:24:43.498843Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:24:43.498859Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v8000/0 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v8000/18446744073709551615 ImmediateWriteEdgeReplied# v8000/18446744073709551615 2025-06-03T10:24:43.498869Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-03T10:24:43.498881Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckRead 2025-06-03T10:24:43.498904Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:24:43.498909Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:24:43.498915Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:24:43.498920Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:24:43.498936Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-03T10:24:43.498941Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:24:43.498945Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:24:43.498949Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:24:43.498953Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:24:43.498969Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 3 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-03T10:24:43.499041Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[3:1100:2893], 0} after executionsCount# 1 2025-06-03T10:24:43.499054Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[3:1100:2893], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:24:43.499073Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[3:1100:2893], 0} finished in read 
2025-06-03T10:24:43.499084Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:24:43.499088Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:24:43.499092Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:24:43.499095Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:24:43.499107Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:24:43.499110Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:24:43.499115Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-03T10:24:43.499120Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:24:43.499151Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-03T10:24:43.499424Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [3:1100:2893], Recipient [3:665:2569]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:24:43.499432Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } 2025-06-03T10:24:43.526193Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [3:59:2106] Handle TEvExecuteKqpTransaction 2025-06-03T10:24:43.526231Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [3:59:2106] TxId# 281474976715663 ProcessProposeKqpTransaction 2025-06-03T10:24:43.526501Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn4tzcbfmpbpw82w4at88j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=YzBhNDc2YjgtYWRhZjM1M2UtN2I2ZTY5MjEtNDMxZDhiMzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:24:43.527302Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [3:1130:2917], Recipient [3:901:2728]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 1 2025-06-03T10:24:43.527365Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-03T10:24:43.527380Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v6000/281474976710759 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v5000/18446744073709551615 ImmediateWriteEdgeReplied# v5000/18446744073709551615 2025-06-03T10:24:43.527390Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v8000/18446744073709551615 2025-06-03T10:24:43.527406Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-03T10:24:43.527428Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:24:43.527435Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-03T10:24:43.527442Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-03T10:24:43.527447Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-03T10:24:43.527463Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 2025-06-03T10:24:43.527469Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:24:43.527473Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-03T10:24:43.527479Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-03T10:24:43.527484Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-03T10:24:43.527501Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 8 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-03T10:24:43.527580Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[3:1130:2917], 0} after executionsCount# 1 2025-06-03T10:24:43.527591Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[3:1130:2917], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:24:43.527608Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[3:1130:2917], 0} finished in read 2025-06-03T10:24:43.527620Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:24:43.527625Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-03T10:24:43.527629Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-03T10:24:43.527634Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-03T10:24:43.527647Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:24:43.527651Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-03T10:24:43.527657Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-03T10:24:43.527663Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-03T10:24:43.527693Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-03T10:24:43.528023Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [3:1130:2917], Recipient [3:901:2728]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:24:43.528037Z node 3 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 2 } }, { items { uint32_value: 3 } items { uint32_value: 4 } } |59.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut |59.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> BsControllerConfig::MergeIntersectingBoxes [GOOD] >> BsControllerConfig::MoveGroups >> TPersQueueCommonTest::TestWriteWithRateLimiterWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit |59.3%| [TA] {RESULT} $(B)/ydb/services/datastreams/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/ydb-core-blobstorage-nodewarden-ut >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest >> BsControllerConfig::DeleteStoragePool [GOOD] >> TPersQueueCommonTest::Auth_CreateGrpcStreamWithInvalidTokenInInitialMetadata_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError >> TPersqueueControlPlaneTestSuite::TestAddRemoveReadRule [GOOD] >> TPersqueueDataPlaneTestSuite::WriteSession >> test.py::test[blocks-interval_sub_interval_scalar--Results] [GOOD] >> test.py::test[blocks-lazy_nonstrict_with_scalar_ctx--ForceBlocks] >> TPersQueueCommonTest::TestLimiterLimitsWithBlobsRateLimit [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit >> test.py::test[aggregate-group_by_session_distinct_compact--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_session_distinct_compact--Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::DeleteStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:215:2066] recipient: [1:189:2076] Leader for TabletID 72057594037932033 is [1:223:2078] sender: [1:225:2066] recipient: [1:189:2076] 2025-06-03T10:24:24.902603Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:24.903577Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:24.903671Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:24.904027Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:24.904165Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:24.904221Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:24.904230Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:24.904281Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:24.905432Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:24.905467Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:24.905516Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:24.905539Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:24.905552Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:24.905562Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for 
TabletID 72057594037932033 is [1:223:2078] sender: [1:247:2066] recipient: [1:20:2067] 2025-06-03T10:24:24.917535Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:24.917594Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:24.929197Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:24.929260Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:24.929278Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:24.929290Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:24.929338Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:24.929348Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:24.929354Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:24.929363Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:24.939779Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:24.939850Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:24.952365Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:24.952451Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:24.952757Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:24.952781Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:24.952844Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:24.952862Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:24.955874Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: [11:194:2076] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [11:215:2066] recipient: 
[11:194:2076] Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:226:2066] recipient: [11:194:2076] 2025-06-03T10:24:26.903803Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:26.904072Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:26.904140Z node 11 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:26.904390Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:26.904510Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:26.904551Z node 11 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.904558Z node 11 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.904609Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:26.905726Z node 11 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:26.905767Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:26.905800Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:26.905822Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.905836Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.905847Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [11:225:2078] sender: [11:247:2066] recipient: [11:20:2067] 2025-06-03T10:24:26.916312Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:26.916379Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.926762Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.926826Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.926843Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.926872Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.926902Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# 
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.926911Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.926917Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.926929Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.937316Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.937380Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.947701Z node 11 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.947750Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:26.947897Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:26.947903Z node 11 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:26.947937Z node 11 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:26.947942Z node 11 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:26.948081Z node 11 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {} Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3015:2106] recipient: [21:2914:2116] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [21:3015:2106] recipient: [21:2914:2116] Leader for TabletID 72057594037932033 is [21:3063:2118] sender: [21:3064:2106] recipient: [21:2914:2116] 2025-06-03T10:24:29.393431Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:29.393760Z node 21 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:29.393824Z n ... 
ev/disk3 2025-06-03T10:24:37.887304Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 96:1000 Path# /dev/disk1 2025-06-03T10:24:37.887309Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 96:1001 Path# /dev/disk2 2025-06-03T10:24:37.887316Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 96:1002 Path# /dev/disk3 2025-06-03T10:24:37.887321Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1000 Path# /dev/disk1 2025-06-03T10:24:37.887326Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1001 Path# /dev/disk2 2025-06-03T10:24:37.887332Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 97:1002 Path# /dev/disk3 2025-06-03T10:24:37.887337Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1000 Path# /dev/disk1 2025-06-03T10:24:37.887343Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1001 Path# /dev/disk2 2025-06-03T10:24:37.887348Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 98:1002 Path# /dev/disk3 2025-06-03T10:24:37.887353Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1000 Path# /dev/disk1 2025-06-03T10:24:37.887359Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1001 Path# /dev/disk2 2025-06-03T10:24:37.887364Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 99:1002 Path# /dev/disk3 2025-06-03T10:24:37.887370Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1000 Path# /dev/disk1 2025-06-03T10:24:37.887376Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1001 Path# /dev/disk2 2025-06-03T10:24:37.887381Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 100:1002 Path# /dev/disk3 2025-06-03T10:24:37.887387Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1000 Path# /dev/disk1 2025-06-03T10:24:37.887393Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1001 Path# /dev/disk2 2025-06-03T10:24:37.887400Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 101:1002 Path# /dev/disk3 2025-06-03T10:24:37.887406Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1000 Path# /dev/disk1 2025-06-03T10:24:37.887412Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1001 Path# /dev/disk2 2025-06-03T10:24:37.887418Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 102:1002 Path# /dev/disk3 2025-06-03T10:24:37.887423Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1000 Path# /dev/disk1 2025-06-03T10:24:37.887429Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1001 Path# /dev/disk2 2025-06-03T10:24:37.887434Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 103:1002 Path# /dev/disk3 2025-06-03T10:24:37.887440Z 
node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1000 Path# /dev/disk1 2025-06-03T10:24:37.887445Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1001 Path# /dev/disk2 2025-06-03T10:24:37.887451Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 104:1002 Path# /dev/disk3 2025-06-03T10:24:37.887456Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1000 Path# /dev/disk1 2025-06-03T10:24:37.887463Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1001 Path# /dev/disk2 2025-06-03T10:24:37.887468Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 105:1002 Path# /dev/disk3 2025-06-03T10:24:37.887474Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1000 Path# /dev/disk1 2025-06-03T10:24:37.887479Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1001 Path# /dev/disk2 2025-06-03T10:24:37.887484Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 106:1002 Path# /dev/disk3 2025-06-03T10:24:37.887489Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1000 Path# /dev/disk1 2025-06-03T10:24:37.887494Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1001 Path# /dev/disk2 2025-06-03T10:24:37.887500Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 107:1002 Path# /dev/disk3 2025-06-03T10:24:37.887505Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1000 Path# /dev/disk1 2025-06-03T10:24:37.887511Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1001 Path# /dev/disk2 2025-06-03T10:24:37.887517Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 108:1002 Path# /dev/disk3 2025-06-03T10:24:37.887522Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1000 Path# /dev/disk1 2025-06-03T10:24:37.887528Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1001 Path# /dev/disk2 2025-06-03T10:24:37.887534Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 109:1002 Path# /dev/disk3 2025-06-03T10:24:37.887541Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1000 Path# /dev/disk1 2025-06-03T10:24:37.887547Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1001 Path# /dev/disk2 2025-06-03T10:24:37.887552Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 110:1002 Path# /dev/disk3 2025-06-03T10:24:37.887557Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1000 Path# /dev/disk1 2025-06-03T10:24:37.887562Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1001 Path# /dev/disk2 2025-06-03T10:24:37.887568Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 111:1002 Path# /dev/disk3 2025-06-03T10:24:37.887573Z node 71 :BS_CONTROLLER 
NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1000 Path# /dev/disk1 2025-06-03T10:24:37.887580Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1001 Path# /dev/disk2 2025-06-03T10:24:37.887585Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 112:1002 Path# /dev/disk3 2025-06-03T10:24:37.887591Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1000 Path# /dev/disk1 2025-06-03T10:24:37.887597Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1001 Path# /dev/disk2 2025-06-03T10:24:37.887602Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 113:1002 Path# /dev/disk3 2025-06-03T10:24:37.887607Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1000 Path# /dev/disk1 2025-06-03T10:24:37.887612Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1001 Path# /dev/disk2 2025-06-03T10:24:37.887618Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 114:1002 Path# /dev/disk3 2025-06-03T10:24:37.887623Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1000 Path# /dev/disk1 2025-06-03T10:24:37.887629Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1001 Path# /dev/disk2 2025-06-03T10:24:37.887635Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 115:1002 Path# /dev/disk3 2025-06-03T10:24:37.887640Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1000 Path# /dev/disk1 2025-06-03T10:24:37.887645Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1001 Path# /dev/disk2 2025-06-03T10:24:37.887651Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 116:1002 Path# /dev/disk3 2025-06-03T10:24:37.887657Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1000 Path# /dev/disk1 2025-06-03T10:24:37.887662Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1001 Path# /dev/disk2 2025-06-03T10:24:37.887668Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 117:1002 Path# /dev/disk3 2025-06-03T10:24:37.887674Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1000 Path# /dev/disk1 2025-06-03T10:24:37.887680Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1001 Path# /dev/disk2 2025-06-03T10:24:37.887685Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 118:1002 Path# /dev/disk3 2025-06-03T10:24:37.887692Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1000 Path# /dev/disk1 2025-06-03T10:24:37.887698Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1001 Path# /dev/disk2 2025-06-03T10:24:37.887703Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 119:1002 Path# /dev/disk3 2025-06-03T10:24:37.887709Z node 71 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1000 Path# /dev/disk1 2025-06-03T10:24:37.887714Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1001 Path# /dev/disk2 2025-06-03T10:24:37.887720Z node 71 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 120:1002 Path# /dev/disk3 2025-06-03T10:24:37.901986Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "storage pool 1" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:24:37.949481Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineStoragePool { BoxId: 1 StoragePoolId: 2 Name: "storage pool 2" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 50 PDiskFilter { Property { Type: SSD } } } } Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 2 ItemConfigGeneration: 1 } } } 2025-06-03T10:24:37.975427Z node 71 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DeleteStoragePool { BoxId: 1 StoragePoolId: 1 ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 >> test.py::test[join-nested_semi_join-off-ForceBlocks] [GOOD] >> TTopicApiDescribes::DescribeTopic [GOOD] >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithInvalidToken_SessionClosedWithUnauthenticatedError [GOOD] >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::DescribeTopic [GOOD] Test command err: 2025-06-03T10:24:38.312903Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667066926132294:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:38.312991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:38.320237Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667068522221839:2222];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002631/r3tmp/tmpXdDAcd/pdisk_1.dat 2025-06-03T10:24:38.351967Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:38.352216Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:38.357392Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:38.438510Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:38.439985Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:38.440074Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:38.444544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:38.451393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:38.451422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 25193, node 1 2025-06-03T10:24:38.452931Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:38.453348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:38.459230Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002631/r3tmp/yandexKtulet.tmp 2025-06-03T10:24:38.459249Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002631/r3tmp/yandexKtulet.tmp 2025-06-03T10:24:38.459333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002631/r3tmp/yandexKtulet.tmp 2025-06-03T10:24:38.459394Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:38.482716Z INFO: TTestServer started on Port 12419 GrpcPort 25193 TClient is connected to server localhost:12419 PQClient connected to localhost:25193 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:38.556008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:38.591269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 
2025-06-03T10:24:38.743018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480 2025-06-03T10:24:39.152625Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667072817189330:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:39.152652Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:39.152820Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667072817189357:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:39.154593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-06-03T10:24:39.164604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:24:39.165221Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667072817189359:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-03T10:24:39.254143Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667072817189387:2166] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:39.361052Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667071221100548:2341], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:39.362929Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511667072817189401:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:39.363586Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=M2MxNThkMWEtN2QxYTcxN2EtZTFiNDU2OTctY2M1ODA1Yzg=, ActorId: [2:7511667072817189328:2309], ActorState: ExecuteState, TraceId: 01jwtn4pqc2gpk5pz9pyt69612, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:39.363722Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:39.361984Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZmFkOTQ0MDctN2FkZDlmZGUtZDJlMTM5NTMtY2IwZTllMWU=, ActorId: [1:7511667071221100495:2334], ActorState: ExecuteState, TraceId: 01jwtn4ptd4vyk3ck5mvwfn6dx, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:39.362442Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:39.362446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:24:39.412121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:24:39.531634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:39.724734Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtn4q5cdbzyk5sc4g51n2yc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Q3OWRkMDgtYTk0YzE1NjYtYTUxYjE1OTUtNWE0ZjgyOWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511667071221101004:3050] 2025-06-03T10:24:43.313798Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667066926132294:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:43.315980 ... 00000 } max_write_time_lag { } bytes_written { } } } } } Describe topic with location 2025-06-03T10:24:45.886315Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:150: new Describe topic request 2025-06-03T10:24:45.886339Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:455: TDescribeTopicActor for request path: "/Root/PQ//rt3.dc1--topic-x" include_location: true 2025-06-03T10:24:45.886367Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x 2025-06-03T10:24:45.886626Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667096990906508:2592]: Request location 2025-06-03T10:24:45.887111Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7511667096990906508:2592]: Got location Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1748946285016 tx_id: 281474976715677 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 1 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 2 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 3 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 4 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 5 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 6 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 7 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 8 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 9 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 10 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 11 active: true partition_location { node_id: 1 generation: 2 } } partitions { partition_id: 12 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 13 active: true partition_location { node_id: 2 generation: 2 } } partitions { partition_id: 14 active: true partition_location { node_id: 1 generation: 2 } } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe topic with no stats or location 2025-06-03T10:24:45.888248Z node 1 :PQ_READ_PROXY DEBUG: 
grpc_pq_schema.cpp:150: new Describe topic request 2025-06-03T10:24:45.888267Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:455: TDescribeTopicActor for request path: "/Root/PQ//rt3.dc1--topic-x" 2025-06-03T10:24:45.888289Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//rt3.dc1--topic-x Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribeTopicResult] { self { name: "rt3.dc1--topic-x" owner: "root@builtin" type: TOPIC created_at { plan_step: 1748946285016 tx_id: 281474976715677 } } partitioning_settings { min_active_partitions: 15 max_active_partitions: 1 auto_partitioning_settings { strategy: AUTO_PARTITIONING_STRATEGY_DISABLED partition_write_speed { stabilization_window { seconds: 300 } up_utilization_percent: 80 down_utilization_percent: 20 } } } partitions { active: true } partitions { partition_id: 1 active: true } partitions { partition_id: 2 active: true } partitions { partition_id: 3 active: true } partitions { partition_id: 4 active: true } partitions { partition_id: 5 active: true } partitions { partition_id: 6 active: true } partitions { partition_id: 7 active: true } partitions { partition_id: 8 active: true } partitions { partition_id: 9 active: true } partitions { partition_id: 10 active: true } partitions { partition_id: 11 active: true } partitions { partition_id: 12 active: true } partitions { partition_id: 13 active: true } partitions { partition_id: 14 active: true } retention_period { seconds: 64800 } partition_write_speed_bytes_per_second: 2097152 partition_write_burst_bytes: 2097152 attributes { key: "__max_partition_message_groups_seqno_stored" value: "6000000" } attributes { key: "_message_group_seqno_retention_period_ms" value: "1382400000" } consumers { name: "shared/user" read_from { } attributes { key: "_service_type" value: "data-streams" } } } } } Describe bad topic 2025-06-03T10:24:45.889166Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:150: new Describe topic request 2025-06-03T10:24:45.889179Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:455: TDescribeTopicActor for request path: "/Root/PQ//bad-topic" include_stats: true include_location: true 2025-06-03T10:24:45.889198Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1186: Describe topic actor for path /Root/PQ//bad-topic Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } 2025-06-03T10:24:45.883637Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667096990906491:2587] connected; active server actors: 1 2025-06-03T10:24:45.886855Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667096990906510:2593] connected; active server actors: 1 2025-06-03T10:24:45.887011Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 2 2025-06-03T10:24:45.887013Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 2 2025-06-03T10:24:45.887015Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 
72075186224037895, partitionId 2, NodeId 2, Generation 2 2025-06-03T10:24:45.887017Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 2 2025-06-03T10:24:45.887018Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 1, Generation 2 2025-06-03T10:24:45.887020Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 2 2025-06-03T10:24:45.887022Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 1, Generation 2 2025-06-03T10:24:45.887024Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 2, Generation 2 2025-06-03T10:24:45.887026Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 2, Generation 2 2025-06-03T10:24:45.887028Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 2, Generation 2 2025-06-03T10:24:45.887030Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 1, Generation 2 2025-06-03T10:24:45.887032Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 1, Generation 2 2025-06-03T10:24:45.887034Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 2, Generation 2 2025-06-03T10:24:45.887036Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 2, Generation 2 2025-06-03T10:24:45.887038Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 1, Generation 2 2025-06-03T10:24:45.887350Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667096990906510:2593] disconnected; active server actors: 1 2025-06-03T10:24:45.887352Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667096990906510:2593] disconnected no session ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_MultipleInflightWriteUpdateTokenRequestWithDifferentValidToken_SessionClosedWithOverloadedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:44.068506Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667092312246892:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:44.068546Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:44.060066Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667092344124579:2275];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:44.060089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002164/r3tmp/tmpdkEoz9/pdisk_1.dat 2025-06-03T10:24:44.158658Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:44.164657Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:44.211411Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31509, node 1 2025-06-03T10:24:44.261289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002164/r3tmp/yandexM14ING.tmp 2025-06-03T10:24:44.261368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002164/r3tmp/yandexM14ING.tmp 2025-06-03T10:24:44.261437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002164/r3tmp/yandexM14ING.tmp 2025-06-03T10:24:44.261478Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:44.264517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:44.264546Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:44.266480Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:44.266806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:44.269482Z INFO: TTestServer started on Port 65520 GrpcPort 31509 TClient is connected to server localhost:65520 PQClient connected to localhost:31509 === TenantModeEnabled() = 1 === Init PQ - start server on port 31509 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:44.343739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976720657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:44.343796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.343860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:44.343917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976720657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:44.343926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480 waiting... 
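Stepping back to the pair of DescribeTopic dumps in the previous test (rt3.dc1--topic-x, once with location and once with neither stats nor location): together they spell out the topic's effective settings — 15 partitions, 18 h retention, 2 MiB/s write speed and burst per partition, and one consumer, shared/user. As a hedged illustration only, equivalent settings expressed in YQL topic DDL would look roughly like the sketch below; the test actually creates this topic through the PersQueue API rather than through YQL, and the option names here follow the public CREATE TOPIC syntax, so treat this as an assumption-laden paraphrase of the dump, not the test's own code:

    CREATE TOPIC `/Root/PQ/rt3.dc1--topic-x` (
        CONSUMER `shared/user`
    ) WITH (
        min_active_partitions = 15,
        retention_period = Interval('PT18H'),              -- 64800 seconds in the dump
        partition_write_speed_bytes_per_second = 2097152,  -- 2 MiB per second
        partition_write_burst_bytes = 2097152
    );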
2025-06-03T10:24:44.345343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976720657, response: Status: StatusAccepted TxId: 281474976720657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:44.345369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:44.345413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.345421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976720657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:44.345424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976720657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:44.345427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 2 -> 3 2025-06-03T10:24:44.346310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:24:44.346317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976720657, ready parts: 0/1, is published: true 2025-06-03T10:24:44.346322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:24:44.349918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.349942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976720657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:44.349956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 3 -> 128 2025-06-03T10:24:44.350583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.350588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.350594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.350601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976720657 ready parts: 1/1 2025-06-03T10:24:44.351602Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976720657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:44.352204Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976720657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976720657 msg type: 269090816 2025-06-03T10:24:44.352266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976720657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:44.353265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946284400, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:44.353332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976720657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946284400 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:44.353340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.353417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 128 -> 240 2025-06-03T10:24:44.353427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.353466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:44.353479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:44.354032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:44.354037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:44.354089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:44.354092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7511667092344124966:2385], at schemeshard: 72057594046644480, txId: 281474976720657, path id: 1 2025-06-03T10:24:44.354101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.354108Z node 1 :FLAT_TX_SCHEMESHARD INFO: sche ... 
on=0) Received event: NActors::TEvents::TEvPoison Finish: 0 === InitializeWritePQService done === PersQueueClient === InitializePQ completed BEFORE MODIFY PERMISSIONS 2025-06-03T10:24:47.686544Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\031\010\001\022\025\032\023test_user_0@builtin\n!\010\000\022\035\010\001\020\366\213\001\032\023test_user_0@builtin \003\n\031\010\001\022\025\032\023test_user_1@builtin\n!\010\000\022\035\010\001\020\366\213\001\032\023test_user_1@builtin \003\n\031\010\001\022\025\032\023test_user_2@builtin\n!\010\000\022\035\010\001\020\366\213\001\032\023test_user_2@builtin \003" } } TxId: 281474976720665 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:35786" , at schemeshard: 72057594046644480 2025-06-03T10:24:47.686598Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976720665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.686624Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5223: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-06-03T10:24:47.686626Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5239: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-03T10:24:47.686666Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976720665:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:47.686671Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.686691Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-03T10:24:47.686695Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:47.686698Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-03T10:24:47.686700Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:47.686711Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-03T10:24:47.686727Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976720665, ready parts: 1/1, is published: false 2025-06-03T10:24:47.686743Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-03T10:24:47.686744Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:47.686747Z node 3 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976720665:0 2025-06-03T10:24:47.686749Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976720665, publications: 1, subscribers: 0 2025-06-03T10:24:47.686753Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976720665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-03T10:24:47.687562Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976720665, response: Status: StatusSuccess TxId: 281474976720665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:47.687605Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user_0@builtin, add access: +W:test_user_1@builtin, add access: +W:test_user_2@builtin, remove access: -():test_user_0@builtin:-, remove access: -():test_user_1@builtin:-, remove access: -():test_user_2@builtin:- 2025-06-03T10:24:47.687638Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:47.687640Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-03T10:24:47.687680Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:47.687683Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7511667104557575534:2375], at schemeshard: 72057594046644480, txId: 281474976720665, path id: 10 2025-06-03T10:24:47.687935Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:47.687946Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:47.687947Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976720665 2025-06-03T10:24:47.687952Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976720665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-03T10:24:47.687955Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-03T10:24:47.687975Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976720665, 
subscribers: 0 2025-06-03T10:24:47.688759Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:47.692699Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:24:47.692707Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-03T10:24:47.697604Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-group-id" } 2025-06-03T10:24:47.697645Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-group-id" from ipv6:[::1]:35784 2025-06-03T10:24:47.697650Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:35784 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-03T10:24:47.697654Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:47.698161Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-03T10:24:47.698202Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:24:47.698204Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:24:47.698208Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:24:47.698226Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667108852543730:2376] (SourceId=test-group-id, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:24:47.698230Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
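The three TTableHelper queries above fully determine the shape of the mapping table the partition chooser consults: the DECLARE blocks give the column types, and the WHERE clause of the SelectQuery gives the key columns. A sketch of the implied schema, for orientation only — the server creates this metadata table itself, so this DDL is reconstructed, not quoted:

    CREATE TABLE `//Root/.metadata/TopicPartitionsMapping` (
        Hash Uint64,        -- hash of the producer (source) id
        Topic Utf8,
        ProducerId Utf8,
        CreateTime Uint64,
        AccessTime Uint64,
        Partition Uint32,
        SeqNo Uint64,
        PRIMARY KEY (Hash, Topic, ProducerId)
    );

This is also why the chooser can answer ReplyResult: Partition=0, SeqNo=0 for SourceId=test-group-id above: a lookup by (Hash, Topic, ProducerId) either returns a previously pinned partition or, as the zero SeqNo here suggests, finds nothing, picks a partition, and upserts the new mapping.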
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:24:47.701631Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 4, Generation: 1 2025-06-03T10:24:47.703048Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 generated for partition 0 topic 'acc/topic1' owner test-group-id 2025-06-03T10:24:47.703716Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 2025-06-03T10:24:47.705800Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-03T10:24:47.705908Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-03T10:24:47.705917Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: got another 'update_token_request' while previous still in progress, only single token update is allowed at a time sessionId: test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 2025-06-03T10:24:47.705985Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|83e146fd-dc06e24b-bee9d58c-e42745d0_0 is DEAD 2025-06-03T10:24:47.706062Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison |59.3%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part2/pytest >> test.py::test[join-nested_semi_join-off-ForceBlocks] [GOOD] >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] >> TPersqueueControlPlaneTestSuite::SetupReadLockSessionWithDatabase [GOOD] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase >> TPersQueueCommonTest::Auth_MultipleUpdateTokenRequestIterationsWithValidToken_GotUpdateTokenResponseForEachRequest [GOOD] >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse >> TTopicApiDescribes::GetLocalDescribe [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueDataPlaneTestSuite::WriteSession [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:43.217812Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667091266167025:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:43.217828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:43.642880Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667091509022723:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:43.643012Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read 
cache: : Created 2025-06-03T10:24:43.645883Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:43.647500Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002174/r3tmp/tmpSApfSf/pdisk_1.dat 2025-06-03T10:24:43.913623Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:43.927531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.927558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.929832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.929852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.936511Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:43.936551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:43.937524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20178, node 1 2025-06-03T10:24:44.053618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002174/r3tmp/yandexzzs38q.tmp 2025-06-03T10:24:44.053639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002174/r3tmp/yandexzzs38q.tmp 2025-06-03T10:24:44.053709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002174/r3tmp/yandexzzs38q.tmp 2025-06-03T10:24:44.053763Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:44.063062Z INFO: TTestServer started on Port 18263 GrpcPort 20178 TClient is connected to server localhost:18263 PQClient connected to localhost:20178 === TenantModeEnabled() = 1 === Init PQ - start server on port 20178 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:24:44.121438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:44.121498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.121553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:44.121613Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:44.121622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.122254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:44.122274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:44.122314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.122323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:44.122325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:44.122330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 2025-06-03T10:24:44.122750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.122756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:44.122760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:24:44.123087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:44.123092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:24:44.123095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 
281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:44.123141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.123145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.123149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.123154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 waiting... 2025-06-03T10:24:44.123799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:44.124118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:24:44.124158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:44.124633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946284169, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:44.124663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946284169 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:44.124668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.124716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:24:44.124720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:44.124757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:44.124763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:44.125127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:44.125130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:44.125179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:44.125184Z node 1 :FLAT_TX_SCHEMESHARD ... Q_READ_PROXY DEBUG: grpc_pq_read.h:133: new session created cookie 2 2025-06-03T10:24:48.972591Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer session grpc read done: success# 1, data# { init_request { topics_read_settings { topic: "/Root/account1/write_topic" } read_only_original: true consumer: "consumer_aba" read_params { max_read_size: 104857600 } } } 2025-06-03T10:24:48.972651Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:916: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 read init: from# ipv6:[::1]:33746, request# { init_request { topics_read_settings { topic: "/Root/account1/write_topic" } read_only_original: true consumer: "consumer_aba" read_params { max_read_size: 104857600 } } } 2025-06-03T10:24:48.972717Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 auth for : consumer_aba 2025-06-03T10:24:48.974134Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 Handle describe topics response 2025-06-03T10:24:48.974159Z node 3 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 auth is DEAD 2025-06-03T10:24:48.974178Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 auth ok: topics# 1, initDone# 0 2025-06-03T10:24:48.974554Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1196: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 register session: topic# /Root/account1/write_topic 2025-06-03T10:24:48.974779Z :INFO: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] [null] Server session id: consumer_aba_3_2_2256839499528114474_v1 2025-06-03T10:24:48.974847Z :DEBUG: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] [null] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:24:48.977511Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037894][write_topic] pipe [3:7511667113295122346:2413] connected; active server actors: 1 2025-06-03T10:24:48.977644Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 grpc read done: success# 1, data# { read { } } 2025-06-03T10:24:48.977761Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 got read request: guid# e0b8297b-cc53730c-7a5c2f14-fab7d0ae 2025-06-03T10:24:48.977711Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1699: [72075186224037894][write_topic] consumer "consumer_aba" register session for pipe [3:7511667113295122346:2413] session consumer_aba_3_2_2256839499528114474_v1 2025-06-03T10:24:48.977726Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: 
read_balancer__balancing.cpp:635: [72075186224037894][write_topic] consumer consumer_aba register readable partition 0 2025-06-03T10:24:48.977739Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:665: [72075186224037894][write_topic] consumer consumer_aba family created family=1 (Status=Free, Partitions=[0]) 2025-06-03T10:24:48.977747Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:867: [72075186224037894][write_topic] consumer consumer_aba register reading session ReadingSession "consumer_aba_3_2_2256839499528114474_v1" (Sender=[3:7511667113295122343:2413], Pipe=[3:7511667113295122346:2413], Partitions=[], ActiveFamilyCount=0) 2025-06-03T10:24:48.977752Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037894][write_topic] consumer consumer_aba rebalancing was scheduled 2025-06-03T10:24:48.977764Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1255: [72075186224037894][write_topic] consumer consumer_aba balancing. Sessions=1, Families=1, UnradableFamilies=1 [1 (0), ], RequireBalancing=0 [] 2025-06-03T10:24:48.977774Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1302: [72075186224037894][write_topic] consumer consumer_aba balancing family=1 (Status=Free, Partitions=[0]) for ReadingSession "consumer_aba_3_2_2256839499528114474_v1" (Sender=[3:7511667113295122343:2413], Pipe=[3:7511667113295122346:2413], Partitions=[], ActiveFamilyCount=0) 2025-06-03T10:24:48.977785Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037894][write_topic] consumer consumer_aba family 1 status Active partitions [0] session "consumer_aba_3_2_2256839499528114474_v1" sender [3:7511667113295122343:2413] lock partition 0 for ReadingSession "consumer_aba_3_2_2256839499528114474_v1" (Sender=[3:7511667113295122343:2413], Pipe=[3:7511667113295122346:2413], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-03T10:24:48.977796Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037894][write_topic] consumer consumer_aba start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-03T10:24:48.977801Z node 4 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037894][write_topic] consumer consumer_aba balancing duration: 0.000034s 2025-06-03T10:24:48.978835Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 assign: record# { Partition: 0 TabletId: 72075186224037893 Topic: "write_topic" Generation: 1 Step: 1 Session: "consumer_aba_3_2_2256839499528114474_v1" ClientId: "consumer_aba" PipeClient { RawX1: 7511667113295122346 RawX2: 4503612512274797 } Path: "/Root/account1/write_topic" } 2025-06-03T10:24:48.978864Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 INITING TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) 2025-06-03T10:24:48.979102Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037893 Generation: 1, pipe: [3:7511667113295122348:2416] 2025-06-03T10:24:48.979193Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: consumer_aba_3_2_2256839499528114474_v1:1 with generation 1 Got new read session event: 2025-06-03T10:24:48.983599Z node 3 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 1 WriteTimestampMS: 1748946288843 CreateTimestampMS: 1748946288838 SizeLag: 165 WriteTimestampEstimateMS: 1748946288843 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } CreatePartitionStream { PartitionStreamId: 1 TopicPath: account1/write_topic Cluster: PartitionId: 0 CommittedOffset: 0 EndOffset: 1 } 2025-06-03T10:24:48.984231Z :INFO: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] Closing read session. Close timeout: 0.000000s 2025-06-03T10:24:48.984246Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:account1/write_topic:0:1:0:0 2025-06-03T10:24:48.984255Z :INFO: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] Counters: { Errors: 0 CurrentSessionLifetimeMs: 18 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:24:48.984274Z :NOTICE: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:24:48.984282Z :DEBUG: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] [null] Abort session to cluster 2025-06-03T10:24:48.984420Z :NOTICE: [/Root] [/Root] [1f5cdafc-f8ea3b02-513337e7-ab8089dc] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:24:48.983627Z node 3 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 INIT DONE TopicId: Topic /Root/account1/write_topic in database: Root, partition 0(assignId:1) EndOffset 1 readOffset 0 committedOffset 0 2025-06-03T10:24:48.983652Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 sending to client partition status 2025-06-03T10:24:48.985327Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037894][write_topic] pipe [3:7511667113295122346:2413] disconnected; active server actors: 1 2025-06-03T10:24:48.985340Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037894][write_topic] pipe [3:7511667113295122346:2413] client consumer_aba disconnected session consumer_aba_3_2_2256839499528114474_v1 2025-06-03T10:24:48.984791Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 grpc read done: success# 0, data# { } 2025-06-03T10:24:48.984796Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 grpc read failed 2025-06-03T10:24:48.984802Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 grpc closed 2025-06-03T10:24:48.984812Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer consumer_aba session consumer_aba_3_2_2256839499528114474_v1 is DEAD 2025-06-03T10:24:48.985473Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: consumer_aba_3_2_2256839499528114474_v1 2025-06-03T10:24:49.349769Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511667117590089656:2419], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
    <main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:49.350326Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=YjUyZTY4ODItNWZkMzFlOWYtNmM2NzkwYjItZDUzMjBhNDY=, ActorId: [3:7511667117590089654:2418], ActorState: ExecuteState, TraceId: 01jwtn50p2ecfkg6hhssr64r04, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:49.350458Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestLimiterLimitsWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:42.387152Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667084520900677:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:42.397753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:42.471722Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667084364649151:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:42.649410Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:42.649483Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:42.660442Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00217a/r3tmp/tmpM9D9Sb/pdisk_1.dat 2025-06-03T10:24:42.939942Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:42.941421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:42.941444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:42.941487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:42.941849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:42.947888Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 
2025-06-03T10:24:42.947922Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:42.949796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27919, node 1 2025-06-03T10:24:43.254849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00217a/r3tmp/yandex2psvVh.tmp 2025-06-03T10:24:43.255112Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00217a/r3tmp/yandex2psvVh.tmp 2025-06-03T10:24:43.255172Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00217a/r3tmp/yandex2psvVh.tmp 2025-06-03T10:24:43.255211Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:43.462638Z INFO: TTestServer started on Port 29095 GrpcPort 27919 TClient is connected to server localhost:29095 PQClient connected to localhost:27919 === TenantModeEnabled() = 1 === Init PQ - start server on port 27919 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:24:43.835142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:43.835206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.835703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:43.837004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:43.838388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.846809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:43.846836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:43.846905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.846916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:43.846918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:43.846922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-03T10:24:43.847873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.847895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:43.848100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:24:43.848853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.848857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.848861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:43.848868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-03T10:24:43.864222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:43.864397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:43.864409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:24:43.864416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:43.865685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:24:43.865941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:43.876474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946283910, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:43.876530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946283910 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:43.876538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:43.876825Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:24:43.876835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:43.876862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:43.876874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:43.878220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:43.878229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:43.878297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:43.878302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: sche ... p:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-03T10:24:48.775069Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-03T10:24:48.775090Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 123|e2911190-caa34308-f92cf005-27146e19_0 generated for partition 0 topic 'PQ/account/topic' owner 123 2025-06-03T10:24:48.775111Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-03T10:24:48.775123Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:24:48.775144Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-03T10:24:48.775146Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-03T10:24:48.775157Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:24:48.775170Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 4 partition: 0 MaxSeqNo: 2 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 2025-06-03T10:24:48.775431Z :INFO: [] MessageGroupId [123] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946288775 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:24:48.775450Z :INFO: [] MessageGroupId [123] SessionId [] Write session established. 
Init response: last_sequence_number: 2 session_id: "123|e2911190-caa34308-f92cf005-27146e19_0" topic: "PQ/account/topic" 2025-06-03T10:24:48.775507Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write 1 messages with Id from 1 to 1 2025-06-03T10:24:48.775526Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: try to update token 2025-06-03T10:24:48.775533Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Send 1 message(s) (0 left), first sequence number is 3 2025-06-03T10:24:48.775577Z :INFO: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: close. Timeout = 10000 ms 2025-06-03T10:24:48.775647Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-03T10:24:48.775703Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-03T10:24:48.775737Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-03T10:24:48.775740Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-03T10:24:48.775758Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-03T10:24:48.775764Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:24:48.775786Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic' requestId: 2025-06-03T10:24:48.775788Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037893] got client message batch for topic 'PQ/account/topic' partition 0 2025-06-03T10:24:48.775802Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037893] got client message topic: PQ/account/topic partition: 0 SourceId: '\000123' SeqNo: 3 partNo : 0 messageNo: 1 size 372 offset: -1 2025-06-03T10:24:48.775819Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1704: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Send write quota request. Topic: "PQ/account/topic". Partition: 0. Amount: 376. Cookie: 3 2025-06-03T10:24:48.775833Z node 3 :PERSQUEUE DEBUG: partition.cpp:3630: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account/topic". 
Partition: 0: Cookie: 3 2025-06-03T10:24:48.775861Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob processing sourceId '\000123' seqNo 3 partNo 0 2025-06-03T10:24:48.775895Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Topic 'PQ/account/topic' partition 0 part blob complete sourceId '\000123' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 443 count 1 nextOffset 3 batches 1 2025-06-03T10:24:48.775942Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account/topic' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 431 WTime 1748946288774 2025-06-03T10:24:48.775957Z node 3 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037893, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:24:48.775958Z node 3 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:24:48.775961Z node 3 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037893, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:24:48.775963Z node 3 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:24:48.775968Z node 3 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037893, Partition: 0, State: StateIdle] m0000000000p123 2025-06-03T10:24:48.775970Z node 3 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037893, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-06-03T10:24:48.775971Z node 3 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037893, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:24:48.775973Z node 3 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037893, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:24:48.775975Z node 3 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037893, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:24:48.775982Z node 3 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:24:48.775990Z node 3 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 431 2025-06-03T10:24:48.779586Z node 3 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 431 actorID [3:7511667111378097346:2399] 2025-06-03T10:24:48.779635Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 376 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:24:48.779634Z node 3 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037893' partition 0 offset 2 partno 0 count 1 parts 0 size 431 2025-06-03T10:24:48.779647Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:24:48.779660Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Answering for message sourceid: '\000123', Topic: 'PQ/account/topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-03T10:24:48.779708Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:24:48.779728Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:24:48.781784Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session got write response: sequence_numbers: 3 offsets: 2 already_written: false write_statistics { persist_duration_ms: 4 } 2025-06-03T10:24:48.781795Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: acknoledged message 1 2025-06-03T10:24:48.875679Z :INFO: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session will now close 2025-06-03T10:24:48.875701Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: aborting 2025-06-03T10:24:48.875909Z :INFO: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:24:48.875922Z :DEBUG: [] MessageGroupId [123] SessionId [123|e2911190-caa34308-f92cf005-27146e19_0] Write session: destroy 2025-06-03T10:24:48.877406Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 grpc read done: success: 0 data: 2025-06-03T10:24:48.877419Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 grpc read failed 2025-06-03T10:24:48.877429Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 grpc closed 2025-06-03T10:24:48.877435Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: 123|e2911190-caa34308-f92cf005-27146e19_0 is DEAD 2025-06-03T10:24:48.877648Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037893 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:24:48.877955Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037893] server disconnected, pipe [3:7511667111378097647:2427] destroyed 2025-06-03T10:24:48.877971Z node 3 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:24:48.931343Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511667111378097661:2433], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
    <main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:48.931432Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=YjFkOTM1MzYtZmQ3ZDMyYzMtMmFhNzFhZmItZjc2ZWFlNWU=, ActorId: [3:7511667111378097654:2429], ActorState: ExecuteState, TraceId: 01jwtn508wape5xzpmv41cz33d, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:48.931578Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> TTopicApiDescribes::GetPartitionDescribe [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetLocalDescribe [GOOD] Test command err: 2025-06-03T10:24:39.772169Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667071287695597:2185];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:39.772696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:39.821449Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667074945902908:2092];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:39.821675Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002620/r3tmp/tmpBiR66b/pdisk_1.dat 2025-06-03T10:24:39.919945Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:39.922638Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:40.252182Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:40.254010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:40.254254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:40.258876Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:40.258893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:40.267914Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:40.267950Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:40.269645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 6155, node 1 2025-06-03T10:24:40.429601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002620/r3tmp/yandexEbSOcZ.tmp 2025-06-03T10:24:40.429617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002620/r3tmp/yandexEbSOcZ.tmp 2025-06-03T10:24:40.429986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002620/r3tmp/yandexEbSOcZ.tmp 2025-06-03T10:24:40.430034Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:40.561918Z INFO: TTestServer started on Port 10764 GrpcPort 6155 TClient is connected to server localhost:10764 PQClient connected to localhost:6155 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:41.126660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:41.174496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:41.551078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:41.572240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710661, at schemeshard: 72057594046644480 2025-06-03T10:24:42.813220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667084172598412:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:42.813314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:42.817576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667084172598426:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:42.835684Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667084172598431:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:42.836323Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:42.868018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:24:42.975581Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667084172598430:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:24:43.369413Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667088467565801:2746] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:43.388498Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511667092125772480:2318], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
    <main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:43.398083Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=ZWNhYjNmYTQtMzFjNTQyNzItOTRlOGMzYjktMzY1MmY3Yg==, ActorId: [2:7511667092125772441:2312], ActorState: ExecuteState, TraceId: 01jwtn4tm6511zprdcmnkvz3mc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:43.399350Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:43.395431Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667088467565815:2349], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
    <main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:43.396544Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZTJmMjI5NjMtNGRiZTBkYzktMzQ5NjIyNTktOTJjZjgwMDY=, ActorId: [1:7511667084172598394:2336], ActorState: ExecuteState, TraceId: 01jwtn4t8sax13a0xsm45r3gfh, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:43.410930Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:43.415037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.558701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:43.665517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:43.803779Z node 1 :KQP_EXECUTER ERROR: kqp_planner ... 
3.dc1--topic-x' partition 5 generation 1 [1:7511667114237370963:2522] 2025-06-03T10:24:49.833394Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037896, Partition: 10, State: StateInit] bootstrapping 10 [1:7511667114237370964:2523] 2025-06-03T10:24:49.833457Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037893, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 1 generation 1 [1:7511667114237370960:2519] 2025-06-03T10:24:49.833940Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037896, Partition: 10, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 10 generation 1 [1:7511667114237370964:2523] 2025-06-03T10:24:49.834244Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037898, Partition: 14, State: StateInit] bootstrapping 14 [1:7511667114237370966:2525] 2025-06-03T10:24:49.834305Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037898, Partition: 11, State: StateInit] bootstrapping 11 [1:7511667114237370967:2526] 2025-06-03T10:24:49.834961Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037898, Partition: 11, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 11 generation 1 [1:7511667114237370967:2526] 2025-06-03T10:24:49.834963Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037898, Partition: 14, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 14 generation 1 [1:7511667114237370966:2525] 2025-06-03T10:24:49.835357Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037899, Partition: 4, State: StateInit] bootstrapping 4 [1:7511667114237370969:2528] 2025-06-03T10:24:49.835829Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037899, Partition: 4, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 4 generation 1 [1:7511667114237370969:2528] 2025-06-03T10:24:49.835377Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: 8, State: StateInit] bootstrapping 8 [2:7511667117895576931:2419] 2025-06-03T10:24:49.835992Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: 8, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 8 generation 1 [2:7511667117895576931:2419] 2025-06-03T10:24:49.836258Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037895, Partition: 9, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 9 generation 1 [2:7511667117895576927:2415] 2025-06-03T10:24:49.836369Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: 12, State: StateInit] bootstrapping 12 [2:7511667117895576943:2421] 2025-06-03T10:24:49.836708Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037897, Partition: 7, State: StateInit] bootstrapping 7 [2:7511667117895576945:2423] 2025-06-03T10:24:49.836933Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: 12, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 12 generation 1 [2:7511667117895576943:2421] 2025-06-03T10:24:49.837268Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037897, Partition: 13, State: StateInit] bootstrapping 13 [2:7511667117895576946:2424] 2025-06-03T10:24:49.837287Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037897, Partition: 7, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 7 generation 1 [2:7511667117895576945:2423] 2025-06-03T10:24:49.837838Z node 2 
:PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037897, Partition: 13, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 1 [2:7511667117895576946:2424] 2025-06-03T10:24:49.838083Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037892, Partition: 3, State: StateInit] bootstrapping 3 [2:7511667117895576930:2418] 2025-06-03T10:24:49.838183Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037892, Partition: 0, State: StateInit] bootstrapping 0 [2:7511667117895576932:2420] 2025-06-03T10:24:49.838666Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037892, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 3 generation 1 [2:7511667117895576930:2418] 2025-06-03T10:24:49.838777Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037892, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 0 generation 1 [2:7511667117895576932:2420] 2025-06-03T10:24:49.841562Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037898] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.841800Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037899] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.841803Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037896] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.841886Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037893] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.842622Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.842719Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.843968Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.844090Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig Create topic result: 1 2025-06-03T10:24:49.847517Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667114237371241:3857]: Request location 2025-06-03T10:24:49.848255Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371259:3869] connected; active server actors: 1 2025-06-03T10:24:49.848465Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 2, Generation 1 2025-06-03T10:24:49.848470Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 1 2025-06-03T10:24:49.848472Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 2, NodeId 2, Generation 1 2025-06-03T10:24:49.848474Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 
72075186224037892, partitionId 3, NodeId 2, Generation 1 2025-06-03T10:24:49.848476Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037899, partitionId 4, NodeId 1, Generation 1 2025-06-03T10:24:49.848478Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 1 2025-06-03T10:24:49.848480Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 6, NodeId 1, Generation 1 2025-06-03T10:24:49.848482Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 7, NodeId 2, Generation 1 2025-06-03T10:24:49.848485Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 8, NodeId 2, Generation 1 2025-06-03T10:24:49.848487Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037895, partitionId 9, NodeId 2, Generation 1 2025-06-03T10:24:49.848489Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 10, NodeId 1, Generation 1 2025-06-03T10:24:49.848491Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 11, NodeId 1, Generation 1 2025-06-03T10:24:49.848493Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037894, partitionId 12, NodeId 2, Generation 1 2025-06-03T10:24:49.848495Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037897, partitionId 13, NodeId 2, Generation 1 2025-06-03T10:24:49.848497Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037898, partitionId 14, NodeId 1, Generation 1 2025-06-03T10:24:49.848807Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7511667114237371241:3857]: Got location 2025-06-03T10:24:49.848979Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667114237371263:3873]: Request location 2025-06-03T10:24:49.849289Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7511667114237371263:3873]: Got location 2025-06-03T10:24:49.849220Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371259:3869] disconnected; active server actors: 1 2025-06-03T10:24:49.849227Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371259:3869] disconnected no session 2025-06-03T10:24:49.849231Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371265:3875] connected; active server actors: 1 2025-06-03T10:24:49.849237Z node 2 :PERSQUEUE_READ_BALANCER 
DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 1 2025-06-03T10:24:49.849239Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 1 2025-06-03T10:24:49.849241Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037896, partitionId 5, NodeId 1, Generation 1 2025-06-03T10:24:49.849554Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371265:3875] disconnected; active server actors: 1 2025-06-03T10:24:49.849558Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371265:3875] disconnected no session 2025-06-03T10:24:49.849646Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667114237371266:3876]: Request location 2025-06-03T10:24:49.849823Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667114237371270:3880] connected; active server actors: 1 >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad >> TPersQueueNewSchemeCacheTest::CheckGrpcWriteNoDC [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood >> test.py::test[aggregate-aggregation_by_udf--Results] [GOOD] >> test.py::test[aggregate-avg_and_sum_by_value--Results] >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteUpdateTokenRequestWithValidTokenButWithoutACL_SessionClosedWithUnauthorizedError [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:45.841543Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667097572974855:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:45.841565Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:45.913957Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667096668364855:2269];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:45.915127Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.171480Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002163/r3tmp/tmpJJpnI2/pdisk_1.dat 2025-06-03T10:24:46.188837Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:46.270400Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9876, node 1 2025-06-03T10:24:46.302104Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.302138Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.307042Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:46.308674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:46.352183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002163/r3tmp/yandexSNjmO5.tmp 2025-06-03T10:24:46.352207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002163/r3tmp/yandexSNjmO5.tmp 2025-06-03T10:24:46.352274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002163/r3tmp/yandexSNjmO5.tmp 2025-06-03T10:24:46.352332Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:46.376453Z INFO: TTestServer started on Port 3491 GrpcPort 9876 2025-06-03T10:24:46.424222Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.424251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.433979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3491 PQClient connected to localhost:9876 === TenantModeEnabled() = 1 === Init PQ - start server on port 9876 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:24:46.662108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976720657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:46.662161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.662223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:46.662278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976720657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:46.662293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.663576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976720657, response: Status: StatusAccepted TxId: 281474976720657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:46.663599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:46.663635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.663647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976720657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:46.663650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976720657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:46.663653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 2 -> 3 2025-06-03T10:24:46.664576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.664588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976720657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:46.664595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 3 -> 128 2025-06-03T10:24:46.665066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.665077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:46.665079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: 
NSubDomainState::TPropose ProgressState leave, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:46.665082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976720657 ready parts: 1/1 waiting... 2025-06-03T10:24:46.667669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976720657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:46.667931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:24:46.667941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976720657, ready parts: 0/1, is published: true 2025-06-03T10:24:46.667945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:24:46.668149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976720657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976720657 msg type: 269090816 2025-06-03T10:24:46.668202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976720657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:46.668747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946286717, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:46.668784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976720657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946286717 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:46.668789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:46.668862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976720657:0 128 -> 240 2025-06-03T10:24:46.668875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976720657:0, at tablet# 72057594046644480 2025-06-03T10:24:46.668903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:46.668919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:46.669283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:46.669307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: 
TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:46.669344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:46.669353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshar ... , LocalPathId: 10] 2025-06-03T10:24:51.220248Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976720665:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:51.220253Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976720665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:51.220273Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-03T10:24:51.220276Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:51.220280Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976720665:0 progress is 1/1 2025-06-03T10:24:51.220281Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:51.220292Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-03T10:24:51.220305Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976720665, ready parts: 1/1, is published: false 2025-06-03T10:24:51.220310Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-03T10:24:51.220311Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976720665 ready parts: 1/1 2025-06-03T10:24:51.220314Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976720665:0 2025-06-03T10:24:51.220317Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976720665, publications: 1, subscribers: 0 2025-06-03T10:24:51.220319Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976720665, [OwnerId: 72057594046644480, LocalPathId: 10], 3 2025-06-03T10:24:51.229708Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976720665, response: Status: StatusSuccess TxId: 281474976720665 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:51.229775Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976720665, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, add access: +W:test_user@builtin, remove access: -():test_user@builtin:- 2025-06-03T10:24:51.229818Z node 3 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:51.229821Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976720665, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-03T10:24:51.229869Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:51.229872Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7511667120905555507:2375], at schemeshard: 72057594046644480, txId: 281474976720665, path id: 10 2025-06-03T10:24:51.230144Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:51.230157Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 3 PathOwnerId: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:51.230160Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976720665 2025-06-03T10:24:51.230164Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976720665, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 3 2025-06-03T10:24:51.230168Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-03T10:24:51.230197Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976720665, subscribers: 0 2025-06-03T10:24:51.237865Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976720665 2025-06-03T10:24:51.242776Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:24:51.242790Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-03T10:24:51.242958Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "/Root/acc/topic1" message_group_id: "test-message-group" } 2025-06-03T10:24:51.242980Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "/Root/acc/topic1" message_group_id: "test-message-group" from ipv6:[::1]:60642 2025-06-03T10:24:51.242985Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:60642 proto=v1 topic=/Root/acc/topic1 durationSec=0 2025-06-03T10:24:51.242987Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:51.249560Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 
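The schemeshard entries above show the scheme-board publication handshake for txId 281474976720665: the path is described and sent to the populator, each TEvUpdateAck decrements an in-flight publication counter ("Publication in-flight, count: 1"), and once the counter reaches zero the operation reports "Publication complete, notify & remove". A compact sketch of that ack-counting pattern, with hypothetical names (the production code these lines cite is schemeshard__publish_to_scheme_board.cpp, whose internals are not reproduced here):

    // Illustrative sketch: tracks how many path publications a transaction is
    // still waiting on, mirroring the "Publication in-flight" / "AckPublish" /
    // "Publication complete" lines above.
    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    class TPublicationTracker {
    public:
        // Begin waiting for 'pathCount' acknowledgements for transaction 'txId'.
        void StartPublication(uint64_t txId, uint32_t pathCount,
                              std::function<void()> onComplete) {
            InFlight[txId] = TEntry{pathCount, std::move(onComplete)};
        }

        // One TEvUpdateAck arrived for 'txId'; fires the completion callback
        // (notify & remove) when the last outstanding path is acknowledged.
        void AckPublish(uint64_t txId) {
            auto it = InFlight.find(txId);
            if (it == InFlight.end()) {
                return; // duplicate or late ack for an already-finished tx
            }
            if (--it->second.Remaining == 0) {
                auto done = std::move(it->second.OnComplete);
                InFlight.erase(it);
                done();
            }
        }

    private:
        struct TEntry {
            uint32_t Remaining = 0;
            std::function<void()> OnComplete;
        };
        std::unordered_map<uint64_t, TEntry> InFlight;
    };

In the trace above the count is 1 (a single path, LocalPathId 10), so the first ack immediately completes the publication, with zero subscribers left to notify.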
2025-06-03T10:24:51.249633Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId;
2025-06-03T10:24:51.249635Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo);
2025-06-03T10:24:51.249636Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition;
2025-06-03T10:24:51.249650Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667125200523827:2390] (SourceId=test-message-group, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0
2025-06-03T10:24:51.249656Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition.
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:24:51.254798Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-03T10:24:51.254936Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 generated for partition 0 topic 'acc/topic1' owner test-message-group 2025-06-03T10:24:51.255096Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 2025-06-03T10:24:51.255873Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 grpc read done: success: 1 data: update_token_request [content omitted] 2025-06-03T10:24:51.255968Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1346: updating token 2025-06-03T10:24:51.255975Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:51.256264Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 describe result for acl check 2025-06-03T10:24:51.256282Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_2@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 2025-06-03T10:24:51.256341Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-message-group|354f8c13-16e5652e-c5dd184a-884ec6f7_0 is DEAD 2025-06-03T10:24:51.256387Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:24:51.816130Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511667125200523842:2396], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:51.816928Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=OGZhMjU4MTMtMjgxZDc5MDItYjFkZTc3MTQtNWZhY2EwZGY=, ActorId: [3:7511667125200523840:2395], ActorState: ExecuteState, TraceId: 01jwtn53338mdw6425s44edw17, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:51.817056Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/describes_ut/unittest >> TTopicApiDescribes::GetPartitionDescribe [GOOD] Test command err: 2025-06-03T10:24:41.298914Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667082478080323:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:41.299144Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:41.458000Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667080660808113:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:41.671143Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:41.695156Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:41.695646Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00261f/r3tmp/tmpxTNdFz/pdisk_1.dat 2025-06-03T10:24:42.055165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:42.055188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:42.056593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:42.056605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:42.063855Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:42.087196Z node 1 :HIVE WARN: 
hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:42.087236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:42.089210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29679, node 1 2025-06-03T10:24:42.353819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00261f/r3tmp/yandexx3QFOL.tmp 2025-06-03T10:24:42.353833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00261f/r3tmp/yandexx3QFOL.tmp 2025-06-03T10:24:42.353905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00261f/r3tmp/yandexx3QFOL.tmp 2025-06-03T10:24:42.353950Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:42.422623Z INFO: TTestServer started on Port 15519 GrpcPort 29679 TClient is connected to server localhost:15519 PQClient connected to localhost:29679 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:42.882811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:43.025057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:24:44.176296Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667093545710206:2312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.176442Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.176727Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667093545710233:2315], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.190272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-06-03T10:24:44.201665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:24:44.204586Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667093545710235:2316], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:24:44.342138Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667093545710263:2172] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:44.348036Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667095362983219:2343], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:44.348884Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YTM3MzEzMWQtYzhlNGRhYWYtYmQ0NDQ4ZDctOTU3MzVmOTg=, ActorId: [1:7511667095362983186:2336], ActorState: ExecuteState, TraceId: 01jwtn4vp32xdxmmvjreg2aat9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:44.350284Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:44.352340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.350534Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511667093545710277:2320], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:44.351193Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=NTQyMTU2YjYtMzk1MDU3YmQtNjJjZGNiOTAtN2JjNjdjYQ==, ActorId: [2:7511667093545710204:2311], ActorState: ExecuteState, TraceId: 01jwtn4vmee6mngtx7x1jn5ae8, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:44.351316Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:44.390316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.478324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:44.557659Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jwtn4vzc1h0fy408vvr6575k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTY2NDZkN2QtZTZmZWEyNjAtM2QyMGJhZGItOTExZjMyMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511667095362983674:3036] 2025-06-03T10:24:46.298714Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667082478080323:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.298762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-0 ... 
2 [1:7511667116837821654:2550] 2025-06-03T10:24:49.901904Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037895] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.901922Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037895] has a tx writes info 2025-06-03T10:24:49.902596Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.902599Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037892] has a tx writes info 2025-06-03T10:24:49.902672Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037896, NodeId 1, Generation 2 2025-06-03T10:24:49.902676Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037898, NodeId 1, Generation 2 2025-06-03T10:24:49.902680Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037899, NodeId 1, Generation 2 2025-06-03T10:24:49.903015Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.903016Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037894] has a tx writes info 2025-06-03T10:24:49.905947Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037895, Partition: 2, State: StateInit] bootstrapping 2 [2:7511667115020547834:2452] 2025-06-03T10:24:49.906663Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037895, Partition: 9, State: StateInit] bootstrapping 9 [2:7511667115020547833:2451] 2025-06-03T10:24:49.908355Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037892, Partition: 3, State: StateInit] bootstrapping 3 [2:7511667115020547836:2453] 2025-06-03T10:24:49.909214Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037892, Partition: 0, State: StateInit] bootstrapping 0 [2:7511667115020547837:2454] 2025-06-03T10:24:49.909692Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: 12, State: StateInit] bootstrapping 12 [2:7511667115020547841:2457] 2025-06-03T10:24:49.910604Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: 8, State: StateInit] bootstrapping 8 [2:7511667115020547840:2456] 2025-06-03T10:24:49.919729Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037897] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:49.919747Z node 2 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037897] has a tx writes info 2025-06-03T10:24:49.920458Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037897, Partition: 7, State: StateInit] bootstrapping 7 [2:7511667115020547859:2464] 2025-06-03T10:24:49.921140Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037897, Partition: 13, State: StateInit] bootstrapping 13 [2:7511667115020547860:2465] 2025-06-03T10:24:49.927326Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:12:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-03T10:24:49.927346Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: 12, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 12 generation 2 [2:7511667115020547841:2457] 2025-06-03T10:24:49.927593Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:7:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.927596Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037897, Partition: 7, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 7 generation 2 [2:7511667115020547859:2464] 2025-06-03T10:24:49.929189Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:8:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.929192Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: 8, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 8 generation 2 [2:7511667115020547840:2456] 2025-06-03T10:24:49.933989Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037894, NodeId 2, Generation 2 2025-06-03T10:24:49.934458Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.934464Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037892, Partition: 3, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 3 generation 2 [2:7511667115020547836:2453] 2025-06-03T10:24:49.934760Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:9:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.934763Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037895, Partition: 9, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 9 generation 2 [2:7511667115020547833:2451] 2025-06-03T10:24:49.934848Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.934852Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037895, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 2 generation 2 [2:7511667115020547834:2452] 2025-06-03T10:24:49.934881Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:49.934884Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037892, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 0 generation 2 [2:7511667115020547837:2454] 2025-06-03T10:24:49.935201Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic-x:13:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-03T10:24:49.935203Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037897, Partition: 13, State: StateInit] init complete for topic 'rt3.dc1--topic-x' partition 13 generation 2 [2:7511667115020547860:2465] 2025-06-03T10:24:49.935446Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037895, NodeId 2, Generation 2 2025-06-03T10:24:49.935449Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037892, NodeId 2, Generation 2 2025-06-03T10:24:49.935451Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037900][rt3.dc1--topic-x] TEvClientConnected TabletId 72075186224037897, NodeId 2, Generation 2 2025-06-03T10:24:50.720686Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:168: new Describe partition request 2025-06-03T10:24:50.720734Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//rt3.dc1--topic-x" partition_id: 1 include_location: true 2025-06-03T10:24:50.720747Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7511667121132789147:2574]: Bootstrap 2025-06-03T10:24:50.723571Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667121132789147:2574]: Request location 2025-06-03T10:24:50.729081Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789149:2575] connected; active server actors: 1 2025-06-03T10:24:50.734325Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] addPartitionToResponse tabletId 72075186224037893, partitionId 1, NodeId 1, Generation 2 2025-06-03T10:24:50.734525Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7511667121132789147:2574]: Got location 2025-06-03T10:24:50.734785Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789149:2575] disconnected; active server actors: 1 2025-06-03T10:24:50.734792Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789149:2575] disconnected no session Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 1 active: true partition_location { node_id: 1 generation: 2 } } } } } 2025-06-03T10:24:50.741623Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:168: new Describe partition request 2025-06-03T10:24:50.741663Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//rt3.dc1--topic-x" partition_id: 3 include_stats: true include_location: true 2025-06-03T10:24:50.741679Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7511667121132789156:2578]: Bootstrap 2025-06-03T10:24:50.741967Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [1:7511667121132789156:2578]: Request location 2025-06-03T10:24:50.742715Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789159:2580] connected; active server actors: 1 2025-06-03T10:24:50.742739Z node 2 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037900][rt3.dc1--topic-x] 
addPartitionToResponse tabletId 72075186224037892, partitionId 3, NodeId 2, Generation 2 2025-06-03T10:24:50.743006Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [1:7511667121132789156:2578]: Got location Got response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Topic.DescribePartitionResult] { partition { partition_id: 3 active: true partition_stats { partition_offsets { } last_write_time { seconds: 1748946289 nanos: 904000000 } max_write_time_lag { } bytes_written { } partition_node_id: 2 } partition_location { node_id: 2 generation: 2 } } } } } 2025-06-03T10:24:50.743986Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:168: new Describe partition request 2025-06-03T10:24:50.744013Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/PQ//bad-topic" include_stats: true include_location: true 2025-06-03T10:24:50.744022Z node 1 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[1:7511667121132789161:2581]: Bootstrap Got response: operation { ready: true status: SCHEME_ERROR issues { message: "path \'Root/PQ/bad-topic\' does not exist or you do not have access rights" issue_code: 500018 severity: 1 } } 2025-06-03T10:24:50.745026Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789159:2580] disconnected; active server actors: 1 2025-06-03T10:24:50.745037Z node 2 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037900][rt3.dc1--topic-x] pipe [1:7511667121132789159:2580] disconnected no session >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] >> test.py::test[aggregate-group_by_session_distinct_compact--Results] [GOOD] >> test.py::test[aggregate-group_by_session_only--ForceBlocks] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersqueueControlPlaneTestSuite::SetupWriteLockSessionWithDatabase [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:46.230977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667103133904163:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.231151Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.235995Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667102208766904:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.340544Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00215a/r3tmp/tmpnkbtsP/pdisk_1.dat 2025-06-03T10:24:46.355516Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.355590Z node 2 
:PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:46.576824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.576860Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.583244Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:46.589087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.589121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.591010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:46.594053Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:46.596019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12480, node 1 2025-06-03T10:24:46.750096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00215a/r3tmp/yandex2X7qzI.tmp 2025-06-03T10:24:46.750110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00215a/r3tmp/yandex2X7qzI.tmp 2025-06-03T10:24:46.750362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00215a/r3tmp/yandex2X7qzI.tmp 2025-06-03T10:24:46.750414Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:46.825592Z INFO: TTestServer started on Port 64920 GrpcPort 12480 TClient is connected to server localhost:64920 PQClient connected to localhost:12480 === TenantModeEnabled() = 1 === Init PQ - start server on port 12480 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:24:47.081670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:47.081735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.081800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:47.081859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:47.081871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.085007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.085043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:47.085106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.085120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:47.085122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:47.085126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-03T10:24:47.086023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.086032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:24:47.086036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.086370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.086380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:47.086385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:24:47.087380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.087390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.087394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.087401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-03T10:24:47.091517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:47.092402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:24:47.092466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:47.093803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946287137, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.093855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946287137 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:47.093864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.093951Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:24:47.093958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.094001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:47.094326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:47.095775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:47.095790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:47.095845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:47.095856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: sche ... 4046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 11 Version: 5 PathOwnerId: 72057594046644480, cookie: 281474976715664 2025-06-03T10:24:51.840088Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715664 2025-06-03T10:24:51.840091Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715664, pathId: [OwnerId: 72057594046644480, LocalPathId: 11], version: 5 2025-06-03T10:24:51.840094Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 11] was 2 2025-06-03T10:24:51.840130Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 12 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715664 2025-06-03T10:24:51.840137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 12 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715664 2025-06-03T10:24:51.840138Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715664 2025-06-03T10:24:51.840139Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715664, pathId: [OwnerId: 72057594046644480, LocalPathId: 12], version: 2 2025-06-03T10:24:51.840140Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 12] was 4 2025-06-03T10:24:51.840145Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715664, subscribers: 1 2025-06-03T10:24:51.840147Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [3:7511667122760844452:2368] 2025-06-03T10:24:51.841154Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715664 2025-06-03T10:24:51.841163Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715664 Create topic result: 1 === EnablePQLogs === CreateChannel === NewStub === InitializeWritePQService === InitializeWritePQService start iteration === InitializeWritePQService create streamingWriter === InitializeWritePQService Write 2025-06-03T10:24:51.955131Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:24:51.955146Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-03T10:24:51.955351Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "Root/acc/topic1" message_group_id: "12345678" } 2025-06-03T10:24:51.955369Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "Root/acc/topic1" message_group_id: "12345678" from ipv6:[::1]:46212 2025-06-03T10:24:51.955374Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:46212 proto=v1 topic=Root/acc/topic1 durationSec=0 2025-06-03T10:24:51.955378Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:51.965896Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-03T10:24:51.965934Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:24:51.965936Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:24:51.966356Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = 
$Partition; 2025-06-03T10:24:51.966370Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667122760844708:2379] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:24:51.966374Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:24:51.966644Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-03T10:24:51.966742Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-03T10:24:51.966869Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 2025-06-03T10:24:51.967621Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 grpc read done: success: 0 data: 2025-06-03T10:24:51.967625Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 grpc read failed 2025-06-03T10:24:51.967666Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:818: session v1 closed cookie: 1 sessionId: 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 2025-06-03T10:24:51.967670Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 12345678|ac71799d-bb2a1038-8cd3b6ef-a7b743f9_0 is DEAD 2025-06-03T10:24:51.967721Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison Finish: 0 === InitializeWritePQService done === PersQueueClient === InitializePQ completed 2025-06-03T10:24:51.991898Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:24:51.991912Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 2 2025-06-03T10:24:51.992098Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: grpc read done: success: 1 data: init_request { topic: "topic1" message_group_id: "12345678" } 2025-06-03T10:24:51.992117Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 2 topic: "topic1" message_group_id: "12345678" from ipv6:[::1]:46212 2025-06-03T10:24:51.992122Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=2 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:46212 proto=v1 topic=topic1 durationSec=0 2025-06-03T10:24:51.992126Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:51.993151Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: describe result for acl check 2025-06-03T10:24:51.993184Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:24:51.993186Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: 
TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:24:51.993187Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:24:51.993196Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667122760844728:2388] (SourceId=12345678, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:24:51.993201Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:24:51.993478Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-03T10:24:51.993596Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 12345678|b1867293-b128c03-77b7ff78-de49448_0 generated for partition 0 topic 'acc/topic1' owner 12345678 2025-06-03T10:24:51.994159Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: 12345678|b1867293-b128c03-77b7ff78-de49448_0 2025-06-03T10:24:51.997840Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: 12345678|b1867293-b128c03-77b7ff78-de49448_0 grpc read done: success: 0 data: 2025-06-03T10:24:51.997849Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: 12345678|b1867293-b128c03-77b7ff78-de49448_0 grpc read failed 2025-06-03T10:24:51.997856Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: 12345678|b1867293-b128c03-77b7ff78-de49448_0 grpc closed 2025-06-03T10:24:51.997859Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: 12345678|b1867293-b128c03-77b7ff78-de49448_0 is DEAD 2025-06-03T10:24:51.997996Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::Auth_WriteSessionWithValidTokenAndACEAndThenRemoveACEAndSendWriteRequest_SessionClosedWithUnauthorizedErrorAfterSuccessfullWriteResponse [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:46.582285Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667104181706701:2147];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00214c/r3tmp/tmpfKWlOT/pdisk_1.dat 2025-06-03T10:24:46.760818Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: 
Direct read cache: : Created 2025-06-03T10:24:46.787826Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667102079006205:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.788264Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.788288Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:46.805729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.874615Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6477, node 1 2025-06-03T10:24:46.897624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.897658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.900946Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:46.902558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:46.905547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00214c/r3tmp/yandexqRXaDc.tmp 2025-06-03T10:24:46.905561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00214c/r3tmp/yandexqRXaDc.tmp 2025-06-03T10:24:46.909554Z INFO: TTestServer started on Port 27285 GrpcPort 6477 2025-06-03T10:24:46.942617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00214c/r3tmp/yandexqRXaDc.tmp 2025-06-03T10:24:46.942723Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:46.953871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.954063Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.962517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27285 PQClient connected to localhost:6477 === TenantModeEnabled() = 1 === Init PQ - start server on port 6477 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:47.089330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:47.089386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.089438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:47.089838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:47.089852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.095430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.095463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:47.095551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.095561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:47.095563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:47.095568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-03T10:24:47.096143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.096153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:24:47.096157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.101658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.101675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:47.101859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:24:47.102538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.102546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.102550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.102554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-03T10:24:47.104568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:47.106451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:24:47.106517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:47.107167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946287151, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.107218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946287151 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:47.107229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.107285Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:24:47.107294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.107334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:47.107344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:47.107895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:47.107907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:47.107952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:47.107971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemesh ... LARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:24:52.234306Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:24:52.234317Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667129865721937:2390] (SourceId=test-group-id, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:24:52.234321Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 2 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:24:52.234704Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037889 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037889, NodeId 3, Generation: 1 2025-06-03T10:24:52.234749Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 generated for partition 0 topic 'acc/topic1' owner test-group-id ===Assert streaming op1 ===Assert streaming op2 2025-06-03T10:24:52.235712Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 2 partition: 0 MaxSeqNo: 0 sessionId: test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 2025-06-03T10:24:52.244909Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-03T10:24:52.245116Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest 2025-06-03T10:24:52.245318Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:24:52.249998Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse ===ModifyAcl BEFORE MODIFY PERMISSIONS 2025-06-03T10:24:52.258454Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root/acc" OperationType: ESchemeOpModifyACL ModifyACL { Name: "topic1" DiffACL: "\n\031\010\001\022\025\032\023test_user_0@builtin" } } TxId: 281474976715666 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:35480" , at schemeshard: 72057594046644480 2025-06-03T10:24:52.258500Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_modify_acl.cpp:33: TModifyACL Propose, path: /Root/acc/topic1, operationId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:24:52.258529Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5223: ExamineTreeVFS visit path id [OwnerId: 72057594046644480, LocalPathId: 10] name: topic1 type: EPathTypePersQueueGroup state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046644480, LocalPathId: 9] 2025-06-03T10:24:52.258530Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5239: ExamineTreeVFS run path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-03T10:24:52.258571Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715666:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:52.258575Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:24:52.258593Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715666:0 progress is 1/1 2025-06-03T10:24:52.258596Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-06-03T10:24:52.258600Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is 
done id#281474976715666:0 progress is 1/1 2025-06-03T10:24:52.258602Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-06-03T10:24:52.258613Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 3 2025-06-03T10:24:52.258626Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715666, ready parts: 1/1, is published: false 2025-06-03T10:24:52.258632Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 10], at schemeshard: 72057594046644480 2025-06-03T10:24:52.258633Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715666 ready parts: 1/1 2025-06-03T10:24:52.258637Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715666:0 2025-06-03T10:24:52.258641Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976715666, publications: 1, subscribers: 0 2025-06-03T10:24:52.258644Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976715666, [OwnerId: 72057594046644480, LocalPathId: 10], 4 2025-06-03T10:24:52.265697Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715666, response: Status: StatusSuccess TxId: 281474976715666 SchemeshardId: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:52.265759Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715666, database: /Root, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/acc/topic1, remove access: -():test_user_0@builtin:- 2025-06-03T10:24:52.265807Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:52.265811Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715666, path id: [OwnerId: 72057594046644480, LocalPathId: 10] 2025-06-03T10:24:52.265855Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:52.265859Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [3:7511667125570753614:2374], at schemeshard: 72057594046644480, txId: 281474976715666, path id: 10 2025-06-03T10:24:52.266173Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715666 2025-06-03T10:24:52.266183Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 10 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715666 
2025-06-03T10:24:52.266185Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715666 2025-06-03T10:24:52.266188Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715666, pathId: [OwnerId: 72057594046644480, LocalPathId: 10], version: 4 2025-06-03T10:24:52.266192Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 10] was 4 2025-06-03T10:24:52.266223Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715666, subscribers: 0 ===Wait for session created with token with removed ACE to die2025-06-03T10:24:52.270006Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715666 2025-06-03T10:24:52.812574Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511667129865721969:2395], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:52.813268Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=OWFjYzU3Y2EtNGNhY2JmOGItMmQ1NTRhMTUtYmJlMTE1MDc=, ActorId: [3:7511667129865721967:2394], ActorState: ExecuteState, TraceId: 01jwtn541b6ky7590nzsfxtr2k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:52.813447Z node 3 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:53.235242Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:24:53.235596Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 2 sessionId: test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 describe result for acl check 2025-06-03T10:24:53.235632Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 2 reason: access to topic 'Topic /Root/acc/topic1 in database: /Root' denied for 'test_user_0@builtin' due to 'no WriteTopic rights', Marker# PQ1125 sessionId: test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 2025-06-03T10:24:53.235872Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: test-group-id|ea8ffb1-ea1200bd-41b46276-85483445_0 is DEAD 2025-06-03T10:24:53.235960Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037889 (partition=0) Received event: NActors::TEvents::TEvPoison status: UNAUTHORIZED issues { message: "access to topic \'Topic /Root/acc/topic1 in database: /Root\' denied for \'test_user_0@builtin\' due to \'no WriteTopic rights\', Marker# PQ1125" issue_code: 500018 severity: 1 } >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> LdapAuthProviderTest::LdapServerIsUnavailable >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-03T10:24:49.338627Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667114951164221:2268];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:49.338655Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmpLAqwkt/pdisk_1.dat 2025-06-03T10:24:49.773349Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667114951163992:2079] 1748946289320066 != 1748946289320069 2025-06-03T10:24:49.777465Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:49.793235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:49.793265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 22035, node 1 2025-06-03T10:24:49.794744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:49.816194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:49.816214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:49.816217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:49.816270Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:49.881344Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:49.883607Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:49.883616Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:49.884030Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:9587, port: 9587 2025-06-03T10:24:49.885585Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:49.921536Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:49.965709Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:24:50.015452Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ZNMQ (E42448B4) () has now valid token of ldapuser@ldap 2025-06-03T10:24:52.412726Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmpYCQuwe/pdisk_1.dat 2025-06-03T10:24:52.472459Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667129478927463:2079] 1748946292390353 != 1748946292390356 2025-06-03T10:24:52.475777Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
TServer::EnableGrpc on GrpcPort 16440, node 2 2025-06-03T10:24:52.509830Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:52.509859Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:52.513969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:52.533621Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:52.533632Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:52.533634Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:52.533675Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:52.681654Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:52.689409Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:52.689428Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:52.689640Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:28330, port: 28330 2025-06-03T10:24:52.689889Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:52.764019Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:52.809802Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:24:52.810174Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:24:52.810453Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:52.854042Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:52.901649Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:52.902123Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****UNQQ (751154A9) () has now valid token of ldapuser@ldap 2025-06-03T10:24:53.492228Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667133024887328:2251];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:53.492344Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmpLtkwnf/pdisk_1.dat 2025-06-03T10:24:53.541434Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:53.543716Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:53.543733Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:53.543866Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667133024887114:2079] 1748946293475195 != 1748946293475198 2025-06-03T10:24:53.581763Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18338, node 3 2025-06-03T10:24:53.661503Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:53.661513Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:53.661515Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:53.661557Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:53.687375Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:53.688925Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:53.688932Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:53.689066Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:64932, port: 64932 2025-06-03T10:24:53.689096Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:53.727159Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:53.773743Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****cebA (BE9BB44E) () has now valid token of ldapuser@ldap 2025-06-03T10:24:55.006488Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667137355968728:2088];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:55.006719Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmp53zXpW/pdisk_1.dat 2025-06-03T10:24:55.066477Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
TServer::EnableGrpc on GrpcPort 8062, node 4 2025-06-03T10:24:55.091154Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:55.091183Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:55.096667Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:55.273623Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:55.273637Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:55.273639Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:55.273683Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:55.511321Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:55.511396Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:55.511402Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:55.511546Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:19115 ldap://localhost:19115 ldap://localhost:11111, port: 19115 2025-06-03T10:24:55.511566Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:55.592938Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:55.641598Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:24:55.641768Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:24:55.641777Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.689709Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.733745Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.734006Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****SC-w (BFEB2B3B) () has now valid token of ldapuser@ldap 2025-06-03T10:24:56.358166Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667147238939307:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:56.360220Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmpmpqlq5/pdisk_1.dat 2025-06-03T10:24:56.429340Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667147238939141:2079] 1748946296347214 != 1748946296347217 2025-06-03T10:24:56.429645Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22823, node 5 2025-06-03T10:24:56.497802Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:56.497825Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:56.501635Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:56.565638Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:56.565650Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:56.565652Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:56.565693Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:56.776897Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:56.776971Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:56.776975Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:56.777131Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:9854, port: 9854 2025-06-03T10:24:56.777150Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:56.793614Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:24:56.841507Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:24:56.841754Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:24:56.841767Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:24:56.893466Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: 
baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:24:56.941494Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:24:56.941835Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****OAYw (B1B0F02B) () has now valid token of ldapuser@ldap 2025-06-03T10:24:57.894198Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667151106804491:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:57.896363Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00154d/r3tmp/tmpOit7sQ/pdisk_1.dat 2025-06-03T10:24:57.980144Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511667151106804325:2079] 1748946297876941 != 1748946297876944 2025-06-03T10:24:57.982757Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3660, node 6 2025-06-03T10:24:58.032104Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:58.032138Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:58.037822Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:58.113549Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:58.113561Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:58.113562Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:58.113603Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:58.309343Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:58.317377Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:58.317391Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:58.317576Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13541, port: 13541 2025-06-03T10:24:58.317596Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:58.340027Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-03T10:24:58.340061Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter 
&(uid=ldapuser)() on server ldap://localhost:13541. Bad search filter 2025-06-03T10:24:58.340164Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****UGvQ (AB519775) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:13541. Bad search filter)' >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClass [GOOD] >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute >> TBlobStorageWardenTest::TestSendToInvalidGroupId >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring >> BsControllerConfig::MoveGroups [GOOD] >> LdapAuthProviderTest::LdapServerIsUnavailable [GOOD] >> LdapAuthProviderTest::LdapRequestWithEmptyHost >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_3 [GOOD] >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueCommonTest::TestWriteWithRateLimiterWithUserPayloadRateLimit [GOOD] Test command err: === Server->StartServer(false); 2025-06-03T10:24:46.355641Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667102574259695:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.356656Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:46.398770Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667104549901020:2222];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:46.402344Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002155/r3tmp/tmpIxh4Um/pdisk_1.dat 2025-06-03T10:24:46.507002Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:46.504866Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:46.682776Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:46.687824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:46.687852Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.695094Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:46.697069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:46.700578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-03T10:24:46.700609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:46.716053Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 22190, node 1 2025-06-03T10:24:46.857097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002155/r3tmp/yandex8O7IQf.tmp 2025-06-03T10:24:46.857169Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002155/r3tmp/yandex8O7IQf.tmp 2025-06-03T10:24:46.857780Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002155/r3tmp/yandex8O7IQf.tmp 2025-06-03T10:24:46.857851Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:46.870761Z INFO: TTestServer started on Port 7335 GrpcPort 22190 TClient is connected to server localhost:7335 PQClient connected to localhost:22190 === TenantModeEnabled() = 1 === Init PQ - start server on port 22190 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:24:47.136174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:24:47.136229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.136282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:24:47.137618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:24:47.137636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.139673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.139694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:24:47.139734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.139741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:24:47.139744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:24:47.139747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-03T10:24:47.140754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.140763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:24:47.140935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:24:47.145419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.145434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:24:47.145439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:24:47.149939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.149955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:47.149961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.149969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-03T10:24:47.153701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:24:47.158125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:24:47.158173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:24:47.160253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946287207, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:24:47.160305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946287207 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:24:47.160313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.160388Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:24:47.160395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:24:47.160432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:24:47.160442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:24:47.161507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:24:47.161528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:24:47.161572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:24:47.161580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: scheme ... 5-06-03T10:25:00.149498Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2163: [PQ: 72075186224037899] got client PART message topic: PQ/account3/folder1/folder2/topic partition: 0 SourceId: '\0001236' SeqNo: 1 partNo : 1 messageNo: 1 size: 511961 2025-06-03T10:25:00.149532Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2163: [PQ: 72075186224037899] got client PART message topic: PQ/account3/folder1/folder2/topic partition: 0 SourceId: '\0001236' SeqNo: 1 partNo : 2 messageNo: 1 size: 176151 2025-06-03T10:25:00.149536Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037899] got client message topic: PQ/account3/folder1/folder2/topic partition: 0 SourceId: '\0001236' SeqNo: 1 partNo : 2 messageNo: 1 size 176151 offset: -1 2025-06-03T10:25:00.149577Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1704: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Send write quota request. Topic: "PQ/account3/folder1/folder2/topic". Partition: 0. Amount: 1200088. Cookie: 7 2025-06-03T10:25:00.149592Z node 2 :PERSQUEUE DEBUG: partition.cpp:3630: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Got quota. Topic: "PQ/account3/folder1/folder2/topic". 
Partition: 0: Cookie: 7 2025-06-03T10:25:00.149628Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 0 2025-06-03T10:25:00.149637Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 1 2025-06-03T10:25:00.149640Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob processing sourceId '\0001236' seqNo 1 partNo 2 2025-06-03T10:25:00.149841Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Topic 'PQ/account3/folder1/folder2/topic' partition 0 part blob complete sourceId '\0001236' seqNo 1 partNo 2 FormedBlobsCount 0 NewHead: Offset 6 PartNo 0 PackedSize 1200285 count 1 nextOffset 7 batches 3 2025-06-03T10:25:00.149942Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Add new write blob: topic 'PQ/account3/folder1/folder2/topic' partition 0 compactOffset 6,1 HeadOffset 6 endOffset 6 curOffset 7 d0000000000_00000000000000000006_00000_0000000001_00002| size 1200275 WTime 1748946300147 2025-06-03T10:25:00.150111Z node 2 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037899, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:25:00.150113Z node 2 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037899, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:25:00.150115Z node 2 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037899, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:25:00.150117Z node 2 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037899, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:25:00.150120Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037899, Partition: 0, State: StateIdle] m0000000000p1236 2025-06-03T10:25:00.150121Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037899, Partition: 0, State: StateIdle] d0000000000_00000000000000000006_00000_0000000001_00002| 2025-06-03T10:25:00.150122Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037899, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:25:00.150124Z node 2 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037899, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:25:00.150126Z node 2 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037899, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:25:00.150137Z node 2 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:25:00.150151Z node 2 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 6 partNo 0 count 1 size 1200275 2025-06-03T10:25:00.330368Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037899 (partition=0) Received event: NActors::IEventHandle 2025-06-03T10:25:00.329844Z node 2 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. 
Partition 0 offset 6 count 1 size 1200275 actorID [2:7511667147499574714:2364] 2025-06-03T10:25:00.329889Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 1200088 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:25:00.329895Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:25:00.329906Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Answering for message sourceid: '\0001236', Topic: 'PQ/account3/folder1/folder2/topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 6 is stored on disk 2025-06-03T10:25:00.329914Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:25:00.329920Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Answering for message sourceid: '\0001236', Topic: 'PQ/account3/folder1/folder2/topic', Partition: 0, SeqNo: 1, partNo: 1, Offset: 6 is stored on disk 2025-06-03T10:25:00.329927Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:25:00.329931Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037899, Partition: 0, State: StateIdle] Answering for message sourceid: '\0001236', Topic: 'PQ/account3/folder1/folder2/topic', Partition: 0, SeqNo: 1, partNo: 2, Offset: 6 is stored on disk 2025-06-03T10:25:00.330140Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:25:00.330227Z node 2 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037899' partition 0 offset 6 partno 0 count 1 parts 2 size 1200275 2025-06-03T10:25:00.333547Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session got write response: sequence_numbers: 1 offsets: 6 already_written: false write_statistics { persist_duration_ms: 181 } 2025-06-03T10:25:00.333557Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session: acknoledged message 1 2025-06-03T10:25:00.352448Z :INFO: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session will now close 2025-06-03T10:25:00.352468Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session: aborting 2025-06-03T10:25:00.352624Z :INFO: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:25:00.352632Z :DEBUG: [] MessageGroupId [1236] SessionId [1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0] Write session: destroy 2025-06-03T10:25:00.353028Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 22 sessionId: 1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0 grpc read done: success: 0 data: 2025-06-03T10:25:00.353038Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 22 sessionId: 1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0 grpc read failed 2025-06-03T10:25:00.353045Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 22 sessionId: 1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0 grpc closed 2025-06-03T10:25:00.353049Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 22 sessionId: 1236|b2e04b2-717e5d42-c58f6b3d-7708afe6_0 is DEAD 2025-06-03T10:25:00.353194Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037899 (partition=0) Received event: NActors::TEvents::TEvPoison DURATION 3.495367s 2025-06-03T10:25:00.353587Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037899] server disconnected, pipe [1:7511667162703804903:2613] destroyed 2025-06-03T10:25:00.353618Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037899, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:25:00.774482Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667162703804917:2619], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:25:00.775092Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=N2NmMGM2NTQtYjNhYTZiOWYtMzlhOGE4ZTAtNDU0ZDg4NDg=, ActorId: [1:7511667162703804915:2618], ActorState: ExecuteState, TraceId: 01jwtn5bty88xd7gsqbkz6m0cv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:25:00.775194Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:25:01.681527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:25:01.681542Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:01.782052Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667166998772239:2626], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:25:01.782609Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=MmQ0ZWFjYmYtNzc4MDAyN2MtMmMxMmQ0NTAtMjZkNzYwZWE=, ActorId: [1:7511667166998772237:2625], ActorState: ExecuteState, TraceId: 01jwtn5ctjeamg537h16bmvgs1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:25:01.782718Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::CheckGrpcReadNoDC [GOOD] Test command err: 2025-06-03T10:24:43.570780Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667092137890882:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:43.571054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:43.638588Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667088409918242:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:43.638608Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:43.713506Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00216b/r3tmp/tmp58qpK4/pdisk_1.dat 2025-06-03T10:24:43.734989Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:43.963631Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:43.968032Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.968055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.968470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.968478Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.980498Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:43.980549Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:43.985366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 5330, node 1 2025-06-03T10:24:44.062791Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00216b/r3tmp/yandex8hLSsp.tmp 2025-06-03T10:24:44.062808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00216b/r3tmp/yandex8hLSsp.tmp 2025-06-03T10:24:44.062912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00216b/r3tmp/yandex8hLSsp.tmp 2025-06-03T10:24:44.062974Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:44.069865Z INFO: TTestServer started on Port 10219 GrpcPort 5330 TClient is connected to server localhost:10219 PQClient connected to localhost:5330 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:44.116974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:44.145459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:24:44.474120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667096432859047:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.474194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.477399Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667096432859059:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.478621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:24:44.481999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667096432859093:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.482033Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.494430Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667096432859061:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:24:44.547481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.588674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.593914Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667096432859320:2854] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:44.625657Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667096432859339:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:44.626234Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=OTczOTBiYWUtYjYxM2Q0OTEtNWU3ZmExODYtMTI2OTAwMDk=, ActorId: [1:7511667096432859044:2334], ActorState: ExecuteState, TraceId: 01jwtn4vxk598x493q8czhxqe4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:44.626958Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:44.691077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:44.831365Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jwtn4w75fjhvavx77jgtwxm9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWQwYzNmMTUtNmIzZTdiOC1lNTIxYjAyNi1iMDNjMTQ2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511667096432859591:3047] 2025-06-03T10:24:48.564471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667092137890882:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:48.564509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:24:48.645513Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667088409918242:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:48.645551Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:24:49.888028Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667092137890998:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:24:49.888102Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:751166709 ... :01.310023Z :INFO: [/Root] [/Root] [ddfe2562-5c72b113-1c74b30c-67c71808] Closing read session. Close timeout: 0.000000s 2025-06-03T10:25:01.310033Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): null:account2/topic2:1:5:0:0 null:account2/topic2:2:4:0:0 null:account2/topic2:0:3:3:0 null:account2/topic2:3:2:0:0 null:account2/topic2:4:1:0:0 2025-06-03T10:25:01.310038Z :INFO: [/Root] [/Root] [ddfe2562-5c72b113-1c74b30c-67c71808] Counters: { Errors: 0 CurrentSessionLifetimeMs: 72 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 92 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:25:01.310050Z :NOTICE: [/Root] [/Root] [ddfe2562-5c72b113-1c74b30c-67c71808] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:25:01.310056Z :DEBUG: [/Root] [/Root] [ddfe2562-5c72b113-1c74b30c-67c71808] [null] Abort session to cluster 2025-06-03T10:25:01.310206Z :NOTICE: [/Root] [/Root] [ddfe2562-5c72b113-1c74b30c-67c71808] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:25:01.313788Z node 3 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer user1 session user1_3_2_12189821407206377235_v1 grpc read done: success# 0, data# { } 2025-06-03T10:25:01.313797Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer user1 session user1_3_2_12189821407206377235_v1 grpc read failed 2025-06-03T10:25:01.313804Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer user1 session user1_3_2_12189821407206377235_v1 grpc closed 2025-06-03T10:25:01.313816Z node 3 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer user1 session user1_3_2_12189821407206377235_v1 is DEAD 2025-06-03T10:25:01.314335Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037904] Destroy direct read session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.314344Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037904] server disconnected, pipe [3:7511667168162571488:2591] destroyed 2025-06-03T10:25:01.314368Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.314373Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037903] Destroy direct read session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.314376Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037903] server disconnected, pipe [3:7511667168162571485:2588] destroyed 2025-06-03T10:25:01.314379Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037903] Destroy direct read session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.314381Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037903] server disconnected, pipe [3:7511667168162571484:2587] destroyed 2025-06-03T10:25:01.314384Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.314386Z node 3 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.318144Z node 4 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037905][topic2] pipe [3:7511667168162571473:2581] disconnected; active server actors: 1 2025-06-03T10:25:01.318158Z node 4 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037905][topic2] pipe [3:7511667168162571473:2581] client user1 disconnected session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.318192Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037902] Destroy direct read session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.318201Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037902] server disconnected, pipe [3:7511667168162571487:2590] destroyed 2025-06-03T10:25:01.318205Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037902] Destroy direct read session user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.318208Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037902] server disconnected, pipe [3:7511667168162571486:2589] destroyed 2025-06-03T10:25:01.318221Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.318223Z node 4 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: 
user1_3_2_12189821407206377235_v1 2025-06-03T10:25:01.342964Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2760: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [3:7511667129507862315:2114], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 6] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:25:01.342990Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [3:7511667129507862315:2114], cacheItem# { Subscriber: { Subscriber: [3:7511667138097798048:2883] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946294627 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:01.343002Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [3:7511667129507862315:2114], cacheItem# { Subscriber: { Subscriber: [3:7511667138097797756:2677] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 18 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946294452 PathId: [OwnerId: 72057594046644480, LocalPathId: 6] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 6] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:01.343059Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667168162571507:4677], recipient# [3:7511667168162571506:2574], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 6] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:25:01.690338Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# 
[3:7511667129507862315:2114], request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:01.690393Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667129507862315:2114], cacheItem# { Subscriber: { Subscriber: [3:7511667133802830260:2553] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:01.690418Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667168162571523:4682], recipient# [3:7511667168162571522:2596], result# { ErrorCount: 1 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:01.954380Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667129507862315:2114], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:01.954416Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667129507862315:2114], cacheItem# { Subscriber: { Subscriber: [3:7511667129507862335:2122] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 29 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946293311 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:01.954576Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667168162571531:4687], recipient# [3:7511667129507862093:2075], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 
72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } >> TBlobStorageWardenTest::TestHttpMonPage >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad >> TBlobStorageWardenTest::TestDeleteStoragePool ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MoveGroups [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3060:2106] recipient: [1:2962:2116] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3060:2106] recipient: [1:2962:2116] Leader for TabletID 72057594037932033 is [1:3066:2118] sender: [1:3067:2106] recipient: [1:2962:2116] 2025-06-03T10:24:27.131430Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:27.132058Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:27.132121Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:27.132409Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:27.132610Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:27.132641Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.132645Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.132751Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:27.133492Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:27.133518Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:27.133552Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:27.133568Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.133577Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.133585Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3066:2118] sender: [1:3088:2106] recipient: [1:60:2107] 2025-06-03T10:24:27.144120Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:27.144189Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.154601Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.154669Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.154686Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.154698Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.154740Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.154748Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.154753Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.154760Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.165121Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.165175Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.175569Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.175640Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:27.175848Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:27.175854Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:27.175891Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:27.175902Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:27.177902Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } 
HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 150 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:24:27.178210Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-03T10:24:27.178222Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-03T10:24:27.178227Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-03T10:24:27.178232Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-03T10:24:27.178237Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-03T10:24:27.178244Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-03T10:24:27.178249Z node 1 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1
2025-06-03T10:24:27.178257Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1001 Path# /dev/disk2
2025-06-03T10:24:27.178260Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3
2025-06-03T10:24:27.178263Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1
2025-06-03T10:24:27.178266Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2
2025-06-03T10:24:27.178269Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3
2025-06-03T10:24:27.178272Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1
2025-06-03T10:24:27.178275Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2
2025-06-03T10:24:27.178278Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3
2025-06-03T10:24:27.178281Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1
2025-06-03T10:24:27.178288Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2
2025-06-03T10:24:27.178291Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3
2025-06-03T10:24:27.178294Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1
2025-06-03T10:24:27.178296Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2
2025-06-03T10:24:27.178299Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:3 ... 78:1000 Path# /dev/disk1
2025-06-03T10:24:54.878853Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1001 Path# /dev/disk2
2025-06-03T10:24:54.878858Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 178:1002 Path# /dev/disk3
2025-06-03T10:24:54.878864Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1000 Path# /dev/disk1
2025-06-03T10:24:54.878868Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1001 Path# /dev/disk2
2025-06-03T10:24:54.878872Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 179:1002 Path# /dev/disk3
2025-06-03T10:24:54.878877Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1000 Path# /dev/disk1
2025-06-03T10:24:54.878881Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1001 Path# /dev/disk2
2025-06-03T10:24:54.878885Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 180:1002 Path# /dev/disk3
2025-06-03T10:24:54.878890Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1000 Path# /dev/disk1
2025-06-03T10:24:54.878895Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1001 Path# /dev/disk2
2025-06-03T10:24:54.878899Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 181:1002 Path# /dev/disk3
2025-06-03T10:24:54.878903Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1000 Path# /dev/disk1
2025-06-03T10:24:54.878908Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1001 Path# /dev/disk2
2025-06-03T10:24:54.878914Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 182:1002 Path# /dev/disk3
2025-06-03T10:24:54.878919Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1000 Path# /dev/disk1
2025-06-03T10:24:54.878923Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1001 Path# /dev/disk2
2025-06-03T10:24:54.878928Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 183:1002 Path# /dev/disk3
2025-06-03T10:24:54.878932Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1000 Path# /dev/disk1
2025-06-03T10:24:54.878936Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1001 Path# /dev/disk2
2025-06-03T10:24:54.878941Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 184:1002 Path# /dev/disk3
2025-06-03T10:24:54.878944Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1000 Path# /dev/disk1
2025-06-03T10:24:54.878949Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1001 Path# /dev/disk2
2025-06-03T10:24:54.878954Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 185:1002 Path# /dev/disk3
2025-06-03T10:24:54.878958Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1000 Path# /dev/disk1
2025-06-03T10:24:54.878962Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1001 Path# /dev/disk2
2025-06-03T10:24:54.878966Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 186:1002 Path# /dev/disk3
2025-06-03T10:24:54.878970Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1000 Path# /dev/disk1
2025-06-03T10:24:54.878974Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1001 Path# /dev/disk2
2025-06-03T10:24:54.878978Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 187:1002 Path# /dev/disk3
2025-06-03T10:24:54.878984Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1000 Path# /dev/disk1
2025-06-03T10:24:54.878989Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1001 Path# /dev/disk2
2025-06-03T10:24:54.878993Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 188:1002 Path# /dev/disk3
2025-06-03T10:24:54.878998Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1000 Path# /dev/disk1
2025-06-03T10:24:54.879003Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1001 Path# /dev/disk2
2025-06-03T10:24:54.879008Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 189:1002 Path# /dev/disk3
2025-06-03T10:24:54.879012Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1000 Path# /dev/disk1
2025-06-03T10:24:54.879017Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1001 Path# /dev/disk2
2025-06-03T10:24:54.879021Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 190:1002 Path# /dev/disk3
2025-06-03T10:24:54.879026Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1000 Path# /dev/disk1
2025-06-03T10:24:54.879030Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1001 Path# /dev/disk2
2025-06-03T10:24:54.879035Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 191:1002 Path# /dev/disk3
2025-06-03T10:24:54.879040Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1000 Path# /dev/disk1
2025-06-03T10:24:54.879044Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1001 Path# /dev/disk2
2025-06-03T10:24:54.879049Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 192:1002 Path# /dev/disk3
2025-06-03T10:24:54.879054Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1000 Path# /dev/disk1
2025-06-03T10:24:54.879060Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1001 Path# /dev/disk2
2025-06-03T10:24:54.879065Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 193:1002 Path# /dev/disk3
2025-06-03T10:24:54.879069Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1000 Path# /dev/disk1
2025-06-03T10:24:54.879074Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1001 Path# /dev/disk2
2025-06-03T10:24:54.879079Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 194:1002 Path# /dev/disk3
2025-06-03T10:24:54.879083Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1000 Path# /dev/disk1
2025-06-03T10:24:54.879087Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1001 Path# /dev/disk2
2025-06-03T10:24:54.879092Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 195:1002 Path# /dev/disk3
2025-06-03T10:24:54.879097Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1000 Path# /dev/disk1
2025-06-03T10:24:54.879101Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1001 Path# /dev/disk2
2025-06-03T10:24:54.879107Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 196:1002 Path# /dev/disk3
2025-06-03T10:24:54.879113Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1000 Path# /dev/disk1
2025-06-03T10:24:54.879117Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1001 Path# /dev/disk2
2025-06-03T10:24:54.879122Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 197:1002 Path# /dev/disk3
2025-06-03T10:24:54.879127Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1000 Path# /dev/disk1
2025-06-03T10:24:54.879131Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1001 Path# /dev/disk2
2025-06-03T10:24:54.879136Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 198:1002 Path# /dev/disk3
2025-06-03T10:24:54.879140Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1000 Path# /dev/disk1
2025-06-03T10:24:54.879144Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1001 Path# /dev/disk2
2025-06-03T10:24:54.879149Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 199:1002 Path# /dev/disk3
2025-06-03T10:24:54.879152Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1000 Path# /dev/disk1
2025-06-03T10:24:54.879157Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1001 Path# /dev/disk2
2025-06-03T10:24:54.879161Z node 151 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 200:1002 Path# /dev/disk3
2025-06-03T10:24:55.195680Z node 151 :BS_CONTROLLER ERROR: {BSC07@impl.h:2181} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.317622s
2025-06-03T10:24:55.195801Z node 151 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 2146435078 Duration# 0.317761s
2025-06-03T10:24:55.218039Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } }
2025-06-03T10:24:55.288230Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 1 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 1 ExplicitGroupId: 2147483748 } } }
2025-06-03T10:24:55.310425Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } }
2025-06-03T10:24:55.388291Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 2 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 2 ExplicitGroupId: 2147483749 } } }
2025-06-03T10:24:55.422743Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } }
2025-06-03T10:24:55.524286Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { MoveGroups { BoxId: 1 OriginStoragePoolId: 2 OriginStoragePoolGeneration: 3 TargetStoragePoolId: 1 TargetStoragePoolGeneration: 3 } } }
2025-06-03T10:24:55.566723Z node 151 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { QueryBaseConfig { } } }
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsFromAdLdapServer [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood
>> TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDontExistGroupAttribute [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad
>> LdapAuthProviderTest::LdapRequestWithEmptyHost [GOOD]
>> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD]
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts
>> TBlobStorageWardenTest::TestFilterBadSerials [GOOD]
>> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError
>> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD]
>> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendToInvalidGroupId [GOOD]
Test command err:
2025-06-03T10:25:03.420189Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap
2025-06-03T10:25:03.421249Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000ef5/r3tmp/tmpEGlLYT/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.421469Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000ef5/r3tmp/tmpEGlLYT/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false
2025-06-03T10:25:03.421887Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt
2025-06-03T10:25:03.421969Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.422233Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1
2025-06-03T10:25:03.422247Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.422401Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1
2025-06-03T10:25:03.422410Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.422557Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1
2025-06-03T10:25:03.422565Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.422673Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1
2025-06-03T10:25:03.422682Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432
2025-06-03T10:25:03.422926Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:28:2075] ControllerId# 72057594037932033
2025-06-03T10:25:03.422932Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode
2025-06-03T10:25:03.422965Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295
2025-06-03T10:25:03.422998Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler
2025-06-03T10:25:03.428633Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap
2025-06-03T10:25:03.429102Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# []
2025-06-03T10:25:03.429188Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.429197Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.451073Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.451099Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0
2025-06-03T10:25:03.452085Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# []
2025-06-03T10:25:03.452603Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {}
2025-06-03T10:25:03.452715Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.453057Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1
2025-06-03T10:25:03.453066Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.453105Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\021\343zL\2259\375\337\2642\364\315m\231\323\317\373\202\200x" }
2025-06-03T10:25:03.453150Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1
2025-06-03T10:25:03.453161Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:75:2118] SessionId# [0:0:0] Cookie# 0
2025-06-03T10:25:03.453171Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.002039s
2025-06-03T10:25:03.460940Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000ef5/r3tmp/tmpEGlLYT/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.461050Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0
2025-06-03T10:25:03.465917Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.466006Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.467718Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.467767Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.468503Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.468781Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.468807Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.469197Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.469579Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.469633Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.469670Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.469894Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.469909Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.470695Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:0:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:0:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.471028Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0
2025-06-03T10:25:03.476393Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:25:03.483347Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:25:03.483501Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:25:03.483781Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:25:03.483893Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:25:03.483988Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:25:03.483996Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:25:03.484044Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:25:03.490512Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete ... obCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:1298:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13
2025-06-03T10:25:03.874079Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874084Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874086Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874089Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874092Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874094Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [e2e5f1b9c917f854] Id# [72057594037932033:2:8:0:0:1298:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:03.874099Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [e2e5f1b9c917f854] restore Id# [72057594037932033:2:8:0:0:1298:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55
2025-06-03T10:25:03.874108Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:1298:1] Marker# BPG33
2025-06-03T10:25:03.874113Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:1298:1] Marker# BPG32
2025-06-03T10:25:03.874116Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:1298:2] Marker# BPG33
2025-06-03T10:25:03.874119Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:1298:2] Marker# BPG32
2025-06-03T10:25:03.874122Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2e5f1b9c917f854] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:1298:3] Marker# BPG33
2025-06-03T10:25:03.874124Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2e5f1b9c917f854] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:1298:3] Marker# BPG32
2025-06-03T10:25:03.874163Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:46:2090] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:3] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0
2025-06-03T10:25:03.874169Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:39:2083] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:2] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0
2025-06-03T10:25:03.874194Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:60:2104] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:1298:1] FDS# 1298 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0
2025-06-03T10:25:03.875363Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01
2025-06-03T10:25:03.875445Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01
2025-06-03T10:25:03.875465Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2e5f1b9c917f854] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:1298:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 90220 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01
2025-06-03T10:25:03.875485Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2e5f1b9c917f854] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:1298:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12
2025-06-03T10:25:03.875496Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2e5f1b9c917f854] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:1298:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21
2025-06-03T10:25:03.875538Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.22 sample PartId# [72057594037932033:2:8:0:0:1298:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.22 sample PartId# [72057594037932033:2:8:0:0:1298:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.22 sample PartId# [72057594037932033:2:8:0:0:1298:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 1.422 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.474 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.493 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] }
2025-06-03T10:25:03.875776Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { VDisks { VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4245061179013603848 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "test_storage" } Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4245061179013603848 } } } EncryptionMode: 1 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038082 MainKeyVersion: 0 StoragePoolName: "test_storage" DeviceType: ROT } } InstanceId: "573c9fff-24e28064-399d96b6-3b1e94e8" AvailDomain: 1 }
2025-06-03T10:25:03.875814Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {VDisks { VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4245061179013603848 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "test_storage" } Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4245061179013603848 } } } EncryptionMode: 1 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038082 MainKeyVersion: 0 StoragePoolName: "test_storage" DeviceType: ROT } }
2025-06-03T10:25:03.875880Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [82000002:1:0:0:0] VSlotId# 1:1000:1002 PDiskGuid# 4245061179013603848 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.876129Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [82000002:1:0:0:0] VSlotId# 1:1000:1002 PDiskGuid# 4245061179013603848
2025-06-03T10:25:03.877547Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1002 PDiskGuid: 4245061179013603848 Status: INIT_PENDING OnlyPhantomsRemain: false } }
2025-06-03T10:25:03.877757Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1002 } State: Initial Replicated: false DiskSpace: Green } }
2025-06-03T10:25:03.878678Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1000 VSlotId: 1002 } }
2025-06-03T10:25:03.879513Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.879717Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1002 PDiskGuid: 4245061179013603848 Status: REPLICATING OnlyPhantomsRemain: false } }
2025-06-03T10:25:03.879835Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate)
2025-06-03T10:25:03.879868Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1002 PDiskGuid: 4245061179013603848 Status: READY OnlyPhantomsRemain: false } }
2025-06-03T10:25:03.879921Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 4294967295 IsLimitedKeyless# 0 fullIfPossible# 1 Marker# DSP58
2025-06-03T10:25:03.880254Z node 1 :BS_PROXY CRIT: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvBlock {TabletId# 1234 Generation# 1 Deadline# 18446744073709551 IsMonitored# 1} Response# TEvBlockResult {Status# ERROR ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID"} Marker# DSP31
Sending TEvPut
2025-06-03T10:25:03.880333Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvPut {Id# [1234:1:0:0:0:5:0] Size# 5 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:1:0:0:0:5:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID" ApproximateFreeSpaceShare# 0} Marker# DSP31
2025-06-03T10:25:03.880363Z node 1 :BS_PROXY DEBUG: dsproxy_impl.h:309: The request was sent for an invalid groupID Group# 4294967295 HandleError ev# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Response# TEvCollectGarbageResult {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Status# ERROR ErrorReason# "Created as unconfigured in error state (DSPE11). It happens when the request was sent for an invalid groupID"} Marker# DSP31
|59.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots
|59.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots
|59.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication_reboots/ydb-core-tx-schemeshard-ut_replication_reboots
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGood [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad
>> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD]
>> test.py::test[aggregate-histogram_cdf-default.txt-ForceBlocks] [GOOD]
>> test.py::test[aggregate-histogram_cdf-default.txt-Results]
>> TBlobStorageWardenTest::TestDeleteStoragePool [GOOD]
>> TBlobStorageWardenTest::TestBlockEncriptedGroup
>> LdapAuthProviderTest::LdapRequestWithEmptyBaseDn [GOOD]
>> LdapAuthProviderTest::LdapRequestWithEmptyBindDn
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD]
>> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD]
>> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute
>> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad
>> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings
>> TBlobStorageWardenTest::TestHttpMonPage [GOOD]
>> BsControllerConfig::ExtendBoxAndStoragePool [GOOD]
>> TBlobStorageWardenTest::TestSendUsefulMonitoring
>> BindQueue::Basic
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestLimitedKeylessGroupThenNoMonitoring [GOOD]
Test command err:
2025-06-03T10:25:03.531440Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap
2025-06-03T10:25:03.532641Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.532711Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432
2025-06-03T10:25:03.532959Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [2:41:2070] ControllerId# 72057594037932033
2025-06-03T10:25:03.532966Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode
2025-06-03T10:25:03.532999Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295
2025-06-03T10:25:03.533027Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler
2025-06-03T10:25:03.535634Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap
2025-06-03T10:25:03.535997Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# []
2025-06-03T10:25:03.536046Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap
2025-06-03T10:25:03.536951Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000ef1/r3tmp/tmpSM55W7/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.537025Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000ef1/r3tmp/tmpSM55W7/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false
2025-06-03T10:25:03.537441Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt
2025-06-03T10:25:03.537515Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.537766Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1
2025-06-03T10:25:03.537780Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.537919Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1
2025-06-03T10:25:03.537927Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.538105Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1
2025-06-03T10:25:03.538117Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.538244Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1
2025-06-03T10:25:03.538289Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432
2025-06-03T10:25:03.538516Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:86:2076] ControllerId# 72057594037932033
2025-06-03T10:25:03.538523Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode
2025-06-03T10:25:03.538549Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295
2025-06-03T10:25:03.538577Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler
2025-06-03T10:25:03.541321Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap
2025-06-03T10:25:03.541784Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.541795Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.541864Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# []
2025-06-03T10:25:03.551127Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.551150Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.551252Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.551264Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0
2025-06-03T10:25:03.552301Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# []
2025-06-03T10:25:03.553260Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {}
2025-06-03T10:25:03.553368Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.553490Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.553503Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0
2025-06-03T10:25:03.553534Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# []
2025-06-03T10:25:03.553590Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {}
2025-06-03T10:25:03.554800Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.554934Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.554992Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1
2025-06-03T10:25:03.555083Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1
2025-06-03T10:25:03.555100Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.555127Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\304\311\004D9`p\227F\252\311O\237\343\325b\254\2701\222" }
2025-06-03T10:25:03.555151Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:237} SubscribeToPeerNode NodeId# 2 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 1
2025-06-03T10:25:03.555162Z node 1 :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 2 Binding# {2.0/2977634916764975488@[0:0:0]} SessionId# [0:0:0]
2025-06-03T10:25:03.555286Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1
2025-06-03T10:25:03.555297Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:142:2121] SessionId# [0:0:0] Cookie# 0
2025-06-03T10:25:03.555305Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.001777s
2025-06-03T10:25:03.557070Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2
2025-06-03T10:25:03.557084Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.557113Z node 2 :BS_NODE DEBUG: {NWDC18@distconf_b ... etGroup Request# {NodeID: 2 GroupIDs: 2181038082 }
2025-06-03T10:25:04.232209Z node 2 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 2 ServiceSet { Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 12963111900139603973 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000ef1/r3tmp/tmpSM55W7//key.txt" EncryptedGroupKey: "\243L\212\253T\233\261\236\212\025\373\035\326\336Q\256=\312bw;\200,\355\257\272x\033\341p\342\276\371og1" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } }
2025-06-03T10:25:04.232226Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 12963111900139603973 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000ef1/r3tmp/tmpSM55W7//key.txt" EncryptedGroupKey: "\243L\212\253T\233\261\236\212\025\373\035\326\336Q\256=\312bw;\200,\355\257\272x\033\341p\342\276\371og1" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } }
2025-06-03T10:25:04.232250Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/u93c/000ef1/r3tmp/tmpSM55W7//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082
2025-06-03T10:25:04.232405Z node 2 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2181038082
2025-06-03T10:25:04.232415Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02
2025-06-03T10:25:04.232419Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58
2025-06-03T10:25:04.232674Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:606:2106] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232694Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:607:2107] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232732Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:608:2108] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232747Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:609:2109] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232764Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:610:2110] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232787Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:611:2111] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232809Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:604:2105] Create Queue# [2:612:2112] targetNodeId# 1 Marker# DSP01
2025-06-03T10:25:04.232814Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03
2025-06-03T10:25:04.233096Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233137Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233158Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233196Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233214Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233342Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233362Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04
2025-06-03T10:25:04.233367Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11
2025-06-03T10:25:04.233372Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15
2025-06-03T10:25:04.233409Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [2:613:2113] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05
2025-06-03T10:25:04.233419Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03
2025-06-03T10:25:04.233463Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:606:2106] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 722288926890686495 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0
2025-06-03T10:25:04.234137Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01
2025-06-03T10:25:04.234159Z node 2 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04
Sending TEvPut
2025-06-03T10:25:04.234243Z node 2 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31
Sending TEvPut
2025-06-03T10:25:04.234274Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31
Sending TEvPut
2025-06-03T10:25:04.234377Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [1:614:2513] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13
2025-06-03T10:25:04.234429Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51
2025-06-03T10:25:04.234440Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55
2025-06-03T10:25:04.234452Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33
2025-06-03T10:25:04.234459Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32
2025-06-03T10:25:04.234490Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:593:2503] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0
2025-06-03T10:25:04.234555Z node 1 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:568: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03
2025-06-03T10:25:04.234618Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01
2025-06-03T10:25:04.234632Z node 1 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12
2025-06-03T10:25:04.234641Z node 1 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21
2025-06-03T10:25:04.234666Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.172 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } ] }
2025-06-03T10:25:04.234761Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:606:2106] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeDisableNestedGroupsGood [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD]
>> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad
|59.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base
|59.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base
|59.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base/ydb-core-tx-schemeshard-ut_base
>> LdapAuthProviderTest::LdapRequestWithEmptyBindDn [GOOD]
>> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestHttpMonPage [GOOD]
Test command err:
2025-06-03T10:25:03.878880Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap
2025-06-03T10:25:03.879784Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpsAtjXn/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 }
2025-06-03T10:25:03.879852Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpsAtjXn/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false
2025-06-03T10:25:03.880209Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt
2025-06-03T10:25:03.880279Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.880501Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1
2025-06-03T10:25:03.880514Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.880634Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1
2025-06-03T10:25:03.880641Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.880753Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1
2025-06-03T10:25:03.880760Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false
2025-06-03T10:25:03.880868Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1
2025-06-03T10:25:03.880877Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432
2025-06-03T10:25:03.881089Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:28:2075] ControllerId# 72057594037932033
2025-06-03T10:25:03.881095Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode
2025-06-03T10:25:03.881122Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295
2025-06-03T10:25:03.881149Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler
2025-06-03T10:25:03.885505Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap
2025-06-03T10:25:03.885909Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# []
2025-06-03T10:25:03.885987Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.885993Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.892714Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.892733Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0
2025-06-03T10:25:03.893674Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# []
2025-06-03T10:25:03.893769Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {}
2025-06-03T10:25:03.893855Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0
2025-06-03T10:25:03.894234Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1
2025-06-03T10:25:03.894242Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo
2025-06-03T10:25:03.894268Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "T\360veZv\010\013\335+\262\276\247\214HTnm\331\215" }
2025-06-03T10:25:03.894395Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1
2025-06-03T10:25:03.894406Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:77:2120] SessionId# [0:0:0] Cookie# 0
2025-06-03T10:25:03.894416Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.001653s 2025-06-03T10:25:03.898703Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpsAtjXn/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:03.898786Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:03.904350Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.906818Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.907656Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.908269Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.908302Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.908694Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.909043Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.909229Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.909537Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.909554Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.909579Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.909597Z 
node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.909995Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.910184Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.916010Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:03.921707Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:03.921839Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:25:03.922100Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:03.922224Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:25:03.922391Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:03.922398Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:03.922443Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:25:03.927461Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:25:03.927516Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:25:03.927559Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:25:03.927596Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrat ... 
GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } NodeId: 2 PDiskId: 0 VSlotId: 1 PDiskGuid: 1 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.495460Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.495485Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.495502Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } NodeId: 2 PDiskId: 0 VSlotId: 2 PDiskGuid: 1 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.495527Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } NodeId: 2 PDiskId: 0 VSlotId: 3 PDiskGuid: 1 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.495545Z node 2 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 0 } Success: true } 2025-06-03T10:25:04.495559Z node 2 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 1 } 2025-06-03T10:25:04.495632Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 0 } } 2025-06-03T10:25:04.495682Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.495887Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.495919Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.495941Z node 2 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 1 } Success: true } 2025-06-03T10:25:04.495952Z node 2 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 2 } 2025-06-03T10:25:04.496019Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 1 } } 2025-06-03T10:25:04.496083Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.496260Z node 2 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 2 } Success: true } 2025-06-03T10:25:04.496271Z node 2 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 3 } 2025-06-03T10:25:04.496301Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.496323Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.496337Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 2 } } 2025-06-03T10:25:04.496404Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.496606Z node 2 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} 
Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 3 } Success: true } 2025-06-03T10:25:04.496639Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.496669Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 0 VSlotId: 3 } } 2025-06-03T10:25:04.496676Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.518306Z node 2 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-03T10:25:04.518459Z node 2 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 2 ServiceSet { PDisks { NodeID: 2 PDiskID: 1000 Path: "/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpAWgFt7/pdisk_1.dat" PDiskGuid: 12988812606076077083 PDiskCategory: 0 EntityStatus: CREATE ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 12988812606076077083 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "pool-1" } VDisks { VDiskID { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1001 PDiskGuid: 12988812606076077083 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "pool-2" } Groups { GroupID: 2181038080 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 12988812606076077083 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038080 MainKeyVersion: 0 StoragePoolName: "pool-1" DeviceType: ROT } Groups { GroupID: 2181038081 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1001 PDiskGuid: 12988812606076077083 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038081 MainKeyVersion: 0 StoragePoolName: "pool-2" DeviceType: ROT } } InstanceId: "b502d629-8b077f11-ab837abd-37b3e43c" AvailDomain: 1 } 2025-06-03T10:25:04.518491Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {PDisks { NodeID: 2 PDiskID: 1000 Path: "/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpAWgFt7/pdisk_1.dat" PDiskGuid: 12988812606076077083 PDiskCategory: 0 EntityStatus: CREATE ExpectedSerial: "" ManagementStage: DISCOVER_SERIAL SpaceColorBorder: GREEN } VDisks { VDiskID { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 12988812606076077083 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "pool-1" } VDisks { VDiskID { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1001 PDiskGuid: 12988812606076077083 } VDiskKind: Default EntityStatus: CREATE StoragePoolName: "pool-2" } Groups { GroupID: 2181038080 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1000 PDiskGuid: 12988812606076077083 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038080 
MainKeyVersion: 0 StoragePoolName: "pool-1" DeviceType: ROT } Groups { GroupID: 2181038081 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1001 PDiskGuid: 12988812606076077083 } } } EncryptionMode: 0 LifeCyclePhase: 0 MainKeyId: "" EncryptedGroupKey: "" GroupKeyNonce: 2181038081 MainKeyVersion: 0 StoragePoolName: "pool-2" DeviceType: ROT } } 2025-06-03T10:25:04.518544Z node 2 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 2 PDiskId# 1000 Path# "/home/runner/.ya/build/build_root/u93c/000f05/r3tmp/tmpAWgFt7/pdisk_1.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:04.518724Z node 2 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [82000000:1:0:0:0] VSlotId# 2:1000:1000 PDiskGuid# 12988812606076077083 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.518849Z node 2 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [82000000:1:0:0:0] VSlotId# 2:1000:1000 PDiskGuid# 12988812606076077083 2025-06-03T10:25:04.518862Z node 2 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [82000001:1:0:0:0] VSlotId# 2:1000:1001 PDiskGuid# 12988812606076077083 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.518939Z node 2 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [82000001:1:0:0:0] VSlotId# 2:1000:1001 PDiskGuid# 12988812606076077083 2025-06-03T10:25:04.562776Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 12988812606076077083 Status: INIT_PENDING OnlyPhantomsRemain: false } VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1001 PDiskGuid: 12988812606076077083 Status: INIT_PENDING OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.563017Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1000 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-03T10:25:04.563035Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1001 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-03T10:25:04.564491Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1000 } } 2025-06-03T10:25:04.564559Z node 2 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 2 PDiskId: 1000 VSlotId: 1001 } } 2025-06-03T10:25:04.565722Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.565785Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.565832Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038080 
GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 12988812606076077083 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.565960Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1001 PDiskGuid: 12988812606076077083 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.566202Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.566220Z node 2 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.566235Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 12988812606076077083 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.566265Z node 2 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 2 PDiskId: 1000 VSlotId: 1001 PDiskGuid: 12988812606076077083 Status: READY OnlyPhantomsRemain: false } } >> test.py::test[blocks-lazy_nonstrict_with_scalar_ctx--ForceBlocks] [GOOD] >> test.py::test[blocks-lazy_nonstrict_with_scalar_ctx--Results] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithDontExistGroupAttribute [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad >> LdapAuthProviderTest_nonSecure::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError >> LdapAuthProviderTest_nonSecure::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::ExtendBoxAndStoragePool [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3060:2106] recipient: [1:2962:2116] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:3060:2106] recipient: [1:2962:2116] Leader for TabletID 72057594037932033 is [1:3066:2118] sender: [1:3067:2106] recipient: [1:2962:2116] 2025-06-03T10:24:26.194473Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:26.195668Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:26.195768Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:26.196186Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:26.196530Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:26.196601Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:26.196608Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 
2025-06-03T10:24:26.196708Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:26.198180Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:26.198226Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:26.198284Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:26.198311Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.198328Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:26.198341Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:3066:2118] sender: [1:3088:2106] recipient: [1:60:2107] 2025-06-03T10:24:26.209084Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:26.209155Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.219565Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:26.219630Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.219651Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:26.219668Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.219701Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:26.219713Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.219722Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:26.219735Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.230112Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:26.230170Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.240587Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} 
Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:26.240655Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:26.240857Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:24:26.240863Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:26.240899Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:26.240909Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:26.243512Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: "first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 60 PDiskFilter { Property { Type: ROT } } } } Command { QueryBaseConfig { } } } 2025-06-03T10:24:26.243768Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1000 Path# /dev/disk1 2025-06-03T10:24:26.243777Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1001 Path# /dev/disk2 2025-06-03T10:24:26.243783Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1002 Path# /dev/disk3 2025-06-03T10:24:26.243788Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /dev/disk1 2025-06-03T10:24:26.243793Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1001 Path# /dev/disk2 2025-06-03T10:24:26.243799Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1002 Path# /dev/disk3 2025-06-03T10:24:26.243804Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1000 Path# /dev/disk1 2025-06-03T10:24:26.243812Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 3:1001 Path# /dev/disk2 2025-06-03T10:24:26.243817Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 3:1002 Path# /dev/disk3 2025-06-03T10:24:26.243822Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1000 Path# /dev/disk1 2025-06-03T10:24:26.243827Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1001 Path# /dev/disk2 2025-06-03T10:24:26.243832Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 4:1002 Path# /dev/disk3 2025-06-03T10:24:26.243837Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1000 Path# /dev/disk1 2025-06-03T10:24:26.243842Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1001 Path# /dev/disk2 2025-06-03T10:24:26.243848Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 5:1002 Path# /dev/disk3 2025-06-03T10:24:26.243852Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1000 Path# /dev/disk1 2025-06-03T10:24:26.243862Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1001 Path# /dev/disk2 2025-06-03T10:24:26.243869Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 6:1002 Path# /dev/disk3 2025-06-03T10:24:26.243874Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1000 Path# /dev/disk1 2025-06-03T10:24:26.243880Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1001 Path# /dev/disk2 2025-06-03T10:24:26.243884Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 7:1002 Path# /dev/disk3 2025-06-03T10:24:26.243889Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1000 Path# /dev/disk1 2025-06-03T10:24:26.243894Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1001 Path# /dev/disk2 2025-06-03T10:24:26.243898Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 8:1002 Path# /dev/disk3 2025-06-03T10:24:26.243903Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1000 Path# /dev/disk1 2025-06-03T10:24:26.243907Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1001 Path# /dev/disk2 2025-06-03T10:24:26.243911Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 9:1002 Path# /dev/disk3 2025-06-03T10:24:26.243916Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1000 Path# /dev/disk1 2025-06-03T10:24:26.243920Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1001 Path# /dev/disk2 2025-06-03T10:24:26.243925Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 10:1002 Path# /dev/disk3 2025-06-03T10:24:26.243929Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1000 Path# /dev/disk1 2025-06-03T10:24:26.243933Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 11:1001 Path# /dev/disk2 2025-06-03T10:24:26.243938Z node 1 
:BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Cr ... p:340} Create new pdisk PDiskId# 204:1002 Path# /dev/disk3 2025-06-03T10:24:56.071007Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1000 Path# /dev/disk1 2025-06-03T10:24:56.071012Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1001 Path# /dev/disk2 2025-06-03T10:24:56.071017Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 205:1002 Path# /dev/disk3 2025-06-03T10:24:56.071021Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1000 Path# /dev/disk1 2025-06-03T10:24:56.071026Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1001 Path# /dev/disk2 2025-06-03T10:24:56.071030Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 206:1002 Path# /dev/disk3 2025-06-03T10:24:56.071035Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1000 Path# /dev/disk1 2025-06-03T10:24:56.071040Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1001 Path# /dev/disk2 2025-06-03T10:24:56.071045Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 207:1002 Path# /dev/disk3 2025-06-03T10:24:56.071049Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1000 Path# /dev/disk1 2025-06-03T10:24:56.071054Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1001 Path# /dev/disk2 2025-06-03T10:24:56.071059Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 208:1002 Path# /dev/disk3 2025-06-03T10:24:56.071063Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1000 Path# /dev/disk1 2025-06-03T10:24:56.071067Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1001 Path# /dev/disk2 2025-06-03T10:24:56.071072Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 209:1002 Path# /dev/disk3 2025-06-03T10:24:56.071076Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1000 Path# /dev/disk1 2025-06-03T10:24:56.071081Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1001 Path# /dev/disk2 2025-06-03T10:24:56.071086Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 210:1002 Path# /dev/disk3 2025-06-03T10:24:56.316509Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2181} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.246495s 2025-06-03T10:24:56.316609Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 2146435078 Duration# 0.246616s 2025-06-03T10:24:56.337164Z node 161 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 4 Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" SharedWithOs: true } Drive { Path: "/dev/disk3" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "first box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 4 } Host { Key { Fqdn: 
"::1" IcPort: 12002 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 4 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 4 } ItemConfigGeneration: 1 } } Command { DefineStoragePool { BoxId: 1 StoragePoolId: 1 Name: 
"first storage pool" ErasureSpecies: "block-4-2" VDiskKind: "Default" NumGroups: 180 PDiskFilter { Property { Type: ROT } } ItemConfigGeneration: 1 } } Command { QueryBaseConfig { } } } 2025-06-03T10:24:56.339201Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1000 Path# /dev/disk1 2025-06-03T10:24:56.339222Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1001 Path# /dev/disk2 2025-06-03T10:24:56.339227Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 211:1002 Path# /dev/disk3 2025-06-03T10:24:56.339231Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1000 Path# /dev/disk1 2025-06-03T10:24:56.339236Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1001 Path# /dev/disk2 2025-06-03T10:24:56.339240Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 212:1002 Path# /dev/disk3 2025-06-03T10:24:56.339244Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1000 Path# /dev/disk1 2025-06-03T10:24:56.339249Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1001 Path# /dev/disk2 2025-06-03T10:24:56.339253Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 213:1002 Path# /dev/disk3 2025-06-03T10:24:56.339257Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1000 Path# /dev/disk1 2025-06-03T10:24:56.339265Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1001 Path# /dev/disk2 2025-06-03T10:24:56.339270Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 214:1002 Path# /dev/disk3 2025-06-03T10:24:56.339274Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1000 Path# /dev/disk1 2025-06-03T10:24:56.339278Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1001 Path# /dev/disk2 2025-06-03T10:24:56.339283Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 215:1002 Path# /dev/disk3 2025-06-03T10:24:56.339287Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1000 Path# /dev/disk1 2025-06-03T10:24:56.339291Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1001 Path# /dev/disk2 2025-06-03T10:24:56.339296Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 216:1002 Path# /dev/disk3 2025-06-03T10:24:56.339300Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1000 Path# /dev/disk1 2025-06-03T10:24:56.339304Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1001 Path# /dev/disk2 2025-06-03T10:24:56.339308Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 217:1002 Path# /dev/disk3 2025-06-03T10:24:56.339313Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1000 Path# /dev/disk1 2025-06-03T10:24:56.339317Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk 
PDiskId# 218:1001 Path# /dev/disk2 2025-06-03T10:24:56.339322Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 218:1002 Path# /dev/disk3 2025-06-03T10:24:56.339326Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1000 Path# /dev/disk1 2025-06-03T10:24:56.339330Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1001 Path# /dev/disk2 2025-06-03T10:24:56.339335Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 219:1002 Path# /dev/disk3 2025-06-03T10:24:56.339339Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1000 Path# /dev/disk1 2025-06-03T10:24:56.339344Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1001 Path# /dev/disk2 2025-06-03T10:24:56.339349Z node 161 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 220:1002 Path# /dev/disk3 2025-06-03T10:24:56.464504Z node 161 :BS_CONTROLLER ERROR: {BSC07@impl.h:2181} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.127474s 2025-06-03T10:24:56.464565Z node 161 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 2146435078 Duration# 0.127551s |59.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithDefaultGroupAttributeGoodUseListOfHosts [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood |59.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection |59.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup_collection/ydb-core-tx-schemeshard-ut_backup_collection >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithRemovedUserCredentialsBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood >> LdapAuthProviderTest::LdapRequestWithEmptyBindPassword [GOOD] >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] |59.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |59.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |59.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_assign_tx_id/core-tx-replication-controller-ut_assign_tx_id |59.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserLoginBad [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad |59.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme |59.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scheme/ydb-core-kqp-ut-scheme ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestBlockEncriptedGroup [GOOD] Test command err: 
2025-06-03T10:25:03.892576Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:03.893423Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmprXpNis/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:03.893491Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmprXpNis/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:03.893744Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:25:03.893803Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:03.893996Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-03T10:25:03.894007Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:03.894119Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-03T10:25:03.894128Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:03.894229Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-03T10:25:03.894236Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:03.894312Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 
PDiskGuid# 1 2025-06-03T10:25:03.894319Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:03.894512Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:28:2075] ControllerId# 72057594037932033 2025-06-03T10:25:03.894518Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:03.894546Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:03.894575Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:03.911085Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:03.912668Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:03.912858Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:03.912867Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:03.948199Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:03.948225Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:25:03.949832Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:25:03.949914Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:03.949991Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:03.950456Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:03.950464Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:03.950486Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\006\035\010\020\241]\021Af~\236\375M\006s\225\010\377\377X" } 2025-06-03T10:25:03.950588Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:25:03.950600Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:77:2120] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:03.950608Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.002356s 2025-06-03T10:25:03.957937Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmprXpNis/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { 
VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:03.958005Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:03.964548Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.967437Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.968514Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.969616Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.969639Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.969944Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.970194Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.970555Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.970951Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.970961Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.970978Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.970990Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:03.971486Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.971598Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:03.979500Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 
2025-06-03T10:25:04.002007Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:04.002194Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:25:04.002557Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:04.002738Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:25:04.002938Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:04.002949Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:04.003009Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:25:04.042824Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:25:04.042893Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:25:04.042939Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:25:04.042987Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM ... equest# {NodeID: 3 GroupIDs: 2181038082 } 2025-06-03T10:25:05.204118Z node 3 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 3 ServiceSet { Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4602857426441423558 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmpcPimft//key.txt" EncryptedGroupKey: "\246\264\001\353\220\211\314\211\376\242\000\246\r6,\322\236[\366\0073\"\032\024RuK\260#\006J\274q\260\326\232" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } } 2025-06-03T10:25:05.204145Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 2 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4602857426441423558 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmpcPimft//key.txt" EncryptedGroupKey: "\246\264\001\353\220\211\314\211\376\242\000\246\r6,\322\236[\366\0073\"\032\024RuK\260#\006J\274q\260\326\232" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } 2025-06-03T10:25:05.204177Z node 3 :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/u93c/000f0d/r3tmp/tmpcPimft//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-03T10:25:05.204411Z node 3 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2181038082 2025-06-03T10:25:05.204431Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-03T10:25:05.204437Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 
2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:25:05.204824Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:606:2106] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204861Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:607:2107] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204883Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:608:2108] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204906Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:609:2109] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204930Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:610:2110] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204952Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:611:2111] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204974Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [3:604:2105] Create Queue# [3:612:2112] targetNodeId# 2 Marker# DSP01 2025-06-03T10:25:05.204980Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:25:05.205366Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205415Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205431Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205482Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205497Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205511Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# 
[82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205521Z node 3 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.205531Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-03T10:25:05.205536Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-03T10:25:05.205573Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [efc53170c63234c6] bootstrap ActorId# [3:613:2113] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-03T10:25:05.205582Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [efc53170c63234c6] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 2 Marker# DSPB03 2025-06-03T10:25:05.205624Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:606:2106] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 9239576342764050621 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-03T10:25:05.209832Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [efc53170c63234c6] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 2 Marker# DSPB01 2025-06-03T10:25:05.209863Z node 3 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [efc53170c63234c6] Result# TEvBlockResult {Status# OK} Marker# DSPB04 Sending TEvPut 2025-06-03T10:25:05.209968Z node 3 :BS_PROXY INFO: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:3:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:3:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-03T10:25:05.210001Z node 3 :BS_PROXY DEBUG: dsproxy_impl.h:309: Group# 2181038082 HandleError ev# TEvPut {Id# [1234:4:0:0:0:10:0] Size# 10 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} Response# TEvPutResult {Id# [1234:4:0:0:0:10:0] Status# ERROR StatusFlags# { } ErrorReason# "Created as LIMITED without keys. It happens when tenant keys are missing on the node." 
ApproximateFreeSpaceShare# 0} Marker# DSP31 Sending TEvPut 2025-06-03T10:25:05.210122Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [c85e1a21dcb31b54] bootstrap ActorId# [2:614:2513] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:11:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:25:05.210176Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [c85e1a21dcb31b54] Id# [1234:2:0:0:0:11:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:05.210186Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [c85e1a21dcb31b54] restore Id# [1234:2:0:0:0:11:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:25:05.210198Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [c85e1a21dcb31b54] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG33 2025-06-03T10:25:05.210207Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [c85e1a21dcb31b54] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:11:1] Marker# BPG32 2025-06-03T10:25:05.210245Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:593:2503] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:11:1] FDS# 11 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:25:05.210356Z node 2 :BS_VDISK_PUT ERROR: blobstorage_skeleton.cpp:568: PDiskId# 1000 VDISK[82000002:_:0:0:0]: (2181038082) TEvVPut: failed to pass the Hull check; id# [1234:2:0:0:0:11:1] status# {Status# BLOCKED} Marker# BSVS03 2025-06-03T10:25:05.210437Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [c85e1a21dcb31b54] received {EvVPutResult Status# BLOCKED ErrorReason# "blocked" ID# [1234:2:0:0:0:11:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 3 } Cost# 80086 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 4 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-03T10:25:05.210454Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [c85e1a21dcb31b54] Result# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} GroupId# 2181038082 Marker# BPP12 2025-06-03T10:25:05.210463Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [c85e1a21dcb31b54] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:11:0] Status# BLOCKED StatusFlags# { } ErrorReason# "Got VPutResult status# BLOCKED from VDiskId# [82000002:1:0:0:0]" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:25:05.210493Z node 2 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.185 sample PartId# [1234:2:0:0:0:11:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 2 } ] } 2025-06-03T10:25:05.210577Z node 3 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [3:606:2106] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestUnmonitoredEventsThenNoMonitorings [GOOD] Test command err: 2025-06-03T10:25:04.860382Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:04.861355Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000eda/r3tmp/tmpBYT8Od/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.861418Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000eda/r3tmp/tmpBYT8Od/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:04.861700Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:25:04.861758Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.861969Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-03T10:25:04.861982Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.862073Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-03T10:25:04.862079Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.862175Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-03T10:25:04.862180Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 
2025-06-03T10:25:04.862259Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 2025-06-03T10:25:04.862265Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:04.862660Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:28:2075] ControllerId# 72057594037932033 2025-06-03T10:25:04.862666Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:04.862691Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:04.862717Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:04.866699Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:04.866994Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:04.867057Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.867062Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.874868Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.874889Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:25:04.875607Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:25:04.875685Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:04.875753Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.876123Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:04.876131Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.876156Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\027\023^\203\244\273\244\307\331\2611\242\031S\250`\350\222\007\212" } 2025-06-03T10:25:04.876246Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:25:04.876255Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:77:2120] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:04.876263Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.001344s 2025-06-03T10:25:04.880516Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000eda/r3tmp/tmpBYT8Od/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks 
{ VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.880580Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:11:2058] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:04.884746Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.886183Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.886737Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887114Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887131Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887300Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887479Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:2:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:04.887581Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:3:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:3:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:04.887731Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:04.887740Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:3:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:04.887752Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887761Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.887978Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:1:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:1:0] oldSyncState# [0 0] DbBirthLsn# 0 2025-06-03T10:25:04.888086Z node 1 :BS_SYNCLOG WARN: blobstorage_synclog.cpp:177: PDiskId# 0 VDISK[2000000:_:0:2:0]: (33554432) Handle(TEvSyncLogRead): FULL_RECOVER(unequal guid); sourceVDisk# [2000000:1:0:0:0] targetVDisk# [2000000:1:0:2:0] oldSyncState# [0 0] 
DbBirthLsn# 0 2025-06-03T10:25:04.893037Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:04.900012Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:04.900189Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:25:04.900594Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:04.900784Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:25:04.900987Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:04.900997Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:25:04.901051Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:25:04.907046Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:25:04.907109Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:25:04.907153Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:25:04.907193Z node 1 :BS_CONTROLLER DEBUG ... arker# BPP01 2025-06-03T10:25:05.315398Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [1a43693427d0a82b] Result# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-03T10:25:05.315408Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [1a43693427d0a82b] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:10:0:0:238:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:25:05.315444Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.305 sample PartId# [72057594037932033:2:10:0:0:238:3] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.305 sample PartId# [72057594037932033:2:10:0:0:238:2] QueryCount# 1 VDiskId# [2000000:1:0:2:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.305 sample PartId# [72057594037932033:2:10:0:0:238:1] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 1.162 VDiskId# [2000000:1:0:2:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.207 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.225 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } ] } 2025-06-03T10:25:05.315518Z node 1 :BS_CONTROLLER DEBUG: {BSCTXPGK08@propose_group_key.cpp:96} TTxProposeGroupKey Complete 2025-06-03T10:25:05.315581Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [1:336:2346] Cookie# 0 Recipient# [1:336:2346] RecipientRewrite# [1:336:2346] Request# {NodeID: 1 GroupIDs: 2181038082 } StopGivingGroups# false 2025-06-03T10:25:05.315602Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 1 GroupIDs: 2181038082 } 2025-06-03T10:25:05.315686Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} 
Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 1566591005720630907 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000eda/r3tmp/tmpBYT8Od//key.txt" EncryptedGroupKey: "\025\356\272\300\034\240\234A/\253\001\321\302\230*\246O\210=\211\246\321\273$\244R\245\016t\254\333\316~\023\242I" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } } 2025-06-03T10:25:05.315710Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 1566591005720630907 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000eda/r3tmp/tmpBYT8Od//key.txt" EncryptedGroupKey: "\025\356\272\300\034\240\234A/\253\001\321\302\230*\246O\210=\211\246\321\273$\244R\245\016t\254\333\316~\023\242I" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } 2025-06-03T10:25:05.316019Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:25:05.316028Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:25:05.316482Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:526:2501] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316511Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:527:2502] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316534Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:528:2503] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316558Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:529:2504] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316582Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:530:2505] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316609Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:531:2506] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316632Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [1:522:2498] Create Queue# [1:532:2507] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.316638Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:25:05.316789Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316810Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# 
[82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316829Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316849Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316861Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316872Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316882Z node 1 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.316887Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-03T10:25:05.316892Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-03T10:25:05.316920Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [bba3bffd2e286f4b] bootstrap ActorId# [1:533:2508] Group# 2181038082 TabletId# 1234 Generation# 1 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-03T10:25:05.316929Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [bba3bffd2e286f4b] Sending TEvVBlock Tablet# 1234 Generation# 1 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-03T10:25:05.316990Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:526:2501] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 1 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 5954322438564514923 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-03T10:25:05.321164Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [bba3bffd2e286f4b] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 
2025-06-03T10:25:05.321199Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [bba3bffd2e286f4b] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-03T10:25:05.321346Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:526:2501] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 2025-06-03T10:25:05.325311Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:290: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-03T10:25:05.325505Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [d70ef3c23a1a2346] bootstrap ActorId# [1:535:2510] Group# 2181038082 TabletId# 1234 Generation# 3 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-03T10:25:05.325520Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [d70ef3c23a1a2346] Sending TEvVBlock Tablet# 1234 Generation# 3 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-03T10:25:05.325587Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:526:2501] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 3 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 9128599526734180053 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-03T10:25:05.329328Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [d70ef3c23a1a2346] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-03T10:25:05.329368Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [d70ef3c23a1a2346] Result# TEvBlockResult {Status# OK} Marker# DSPB04 2025-06-03T10:25:05.329536Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:150: [91379e686f748e92] bootstrap ActorId# [1:536:2511] Group# 2181038082 TabletId# 1234 Generation# 4 Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# DSPB05 2025-06-03T10:25:05.329549Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:111: [91379e686f748e92] Sending TEvVBlock Tablet# 1234 Generation# 4 vdiskId# [82000002:1:0:0:0] node# 1 Marker# DSPB03 2025-06-03T10:25:05.329614Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:526:2501] NKikimr::TEvBlobStorage::TEvVBlock# NKikimrBlobStorage.TEvVBlock TabletId: 1234 Generation: 4 VDiskID { GroupID: 2181038082 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } IssuerGuid: 4615760194623649967 MsgQoS { ExtQueueId: PutTabletLog } cookie# 0 2025-06-03T10:25:05.333897Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:43: [91379e686f748e92] Handle TEvVBlockResult status# OK From# [82000002:1:0:0:0] NodeId# 1 Marker# DSPB01 2025-06-03T10:25:05.333940Z node 1 :BS_PROXY_BLOCK DEBUG: dsproxy_block.cpp:100: [91379e686f748e92] Result# TEvBlockResult {Status# OK} Marker# DSPB04 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestGivenPDiskFormatedWithGuid1AndCreatedWithGuid2WhenYardInitThenError [GOOD] Test command err: 2025-06-03T10:25:04.413685Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:04.414684Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 
VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.414749Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:04.414957Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [2:41:2070] ControllerId# 72057594037932033 2025-06-03T10:25:04.414964Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:04.415000Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:04.415024Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:04.416028Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:04.416281Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:04.416319Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:04.416936Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmpc8lPpR/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.416989Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmpc8lPpR/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:04.417355Z node 1 :BS_NODE WARN: 
{NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:25:04.417419Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.417682Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-03T10:25:04.417698Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.417826Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-03T10:25:04.417832Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.417910Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-03T10:25:04.417918Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.417995Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 2025-06-03T10:25:04.418001Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:04.418160Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:86:2076] ControllerId# 72057594037932033 2025-06-03T10:25:04.418166Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:04.418188Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:04.418212Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:04.420104Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:04.420473Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.420482Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.420547Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:04.428575Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.428592Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.428681Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.428690Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 
2025-06-03T10:25:04.429382Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:25:04.430072Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:04.430127Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.430206Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.430213Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:25:04.430231Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:25:04.430276Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:04.430486Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.430565Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.430597Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:04.430645Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:04.430648Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.430665Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "f\231\203\360`\231\367\"\275\252\241X\264\3226R\260\210\220\022" } 2025-06-03T10:25:04.430681Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:237} SubscribeToPeerNode NodeId# 2 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 1 2025-06-03T10:25:04.430689Z node 1 :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} 
Initiated bind NodeId# 2 Binding# {2.0/8367665631450794338@[0:0:0]} SessionId# [0:0:0] 2025-06-03T10:25:04.430770Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:25:04.430777Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:142:2121] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:04.430783Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.000553s 2025-06-03T10:25:04.431801Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-03T10:25:04.431809Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.431825Z node 2 :BS_NODE DEBUG: {NWDC18@dis ... {VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1001 PDiskGuid: 1595022535169817486 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.718811Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.718840Z node 1 :BS_NODE DEBUG: {NW47@node_warden_impl.cpp:1141} Handle(TEvStatusUpdate) 2025-06-03T10:25:04.718869Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038081 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1001 PDiskGuid: 1595022535169817486 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:25:04.718909Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 2181038080 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1000 VSlotId: 1000 PDiskGuid: 1595022535169817486 Status: READY OnlyPhantomsRemain: false } } Formatting pdisk Creating PDisk Creating pdisk Verify that PDisk returns ERROR 2025-06-03T10:25:04.755059Z node 1 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 1 ServiceSet { PDisks { NodeID: 1 PDiskID: 1001 Path: "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat" PDiskGuid: 8557490841637691387 PDiskCategory: 0 EntityStatus: CREATE } } } 2025-06-03T10:25:04.755090Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {PDisks { NodeID: 1 PDiskID: 1001 Path: "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat" PDiskGuid: 8557490841637691387 PDiskCategory: 0 EntityStatus: CREATE } } 2025-06-03T10:25:04.755123Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1001 Path# "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:04.756720Z node 1 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2856} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat": no such file. 
PDiskId# 1001 2025-06-03T10:25:04.756826Z node 1 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1001 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat": no such file. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/u93c/000ee8/r3tmp/tmp64TJd7//new_pdisk.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 8557490841637691387 PDiskId# 1001 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 HashedMainKey[0]# 0x221976E60BD392C7 StartOwnerRound# 10 SectorMap# false EnableSectorEncryption # 1 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# Enable WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1001 2025-06-03T10:25:04.800982Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [b6b2c6548553d7a5] bootstrap ActorId# [1:542:2461] Group# 33554432 BlobCount# 1 BlobIDs# [[72057594037932033:2:8:0:0:332:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-03T10:25:04.801056Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801064Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801069Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801078Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 3 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801084Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 3 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801089Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [b6b2c6548553d7a5] Id# [72057594037932033:2:8:0:0:332:0] restore disk# 3 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:04.801098Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [b6b2c6548553d7a5] restore Id# 
[72057594037932033:2:8:0:0:332:0] optimisticReplicas# 3 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:25:04.801114Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037932033:2:8:0:0:332:1] Marker# BPG33 2025-06-03T10:25:04.801122Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 0 to# 0 blob Id# [72057594037932033:2:8:0:0:332:1] Marker# BPG32 2025-06-03T10:25:04.801130Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72057594037932033:2:8:0:0:332:2] Marker# BPG33 2025-06-03T10:25:04.801135Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 1 to# 1 blob Id# [72057594037932033:2:8:0:0:332:2] Marker# BPG32 2025-06-03T10:25:04.801141Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [b6b2c6548553d7a5] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72057594037932033:2:8:0:0:332:3] Marker# BPG33 2025-06-03T10:25:04.801146Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [b6b2c6548553d7a5] Sending missing VPut part# 2 to# 2 blob Id# [72057594037932033:2:8:0:0:332:3] Marker# BPG32 2025-06-03T10:25:04.801186Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:103:2091] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:332:3] FDS# 332 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:25:04.801198Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:96:2084] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:332:2] FDS# 332 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:25:04.801206Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:117:2105] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037932033:2:8:0:0:332:1] FDS# 332 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:25:04.802154Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:332:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 9 } Cost# 82614 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 10 }}}} from# [2000000:1:0:0:0] Marker# BPP01 2025-06-03T10:25:04.802218Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:332:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 10 } Cost# 82614 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 11 }}}} from# [2000000:1:0:1:0] Marker# BPP01 2025-06-03T10:25:04.802237Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [b6b2c6548553d7a5] received {EvVPutResult Status# OK ID# [72057594037932033:2:8:0:0:332:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 8 } Cost# 82614 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 9 }}}} from# [2000000:1:0:3:0] Marker# BPP01 2025-06-03T10:25:04.802261Z node 1 :BS_PROXY_PUT DEBUG: 
dsproxy_put_impl.cpp:72: [b6b2c6548553d7a5] Result# TEvPutResult {Id# [72057594037932033:2:8:0:0:332:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 33554432 Marker# BPP12 2025-06-03T10:25:04.802273Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [b6b2c6548553d7a5] SendReply putResult# TEvPutResult {Id# [72057594037932033:2:8:0:0:332:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:25:04.802312Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 33554432 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.274 sample PartId# [72057594037932033:2:8:0:0:332:3] QueryCount# 1 VDiskId# [2000000:1:0:1:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.274 sample PartId# [72057594037932033:2:8:0:0:332:2] QueryCount# 1 VDiskId# [2000000:1:0:0:0] NodeId# 1 } TEvVPut{ TimestampMs# 0.274 sample PartId# [72057594037932033:2:8:0:0:332:1] QueryCount# 1 VDiskId# [2000000:1:0:3:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 1.236 VDiskId# [2000000:1:0:0:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.285 VDiskId# [2000000:1:0:1:0] NodeId# 1 Status# OK } TEvVPutResult{ TimestampMs# 1.308 VDiskId# [2000000:1:0:3:0] NodeId# 1 Status# OK } ] } 2025-06-03T10:25:05.401875Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 0 AvailableSize: 68557996032 TotalSize: 68719476736 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 17112760320 State: Normal } } 2025-06-03T10:25:05.525977Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 68557996032 TotalSize: 68719476736 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34225520640 State: Normal } } 2025-06-03T10:25:05.571491Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1001 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-03T10:25:05.571527Z node 1 :BS_CONTROLLER NOTICE: {BSCTXUDM03@disk_metrics.cpp:114} PDisk not found PDiskId# 1:1001 |59.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data |59.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestReceivedPDiskRestartNotAllowed [GOOD] |59.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::TestSendUsefulMonitoring [GOOD] Test command err: 2025-06-03T10:25:04.959876Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:04.960837Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 
Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.960893Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:04.961126Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [2:41:2070] ControllerId# 72057594037932033 2025-06-03T10:25:04.961132Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:04.961161Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:04.961185Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:04.962452Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:04.962740Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:04.962788Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:25:04.963527Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "SectorMap:/home/runner/.ya/build/build_root/u93c/000ee0/r3tmp/tmpA1Jeyr/pdisk_map" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.963640Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 0 Path# "SectorMap:/home/runner/.ya/build/build_root/u93c/000ee0/r3tmp/tmpA1Jeyr/pdisk_map" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:25:04.964039Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# 
/Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:25:04.964118Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.964373Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:0:0] VSlotId# 1:0:0 PDiskGuid# 1 2025-06-03T10:25:04.964389Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.964525Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:1:0] VSlotId# 1:0:1 PDiskGuid# 1 2025-06-03T10:25:04.964534Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.964661Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:2:0] VSlotId# 1:0:2 PDiskGuid# 1 2025-06-03T10:25:04.964669Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:25:04.964790Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [2000000:1:0:3:0] VSlotId# 1:0:3 PDiskGuid# 1 2025-06-03T10:25:04.964799Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 33554432 2025-06-03T10:25:04.965004Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 1 PipeClientId# [1:86:2076] ControllerId# 72057594037932033 2025-06-03T10:25:04.965012Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:25:04.965035Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:25:04.965061Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:25:04.967665Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:25:04.968134Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.968145Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.968220Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:25:04.979369Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.979395Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.979489Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.979503Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:25:04.980604Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} 
PersistConfig Record# {} Drives# [] 2025-06-03T10:25:04.981671Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:04.981761Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.981884Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.981897Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:25:04.981929Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:25:04.981987Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:25:04.982307Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:25:04.982451Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 0 Path: "pdisk0.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 1 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 2 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } VDisks { VDiskID { GroupID: 33554432 GroupGeneration: 1 Ring: 0 Domain: 3 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } Groups { GroupID: 33554432 GroupGeneration: 1 ErasureSpecies: 1 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 0 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 1 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 2 PDiskGuid: 1 } } FailDomains { VDiskLocations { NodeID: 1 PDiskID: 0 VDiskSlotID: 3 PDiskGuid: 1 } } } } AvailabilityDomains: 1 } 2025-06-03T10:25:04.982502Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 268639248 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:04.982583Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:25:04.982590Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.982613Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\377\310\002\210\355\375O\351\257\221\007\"S4\"\004\251\220\355\254" } 2025-06-03T10:25:04.982637Z node 1 :BS_NODE DEBUG: {NWDC54@distconf_binding.cpp:237} SubscribeToPeerNode NodeId# 2 SessionId# [0:0:0] Inserted# true Subscription# {SessionId# [0:0:0] SubscriptionCookie# 0} NextSubscribeCookie# 1 2025-06-03T10:25:04.982648Z node 1 :BS_NODE DEBUG: {NWDC29@distconf_binding.cpp:80} Initiated bind NodeId# 2 Binding# {2.0/2589594885852055025@[0:0:0]} SessionId# [0:0:0] 
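
[Editor's note] Bracketed blob identifiers such as [72057594037932033:2:8:0:0:332:0] recur throughout the put traces and service-set dumps above. As a reading aid only, here is a minimal C++ sketch of a parser for that printed form; the field order TabletId:Generation:Step:Channel:Cookie:BlobSize:PartId is an assumption inferred from the log output, and ParseBlobId is a hypothetical helper, not part of the YDB sources.

    // Hypothetical reading aid, not part of the YDB sources: splits the
    // bracketed blob IDs printed above, e.g. [72057594037932033:2:8:0:0:332:0].
    // Field order (tablet, gen, step, channel, cookie, size, part) is assumed.
    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <sstream>
    #include <string>

    struct ParsedBlobId {
        std::array<std::uint64_t, 7> fields{};  // tablet, gen, step, channel, cookie, size, part
        bool ok = false;
    };

    static ParsedBlobId ParseBlobId(const std::string& s) {
        ParsedBlobId r;
        if (s.size() < 2 || s.front() != '[' || s.back() != ']')
            return r;                              // not a bracketed ID
        std::istringstream in(s.substr(1, s.size() - 2));
        std::string tok;
        for (auto& f : r.fields) {
            if (!std::getline(in, tok, ':'))
                return r;                          // fewer than 7 fields
            f = std::stoull(tok);                  // throws on non-numeric input
        }
        r.ok = true;
        return r;
    }

    int main() {
        const auto id = ParseBlobId("[72057594037932033:2:8:0:0:332:0]");
        if (id.ok)
            std::printf("tablet=%llu gen=%llu step=%llu size=%llu part=%llu\n",
                        static_cast<unsigned long long>(id.fields[0]),
                        static_cast<unsigned long long>(id.fields[1]),
                        static_cast<unsigned long long>(id.fields[2]),
                        static_cast<unsigned long long>(id.fields[5]),
                        static_cast<unsigned long long>(id.fields[6]));
        return 0;
    }

Applied to the trace above, the put of part 1, 2, and 3 of one 332-byte blob to three different VDisks matches the three TEvVPut/TEvVPutResult pairs in the query history.
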
2025-06-03T10:25:04.982765Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:25:04.982776Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:142:2121] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:25:04.982785Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.000858s 2025-06-03T10:25:04.984532Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 2 2025-06-03T10:25:04.984550Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:25:04.984583Z node 2 :BS_NODE DEBUG: {NWDC18 ... :1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.453997Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-03T10:25:05.454002Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-03T10:25:05.454010Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:290: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 0 Marker# DSP57 initialize full monitoring 2025-06-03T10:25:05.454157Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [d70ef3c23a1a2346] bootstrap ActorId# [1:600:2510] Group# 2181038082 BlobCount# 1 BlobIDs# [[1234:2:0:0:0:5:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:25:05.454209Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [d70ef3c23a1a2346] Id# [1234:2:0:0:0:5:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:25:05.454216Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [d70ef3c23a1a2346] restore Id# [1234:2:0:0:0:5:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:25:05.454226Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [d70ef3c23a1a2346] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG33 2025-06-03T10:25:05.454232Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [d70ef3c23a1a2346] Sending missing VPut part# 0 to# 0 blob Id# [1234:2:0:0:0:5:1] Marker# BPG32 2025-06-03T10:25:05.454262Z node 1 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [1:593:2503] NKikimr::TEvBlobStorage::TEvVPut# {ID# [1234:2:0:0:0:5:1] FDS# 5 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:25:05.459852Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [d70ef3c23a1a2346] received {EvVPutResult Status# OK ID# [1234:2:0:0:0:5:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 } Cost# 80039 ExtQueueId# PutTabletLog IntQueueId# IntPutLog CostSettings# { SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257} Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 1 }}}} from# [82000002:1:0:0:0] Marker# BPP01 2025-06-03T10:25:05.459920Z node 1 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [d70ef3c23a1a2346] Result# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK 
StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} GroupId# 2181038082 Marker# BPP12 2025-06-03T10:25:05.459934Z node 1 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [d70ef3c23a1a2346] SendReply putResult# TEvPutResult {Id# [1234:2:0:0:0:5:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.999479} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:25:05.459969Z node 1 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2181038082 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.14 sample PartId# [1234:2:0:0:0:5:1] QueryCount# 1 VDiskId# [82000002:1:0:0:0] NodeId# 1 } TEvVPutResult{ TimestampMs# 5.77 VDiskId# [82000002:1:0:0:0] NodeId# 1 Status# OK } ] } 2025-06-03T10:25:05.460112Z node 2 :BS_NODE DEBUG: {NW46@node_warden_proxy.cpp:130} HandleForwarded GroupId# 2181038082 EnableProxyMock# false NoGroup# false 2025-06-03T10:25:05.460125Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 2181038082 2025-06-03T10:25:05.460134Z node 2 :BS_NODE DEBUG: {NW98@node_warden_group.cpp:265} RequestGroupConfig GroupId# 2181038082 2025-06-03T10:25:05.460243Z node 2 :BS_NODE INFO: {NW79@node_warden_group_resolver.cpp:74} TGroupResolverActor::Bootstrap GroupId# 2181038082 2025-06-03T10:25:05.460261Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:25:05.460267Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:55: Group# 2181038082 SetStateUnconfigured Marker# DSP07 2025-06-03T10:25:05.460294Z node 2 :BS_PROXY DEBUG: dsproxy_impl.h:205: Group# 2181038082 HandleEnqueue# TEvCollectGarbage {TabletId# 1234 RecordGeneration# 4294967295 PerGenerationCounter# 4294967295 Channel# 0 Deadline# 18446744073709551 Collect# true CollectGeneration# 4294967295 CollectStep# 4294967295 Hard# true IsMultiCollectAllowed# 1 IsMonitored# 1} Marker# DSP17 2025-06-03T10:25:05.460392Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG02@get_group.cpp:58} TEvControllerGetGroup Sender# [2:30:2059] Cookie# 0 Recipient# [1:441:2379] RecipientRewrite# [1:400:2347] Request# {NodeID: 2 GroupIDs: 2181038082 } StopGivingGroups# false 2025-06-03T10:25:05.460426Z node 1 :BS_CONTROLLER DEBUG: {BSCTXGG01@get_group.cpp:22} Handle TEvControllerGetGroup Request# {NodeID: 2 GroupIDs: 2181038082 } 2025-06-03T10:25:05.460564Z node 2 :BS_NODE DEBUG: {NW17@node_warden_impl.cpp:800} Handle(TEvBlobStorage::TEvControllerNodeServiceSetUpdate) Msg# {Status: OK NodeID: 2 ServiceSet { Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4298666823393031122 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: "/home/runner/.ya/build/build_root/u93c/000ee0/r3tmp/tmpA1Jeyr//key.txt" EncryptedGroupKey: ";\304?1\313V\244\352\351;I\031\371\243$\016\347j{n[\314\217\003\345\313x*\365P\226\262\301\336\031j" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } } 2025-06-03T10:25:05.460591Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# false Comprehensive# false Origin# controller ServiceSet# {Groups { GroupID: 2181038082 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1000 VDiskSlotID: 1002 PDiskGuid: 4298666823393031122 } } } EncryptionMode: 1 LifeCyclePhase: 3 MainKeyId: 
"/home/runner/.ya/build/build_root/u93c/000ee0/r3tmp/tmpA1Jeyr//key.txt" EncryptedGroupKey: ";\304?1\313V\244\352\351;I\031\371\243$\016\347j{n[\314\217\003\345\313x*\365P\226\262\301\336\031j" GroupKeyNonce: 2181038082 MainKeyVersion: 1 StoragePoolName: "test_storage" DeviceType: ROT } } 2025-06-03T10:25:05.460623Z node 2 :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038082 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "/home/runner/.ya/build/build_root/u93c/000ee0/r3tmp/tmpA1Jeyr//key.txt" MainKey.Version# 1 GroupKeyNonce# 2181038082 2025-06-03T10:25:05.460849Z node 2 :BS_NODE INFO: {NW81@node_warden_group_resolver.cpp:270} TGroupResolverActor::PassAway GroupId# 2181038082 2025-06-03T10:25:05.460866Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 2181038082 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# true Marker# DSP02 2025-06-03T10:25:05.460872Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 2181038082 IsLimitedKeyless# 1 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:25:05.465817Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:604:2106] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465868Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:605:2107] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465891Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:606:2108] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465911Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:607:2109] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465932Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:608:2110] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465955Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:609:2111] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465976Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 2181038082 Actor# [2:602:2105] Create Queue# [2:610:2112] targetNodeId# 1 Marker# DSP01 2025-06-03T10:25:05.465983Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 2181038082 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:25:05.470132Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 1 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470242Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 2 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470270Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 3 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 
524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470316Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 4 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470344Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 5 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470355Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 6 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470366Z node 2 :BS_PROXY DEBUG: dsproxy_state.cpp:209: Group# 2181038082 Handle TEvProxyQueueState# {VDiskId# [82000002:1:0:0:0] QueueId# 7 IsConnected# true ExtraBlockChecksSupport# true CostModel# {SeekTimeUs# 8000 ReadSpeedBps# 127000000 WriteSpeedBps# 127000000 ReadBlockSize# 524288 WriteBlockSize# 524288 MinHugeBlobInBytes# 524257 GType# none}} Duration# 0.000000s Marker# DSP04 2025-06-03T10:25:05.470375Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:183: Group# 2181038082 -> StateWork Marker# DSP11 2025-06-03T10:25:05.470382Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:78: Group# 2181038082 SetStateWork Marker# DSP15 2025-06-03T10:25:05.470469Z node 2 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [2:604:2106] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[1234:4294967295:4294967295:0] collect=[4294967295:4294967295] cookie# 0 |59.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data/ydb-core-kqp-ut-data >> test.py::test[window-row_number_to_map-default.txt-Results] [GOOD] >> test.py::test[window-win_func_aggr_stat--Results] >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] >> test.py::test[aggregate-group_by_cube_grouping_and_expr-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_by_expr_semi_join--Results] |59.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |59.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant |59.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_base_tenant/ydb-core-tx-tx_proxy-ut_base_tenant >> TBlobStorageWardenTest::ObtainTenantKeySamePin [GOOD] >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] >> TBlobStorageHullFresh::AppendixPerf [GOOD] >> TBlobStorageHullFresh::AppendixPerf_Tune >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> test.py::test[aggregate-group_by_session_only--ForceBlocks] [GOOD] >> test.py::test[aggregate-group_by_session_only--Results] >> 
TLdapUtilsSearchFilterCreatorTest::GetFilterWithoutLoginPlaceholders [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnames [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV4List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromIpV6List [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesLdapsScheme [GOOD] >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] |59.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainTenantKeyDifferentPin [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsDisableRequestToAD [GOOD] Test command err: 2025-06-03T10:25:01.760276Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667167058761582:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:01.760545Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmpCWV3Pp/pdisk_1.dat 2025-06-03T10:25:02.529767Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:02.533427Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667167058761432:2079] 1748946301754972 != 1748946301754975 TServer::EnableGrpc on GrpcPort 25310, node 1 2025-06-03T10:25:02.558354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:02.558386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:02.561949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:02.563473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:02.563476Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:02.563478Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:02.563522Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:02.944975Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:02.945092Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:02.945097Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:02.945659Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:12643, port: 12643 2025-06-03T10:25:02.945674Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:02.965486Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. 
Can't contact LDAP server 2025-06-03T10:25:02.965800Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****Wu8w (6B659176) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-03T10:25:02.965851Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:02.965874Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:02.966297Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://unavailablehost:12643, port: 12643 2025-06-03T10:25:02.966313Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:02.988432Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:184: Could not start TLS. Can't contact LDAP server 2025-06-03T10:25:02.988492Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****Wu8w (6B659176) () has now retryable error message 'Could not login via LDAP (Could not start TLS. Can't contact LDAP server)' 2025-06-03T10:25:03.614038Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667175687138528:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.614227Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmpSyFoVH/pdisk_1.dat 2025-06-03T10:25:03.631674Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:03.634501Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667175687138500:2079] 1748946303613617 != 1748946303613620 TServer::EnableGrpc on GrpcPort 29834, node 2 2025-06-03T10:25:03.643387Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.643402Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.643406Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.643461Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.719622Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:03.719665Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:03.721545Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:03.809356Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.813372Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.813392Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.813660Z node 2 :TICKET_PARSER DEBUG: 
ticket_parser_impl.h:1809: Ticket eyJh****DHnA (B26173D7) () has now permanent error message 'Could not login via LDAP (List of ldap server hosts is empty)' 2025-06-03T10:25:04.249792Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667179526322112:2176];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.249873Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmpi5t3MV/pdisk_1.dat 2025-06-03T10:25:04.266104Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.266569Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667179526321963:2079] 1748946304249048 != 1748946304249051 TServer::EnableGrpc on GrpcPort 16977, node 3 2025-06-03T10:25:04.277890Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.277905Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.277907Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.277965Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.354622Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.354655Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.355689Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.379904Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.382611Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.382631Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.382894Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****ZcwQ (3B67BE5A) () has now permanent error message 'Could not login via LDAP (Parameter BaseDn is empty)' 2025-06-03T10:25:04.723306Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667180432912128:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.723334Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmpU5a6s8/pdisk_1.dat 2025-06-03T10:25:04.739984Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.740219Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667180432912107:2079] 1748946304723161 != 1748946304723164 TServer::EnableGrpc 
on GrpcPort 11344, node 4 2025-06-03T10:25:04.756376Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.756391Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.756394Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.756448Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.828414Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.828453Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.830051Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.952513Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.952600Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.952605Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.952808Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****CY5Q (1E2B7458) () has now permanent error message 'Could not login via LDAP (Parameter BindDn is empty)' 2025-06-03T10:25:05.256965Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667185999612028:2138];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmpaNxEKt/pdisk_1.dat 2025-06-03T10:25:05.262666Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:05.281613Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.284441Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667185999611929:2079] 1748946305256292 != 1748946305256295 TServer::EnableGrpc on GrpcPort 2314, node 5 2025-06-03T10:25:05.297526Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.297547Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.297549Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.297607Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.362855Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.362887Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.363907Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.365348Z node 5 
:TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.366040Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.366056Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.366229Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****Tg2Q (E58D8CE0) () has now permanent error message 'Could not login via LDAP (Parameter BindPassword is empty)' 2025-06-03T10:25:05.872644Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667182990964624:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:05.872974Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00153d/r3tmp/tmp4KcArd/pdisk_1.dat 2025-06-03T10:25:05.891161Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.891542Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511667182990964602:2079] 1748946305872441 != 1748946305872444 TServer::EnableGrpc on GrpcPort 27001, node 6 2025-06-03T10:25:05.904569Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.904587Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.904589Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.904633Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.963230Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.965537Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.965559Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.965755Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:11091, port: 11091 2025-06-03T10:25:05.965797Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.978686Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.978717Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.979647Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:06.013601Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:06.057755Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****eqAQ (3EE719B8) () has now valid token of ldapuser@ldap >> 
test.py::test[aggregate-histogram_cdf-default.txt-Results] [GOOD] >> test.py::test[aggregate-list_after_group-default.txt-ForceBlocks] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithInvalidRobotUserPasswordBad [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmpHrpR9K/pdisk_1.dat 2025-06-03T10:25:02.375548Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667171745270416:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:02.375667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:02.639620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:02.639761Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667171745270255:2079] 1748946302368112 != 1748946302368115 2025-06-03T10:25:02.649396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:02.653907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:02.659880Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8696, node 1 2025-06-03T10:25:02.715724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:02.715741Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:02.715743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:02.715796Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.236986Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.237346Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.237351Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.238238Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:27039, port: 27039 2025-06-03T10:25:03.238647Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:03.270729Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:03.313706Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:03.357542Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:03.357737Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:03.357750Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:03.401576Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:03.445598Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:03.446371Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****Ai8A (D8400E11) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmpawyrN6/pdisk_1.dat 2025-06-03T10:25:03.758291Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:03.768130Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:03.768593Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667174766913223:2079] 1748946303736824 != 1748946303736827 TServer::EnableGrpc on GrpcPort 15900, node 2 2025-06-03T10:25:03.784132Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.784149Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.784152Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.784216Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.853469Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:03.853497Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:03.854846Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:03.890109Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.892239Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.892253Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.892429Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:20306, port: 20306 
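
[Editor's note] The LDAP test traces above all follow one client sequence: initialize against a URI, optionally upgrade the connection with StartTLS, bind as the service account, then search the user entry for memberOf (with a further matching-rule search for nested groups). Below is a minimal sketch of that sequence using OpenLDAP's libldap; the URI, bind DN, and password are placeholder fixture values echoed from the log, and this is not the code path in ldap_auth_provider.cpp.

    // Minimal sketch of the logged LDAP flow (assumes OpenLDAP client library).
    // Build: c++ ldap_flow.cpp -lldap -llber
    #include <ldap.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        LDAP* ld = nullptr;
        // "init: scheme: ldap, uris: ldap://localhost:20306" (fixture value)
        if (ldap_initialize(&ld, "ldap://localhost:20306") != LDAP_SUCCESS)
            return 1;

        int version = LDAP_VERSION3;
        ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);

        // "start TLS"; on failure the provider logs
        // "Could not start TLS. Can't contact LDAP server" and retries.
        if (ldap_start_tls_s(ld, nullptr, nullptr) != LDAP_SUCCESS) {
            std::fprintf(stderr, "Could not start TLS\n");
            ldap_unbind_ext_s(ld, nullptr, nullptr);
            return 1;
        }

        // "bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net"
        berval cred;
        cred.bv_val = const_cast<char*>("robouser-password");  // placeholder
        cred.bv_len = std::strlen(cred.bv_val);
        if (ldap_sasl_bind_s(ld, "cn=robouser,dc=search,dc=yandex,dc=net",
                             LDAP_SASL_SIMPLE, &cred,
                             nullptr, nullptr, nullptr) != LDAP_SUCCESS) {
            ldap_unbind_ext_s(ld, nullptr, nullptr);
            return 1;
        }

        // "search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree,
        //  filter: uid=ldapuser, attributes: memberOf"
        char memberOf[] = "memberOf";
        char* attrs[] = {memberOf, nullptr};
        LDAPMessage* res = nullptr;
        if (ldap_search_ext_s(ld, "dc=search,dc=yandex,dc=net", LDAP_SCOPE_SUBTREE,
                              "(uid=ldapuser)", attrs, /*attrsonly=*/0,
                              nullptr, nullptr, nullptr, LDAP_NO_LIMIT,
                              &res) == LDAP_SUCCESS) {
            for (LDAPMessage* e = ldap_first_entry(ld, res); e;
                 e = ldap_next_entry(ld, e)) {
                berval** groups = ldap_get_values_len(ld, e, "memberOf");
                for (int i = 0; groups && groups[i]; ++i)
                    std::printf("group: %s\n", groups[i]->bv_val);
                if (groups)
                    ldap_value_free_len(groups);
            }
            ldap_msgfree(res);
        }

        ldap_unbind_ext_s(ld, nullptr, nullptr);
        return 0;
    }

The nested-group step seen in the traces ("Try to get nested groups - tree traversal" with the (member:1.2.840.113556.1.4.1941:=...) matching-rule filter) repeats the same ldap_search_ext_s call with the group DNs gathered so far until no new parents appear.
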
2025-06-03T10:25:03.892459Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:03.900072Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:03.941603Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:03.985818Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ve-g (F8E10C08) () has now valid token of ldapuser@ldap 2025-06-03T10:25:04.234047Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667180993040681:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.234378Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmpBoPPAI/pdisk_1.dat 2025-06-03T10:25:04.246504Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.248698Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667180993040497:2079] 1748946304233161 != 1748946304233164 TServer::EnableGrpc on GrpcPort 30684, node 3 2025-06-03T10:25:04.256859Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.256872Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.256874Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.256916Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.280391Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.282641Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.282672Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.282872Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://qqq:15070 ldap://localhost:15070 ldap://localhost:11111, port: 15070 2025-06-03T10:25:04.282894Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:04.303503Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.337542Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.337570Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.338587Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.345571Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:04.393551Z 
node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:04.397556Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:04.397590Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.441530Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.485531Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.486026Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****uDpQ (C0002BC4) () has now valid token of ldapuser@ldap 2025-06-03T10:25:04.737638Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667179714277054:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.737718Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmpsBtJnI/pdisk_1.dat 2025-06-03T10:25:04.751023Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.752647Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667179714277032:2079] 1748946304737479 != 1748946304737482 TServer::EnableGrpc on GrpcPort 20461, node 4 2025-06-03T10:25:04.762056Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.762070Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.762073Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.762125Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.843768Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.843803Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.844379Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.967628Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.970146Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request 
db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.970169Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.970395Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:17611, port: 17611 2025-06-03T10:25:04.970426Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:04.977220Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.023956Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:25:05.069788Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****7GRg (60FF10E3) () has now valid token of ldapuser@ldap 2025-06-03T10:25:05.440521Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667182752163520:2221];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmpaht1pu/pdisk_1.dat 2025-06-03T10:25:05.449700Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:05.459636Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.461441Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667182752163308:2079] 1748946305438024 != 1748946305438027 TServer::EnableGrpc on GrpcPort 30319, node 5 2025-06-03T10:25:05.474167Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.474182Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.474186Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.474240Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.529372Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.531235Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.531256Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.531458Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:61595, port: 61595 2025-06-03T10:25:05.531491Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:05.543384Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.543414Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.544319Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.553147Z node 5 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.598307Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:61595. Invalid credentials 2025-06-03T10:25:05.598620Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****tEQg (458687FC) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:61595. Invalid credentials)' 2025-06-03T10:25:06.020215Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667187832985866:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:06.020251Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001538/r3tmp/tmp2GNKme/pdisk_1.dat 2025-06-03T10:25:06.037803Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6960, node 6 2025-06-03T10:25:06.048029Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:06.048042Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:06.048044Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:06.048096Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:06.104158Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:06.104243Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:06.104252Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:06.104450Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:11558, port: 11558 2025-06-03T10:25:06.104470Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:06.120821Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:06.120860Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:06.121907Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:06.192335Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:06.233537Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:11558. 
Invalid credentials 2025-06-03T10:25:06.233729Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****68Dg (EE6B346F) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:11558. Invalid credentials)' |59.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |59.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |59.6%| [LD] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/ydb-core-fq-libs-actors-ut |59.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |59.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |59.6%| [LD] {RESULT} $(B)/ydb/core/ydb_convert/ut/ydb-core-ydb_convert-ut |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsUrisCreatorTest::CreateUrisFromHostnamesUnknownScheme [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-03T10:25:03.229712Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667175161520550:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.230179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpgnYM9K/pdisk_1.dat 2025-06-03T10:25:03.378741Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:03.381456Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667175161520518:2079] 1748946303227335 != 1748946303227338 TServer::EnableGrpc on GrpcPort 7032, node 1 2025-06-03T10:25:03.405520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.405537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.405539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.405581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.425067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:03.425110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:03.429744Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:03.542691Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.545438Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.545456Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target 
database candidates(1): /Root 2025-06-03T10:25:03.546027Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:1064, port: 1064 2025-06-03T10:25:03.546062Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:03.605592Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:03.651215Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:03.694471Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****8K8w (625130B8) () has now valid token of ldapuser@ldap 2025-06-03T10:25:03.985204Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667175385767061:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.985233Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpcKInFL/pdisk_1.dat 2025-06-03T10:25:04.007658Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.008888Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667175385767039:2079] 1748946303985089 != 1748946303985092 TServer::EnableGrpc on GrpcPort 2088, node 2 2025-06-03T10:25:04.037588Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.037606Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.037609Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.037672Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.090494Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.090524Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.091395Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.119950Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.122171Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.122204Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.122421Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:20233, port: 20233 2025-06-03T10:25:04.122450Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.173713Z 
node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:04.221910Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:04.222205Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:04.222226Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.269528Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.313523Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:04.313911Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****1R5g (C2696D33) () has now valid token of ldapuser@ldap 2025-06-03T10:25:04.478074Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667179891876936:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.478104Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpYsTBlY/pdisk_1.dat 2025-06-03T10:25:04.495452Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.495701Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667179891876916:2079] 1748946304477919 != 1748946304477922 TServer::EnableGrpc on GrpcPort 5958, node 3 2025-06-03T10:25:04.507263Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.507278Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.507280Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.507348Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.582799Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.582825Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.583908Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.629158Z node 3 :TICKET_PARSER 
DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.630651Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.630669Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.630896Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:17615, port: 17615 2025-06-03T10:25:04.630942Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.681801Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:04.733766Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****YKVA (D3E63D37) () has now valid token of ldapuser@ldap 2025-06-03T10:25:04.986985Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667180729522096:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.987042Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpB2vjAl/pdisk_1.dat 2025-06-03T10:25:05.004377Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.004606Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667180729522074:2079] 1748946304986796 != 1748946304986799 TServer::EnableGrpc on GrpcPort 15632, node 4 2025-06-03T10:25:05.016879Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.016893Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.016896Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.016960Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.092915Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.092948Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.093889Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.094502Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.097086Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.097103Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.097336Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://qqq:11214 ldaps://localhost:11214 ldaps://localhost:11111, port: 11214 2025-06-03T10:25:05.097362Z node 4 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.162118Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:05.206119Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:05.206410Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:05.206424Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.250046Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.300353Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.300775Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****bw9A (35CBDB89) () has now valid token of ldapuser@ldap 2025-06-03T10:25:05.638438Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667183036266172:2155];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpRqsmfr/pdisk_1.dat 2025-06-03T10:25:05.651623Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:05.661935Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.663175Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667183036266031:2079] 1748946305636587 != 1748946305636590 TServer::EnableGrpc on GrpcPort 31653, node 5 2025-06-03T10:25:05.675192Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.675213Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.675216Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.675276Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.746098Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.746139Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.747175Z node 5 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.763032Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.765554Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.765583Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.765780Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:12646, port: 12646 2025-06-03T10:25:05.765806Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.833618Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:25:05.877654Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:05.877872Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:05.877886Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:05.922265Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:05.969482Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:05.969954Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****XPKA (A6FA246B) () has now valid token of ldapuser@ldap 2025-06-03T10:25:06.200461Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667188502528907:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:06.200499Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152f/r3tmp/tmpraknRU/pdisk_1.dat 2025-06-03T10:25:06.222666Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:06.223234Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511667188502528887:2079] 1748946306200276 != 1748946306200279 TServer::EnableGrpc on GrpcPort 4490, node 6 2025-06-03T10:25:06.239328Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-03T10:25:06.239343Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:06.239346Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:06.239405Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:06.273235Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:06.275626Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:06.275650Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:06.275801Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:2179, port: 2179 2025-06-03T10:25:06.275825Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:06.307250Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:06.307289Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:06.308370Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:06.333614Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-03T10:25:06.333649Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:2179. Bad search filter 2025-06-03T10:25:06.333883Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****vdCw (3DCE9DAD) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldaps://localhost:2179. Bad search filter)' >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] >> TSchemeShardCheckProposeSize::CopyTable >> BindQueue::Basic [GOOD] >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes_reboots/unittest |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes_reboots/unittest |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_replication_reboots/unittest |59.6%| [TA] $(B)/ydb/library/yql/tests/sql/dq_file/part11/test-results/pytest/{meta.json ... 
results_accumulator.log} |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hulldb/fresh/ut/unittest >> TBlobStorageHullFresh::AppendixPerf_Tune [GOOD] |59.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_user_attributes_reboots/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] >> TSchemeShardCheckProposeSize::CopyTable [GOOD] >> TSchemeShardCheckProposeSize::CopyTables >> TSchemeShardTest::Boot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/nodewarden/ut/unittest >> TBlobStorageWardenTest::ObtainPDiskKeySamePin [GOOD] Test command err: Pick Pick Disable nodeId# 51 Pick Enable nodeId# 51 Delete nodeId# 32 Pick Disable nodeId# 31 Pick Enable nodeId# 31 Pick Delete nodeId# 7 Add nodeId# 101 Add nodeId# 102 Disable nodeId# 5 Enable nodeId# 5 Pick Delete nodeId# 4 Disable nodeId# 94 Pick Disable nodeId# 75 Disable nodeId# 29 Delete nodeId# 77 Add nodeId# 103 Pick Delete nodeId# 33 Disable nodeId# 60 Enable nodeId# 29 Add nodeId# 104 Enable nodeId# 94 Delete nodeId# 27 Disable nodeId# 45 Add nodeId# 105 Delete nodeId# 90 Delete nodeId# 28 Enable nodeId# 60 Enable nodeId# 75 Pick Enable nodeId# 45 Add nodeId# 106 Delete nodeId# 99 Delete nodeId# 84 Disable nodeId# 26 Add nodeId# 107 Delete nodeId# 23 Pick Disable nodeId# 49 Disable nodeId# 68 Disable nodeId# 72 Add nodeId# 108 Disable nodeId# 73 Add nodeId# 109 Delete nodeId# 47 Enable nodeId# 49 Pick Add nodeId# 110 Delete nodeId# 105 Enable nodeId# 26 Add nodeId# 111 Enable nodeId# 68 Delete nodeId# 98 Delete nodeId# 74 Delete nodeId# 26 Pick Disable nodeId# 46 Delete nodeId# 22 Pick Pick Delete nodeId# 95 Disable nodeId# 63 Enable nodeId# 46 Add nodeId# 112 Add nodeId# 113 Add nodeId# 114 Disable nodeId# 15 Pick Delete nodeId# 109 Pick Disable nodeId# 103 Disable nodeId# 71 Disable nodeId# 111 Delete nodeId# 96 Delete nodeId# 14 Enable nodeId# 73 Disable nodeId# 82 Enable nodeId# 72 Disable nodeId# 16 Pick Add nodeId# 115 Enable nodeId# 82 Add nodeId# 116 Pick Add nodeId# 117 Add nodeId# 118 Disable nodeId# 50 Delete nodeId# 85 Pick Enable nodeId# 111 Add nodeId# 119 Enable nodeId# 50 Delete nodeId# 65 Pick Pick Enable nodeId# 71 Delete nodeId# 35 Enable nodeId# 15 Pick Pick Delete nodeId# 30 Pick Add nodeId# 120 Enable nodeId# 63 Add nodeId# 121 Enable nodeId# 16 Add nodeId# 122 Delete nodeId# 2 Disable nodeId# 87 Disable nodeId# 81 Delete nodeId# 89 Add nodeId# 123 Enable nodeId# 103 Enable nodeId# 87 Delete nodeId# 16 Add nodeId# 124 Pick Disable nodeId# 11 Enable nodeId# 11 Pick Delete nodeId# 55 Pick Disable nodeId# 118 Disable nodeId# 110 Add nodeId# 125 Enable nodeId# 110 Add nodeId# 126 Enable nodeId# 118 Add nodeId# 127 Enable nodeId# 81 Delete nodeId# 86 Add nodeId# 128 Add nodeId# 129 Pick Delete nodeId# 75 Disable nodeId# 73 Enable nodeId# 73 Delete nodeId# 128 Pick Disable nodeId# 18 Disable nodeId# 46 Disable nodeId# 54 Delete nodeId# 6 Pick Disable nodeId# 59 Add nodeId# 130 Pick Add nodeId# 131 Delete nodeId# 21 Delete nodeId# 17 Add nodeId# 132 Add nodeId# 133 Pick Delete nodeId# 44 Pick Add nodeId# 134 Add nodeId# 135 Enable nodeId# 18 Delete nodeId# 83 Add nodeId# 136 Disable nodeId# 125 Add nodeId# 137 Enable nodeId# 59 Pick Pick Pick Pick Delete nodeId# 38 Disable nodeId# 127 Enable nodeId# 125 Disable nodeId# 29 Disable nodeId# 80 Pick Delete nodeId# 92 Pick Pick Pick Disable nodeId# 13 Pick Delete nodeId# 25 Disable nodeId# 82 Add nodeId# 138 Add nodeId# 139 Delete nodeId# 113 Add nodeId# 140 Pick Add nodeId# 141 Pick Delete nodeId# 91 
Pick Delete nodeId# 1 Disable nodeId# 129 Pick Disable nodeId# 41 Pick Pick Pick Delete nodeId# 107 Enable nodeId# 82 Pick Delete nodeId# 135 Enable nodeId# 41 Disable nodeId# 139 Delete nodeId# 61 Add nodeId# 142 Add nodeId# 143 Pick Delete nodeId# 62 Pick Disable nodeId# 10 Enable nodeId# 10 Delete nodeId# 66 Pick Delete nodeId# 122 Add nodeId# 144 Delete nodeId# 140 Pick Disable nodeId# 52 Delete nodeId# 88 Enable nodeId# 127 Delete nodeId# 50 Pick Add nodeId# 145 Pick Pick Delete nodeId# 134 Pick Delete nodeId# 129 Enable nodeId# 139 Add nodeId# 146 Disable nodeId# 42 Pick Add nodeId# 147 Pick Enable nodeId# 54 Enable nodeId# 13 Add nodeId# 148 Delete nodeId# 100 Enable nodeId# 80 Enable nodeId# 42 Add nodeId# 149 Pick Enable nodeId# 52 Pick Enable nodeId# 29 Pick Pick Enable nodeId# 46 Pick Pick Delete nodeId# 31 Add nodeId# 150 Pick Add nodeId# 151 Add nodeId# 152 Pick Disable nodeId# 60 Pick Enable nodeId# 60 Delete nodeId# 41 Delete nodeId# 145 Add nodeId# 153 Disable nodeId# 114 Add nodeId# 154 Enable nodeId# 114 Add nodeId# 155 Add nodeId# 156 Add nodeId# 157 Add nodeId# 158 Delete nodeId# 124 Add nodeId# 159 Disable nodeId# 127 Disable nodeId# 110 Disable nodeId# 11 Enable nodeId# 110 Disable nodeId# 34 Enable nodeId# 11 Delete nodeId# 138 Pick Disable nodeId# 147 Pick Delete nodeId# 158 Delete nodeId# 150 Disable nodeId# 93 Pick Pick Disable nodeId# 5 Delete nodeId# 133 Delete nodeId# 40 Disable nodeId# 153 Enable nodeId# 5 Disable nodeId# 15 Enable nodeId# 127 Pick Delete nodeId# 82 Delete nodeId# 18 Disable nodeId# 69 Add nodeId# 160 Delete nodeId# 71 Add nodeId# 161 Enable nodeId# 34 Enable nodeId# 69 Enable nodeId# 93 Delete nodeId# 53 Disable nodeId# 143 Delete nodeId# 147 Pick Delete nodeId# 137 Pick Delete nodeId# 67 Delete nodeId# 79 Pick Disable nodeId# 48 Enable nodeId# 143 Disable nodeId# 126 Delete nodeId# 11 Enable nodeId# 15 Disable nodeId# 130 Delete nodeId# 72 Enable nodeId# 126 Add nodeId# 162 Pick Delete nodeId# 152 Disable nodeId# 126 Add nodeId# 163 Delete nodeId# 142 Delete nodeId# 57 Disable nodeId# 103 Add nodeId# 164 Delete nodeId# 45 Add nodeId# 165 Pick Add nodeId# 166 Delete nodeId# 125 Delete nodeId# 108 Add nodeId# 167 Pick Add nodeId# 168 Disable nodeId# 39 Add nodeId# 169 Pick Disable nodeId# 81 Disable nodeId# 19 Enable nodeId# 81 Enable nodeId# 126 Disable nodeId# 116 Disable nodeId# 73 Disable nodeId# 168 Delete nodeId# 104 Enable nodeId# 103 Enable nodeId# 153 Enable nodeId# 130 Disable nodeId# 141 Delete nodeId# 76 Delete nodeId# 93 Enable nodeId# 19 Pick Disable nodeId# 126 Pick Delete nodeId# 155 Enable nodeId# 126 Pick Enable nodeId# 141 Add nodeId# 170 Disable nodeId# 132 Add nodeId# 171 Enable nodeId# 168 Disable nodeId# 10 Disable nodeId# 12 Enable nodeId# 116 Delete nodeId# 19 Enable nodeId# 132 Disable nodeId# 167 Disable nodeId# 169 Disable nodeId# 64 Disable nodeId# 121 Add nodeId# 172 Enable nodeId# 169 Disable nodeId# 114 Add nodeId# 173 Delete nodeId# 172 Disable nodeId# 117 Disable nodeId# 163 Add nodeId# 174 Disable nodeId# 87 Disable nodeId# 141 Enable nodeId# 114 Enable nodeId# 163 Delete nodeId# 154 Delete nodeId# 149 Pick Pick Delete nodeId# 153 Enable nodeId# 87 Enable nodeId# 141 Pick Delete nodeId# 143 Pick Add nodeId# 175 Add nodeId# 176 Pick Delete nodeId# 64 Disable nodeId# 111 Disable nodeId# 81 Pick Disable nodeId# 3 Add nodeId# 177 Disable nodeId# 120 Delete nodeId# 9 Add nodeId# 178 Pick Delete nodeId# 112 Add nodeId# 179 Pick Delete nodeId# 141 Add nodeId# 180 Pick Enable nodeId# 120 Delete nodeId# 36 Delete 
nodeId# 121 Add nodeId# 181 Enable nodeId# 167 Disable nodeId# 54 Pick Add nodeId# 182 Disable nodeId# 151 Pick Delete nodeId# 144 Delete nodeId# 157 Add nodeId# 183 Add nodeId# 184 Enable nodeId# 73 Disable nodeId# 58 Pick Disable nodeId# 119 Delete nodeId# 126 Add nodeId# 185 Pick Disable nodeId# 183 Disable nodeId# 116 Enable nodeId# 12 Delete nodeId# 161 Enable nodeId# 54 Disable nodeId# 110 Pick Enable nodeId# 58 Delete nodeId# 63 Enable nodeId# 81 Add nodeId# 186 Enable nodeId# 151 Pick Add nodeId# 187 Add nodeId# 188 Delete nodeId# 175 Enable nodeId# 3 Pick Add nodeId# 189 Add nodeId# 190 Enable nodeId# 39 Delete nodeId# 183 Pick Delete nodeId# 168 Disable nodeId# 12 Pick Delete nodeId# 110 Add nodeId# 191 Enable nodeId# 48 Add nodeId# 192 Disable nodeId# 185 Enable nodeId# 116 Pick Pick Delete nodeId# 131 Disable nodeId# 49 Add nodeId# 193 Disable nodeId# 156 Disable nodeId# 165 Add nodeId# 194 Delete nodeId# 3 Enable nodeId# 156 Add nodeId# 195 Delete nodeId# 81 Enable nodeId# 12 Pick Delete nodeId# 163 Enable nodeId# 111 Add nodeId# 196 Add nodeId# 197 Enable nodeId# 117 Pick Delete nodeId# 37 Pick Delete nodeId# 101 Disable nodeId# 73 Pick Delete nodeId# 106 Disable nodeId# 43 Pick Delete nodeId# 146 Enable nodeId# 43 Add nodeId# 198 Disable nodeId# 179 Disable nodeId# 178 Pick Disable nodeId# 48 Pick Pick Delete nodeId# 8 Enable nodeId# 10 Delete nodeId# 193 Add nodeId# 199 Disable nodeId# 120 Delete nodeId# 127 Enable nodeId# 49 Add nodeId# 200 Enable nodeId# 179 Enable nodeId# 178 Enable nodeId# 73 Add nodeId# 201 Delete nodeId# 173 Delete nodeId# 24 Enable nodeId# 185 Pick Pick Pick Disable nodeId# 97 Pick Delete nodeId# 42 Delete nodeId# 34 Add nodeId# 202 Add nodeId# 203 Pick Disable nodeId# 117 Delete nodeId# 148 Pick Pick Delete nodeId# 120 Disable nodeId# 73 Add nodeId# 204 Pick Add nodeId# 205 Enable nodeId# 119 Delete nodeId# 166 Delete nodeId# 48 Pick Delete nodeId# 136 Delete nodeId# 191 Disable nodeId# 87 Add nodeId# 206 Add nodeId# 207 Pick Pick Delete nodeId# 94 Delete nodeId# 29 Pick Disable nodeId# 159 Pick Add nodeId# 208 Disable nodeId# 170 Add nodeId# 209 Delete nodeId# 201 Disable nodeId# 59 Add nodeId# 210 Pick Disable nodeId# 196 Delete nodeId# 203 Delete nodeId# 118 Enable nodeId# 159 Disable nodeId# 102 Enable nodeId# 170 Pick Add nodeId# 211 Enable nodeId# 117 Disable nodeId# 189 Delete nodeId# 80 Pick Delete nodeId# 70 Add nodeId# 212 Add nodeId# 213 Enable nodeId# 97 Delete nodeId# 170 Disable nodeId# 213 Enable nodeId# 189 Pick Delete nodeId# 198 Add nodeId# 214 Pick Delete nodeId# 171 Disable nodeId# 12 Delete nodeId# 20 Delete nodeId# 119 Pick Pick Delete nodeId# 60 Add nodeId# 215 Delete nodeId# 54 Pick Enable nodeId# 12 Add nodeId# 216 Enable nodeId# 87 Pick Add nodeId# 217 Delete nodeId# 87 Enable nodeId# 73 Delete nodeId# 162 Disable nodeId# 52 Disable nodeId# 210 Enable nodeId# 213 Enable nodeId# 165 Add nodeId# 218 Add nodeId# 219 Delete nodeId# 213 Add nodeId# 220 Add nodeId# 221 Disable nodeId# 174 Add nodeId# 222 Delete nodeId# 222 Add nodeId# 223 Disable nodeId# 165 Pick Enable nodeId# 165 Disable nodeId# 97 Pick Add nodeId# 224 Delete nodeId# 211 Pick Enable nodeId# 196 Delete nodeId# 199 Add nodeId# 225 Enable nodeId# 210 Pick Enable nodeId# 97 Disable nodeId# 117 Delete nodeId# 196 Enable nodeId# 59 Disable nodeId# 210 Pick Pick Pick Delete nodeId# 221 Disable nodeId# 205 Enable nodeId# 117 Add nodeId# 226 Delete nodeId# 205 Delete nodeId# 202 Delete nodeId# 180 Pick Enable nodeId# 52 Disable nodeId# 218 Enable nodeId# 218 Disable 
nodeId# 132 Add nodeId# 227 Pick Pick Disable nodeId# 184 Add nodeId# 228 Enable nodeId# 210 Pick Pick Enable nodeId# 132 Add nodeId# 229 Add nodeId# 230 Delete nodeId# 73 Add nodeId# 231 Enable nodeId# 102 Pick Delete nodeId# 5 Add nodeId# 232 Disable nodeId# 192 Delete nodeId# 204 Enable nodeId# 184 Enable nodeId# 174 Add nodeId# 233 Disable nodeId# 103 Disable nodeId# 217 Delete nodeId# 159 Delete nodeId# 15 Pick Pick Enable nodeId# 103 Pick Enable nodeId# 192 Enable nodeId# 217 Pick Pick Add nodeId# 234 Delete nodeId# 190 Delete nodeId# 151 Add nodeId# 235 Delete nodeId# 194 Delete nodeId# 97 Disable nodeId# 116 Add nodeId# 236 Add nodeId# 237 Add nodeId# 238 Delete nodeId# 186 Enable nodeId# 116 Disable nodeId# 46 Disable nodeId# 130 Add nodeId# 239 Delete nodeId# 51 Add nodeId# 240 Enable nodeId# 46 Disable nodeId# 195 Add nodeId# 241 Delete nodeId# 215 Disable nodeId# 217 Pick Enable nodeId# 217 Pick Disable nodeId# 207 Pick Delete nodeId# 185 Enable nodeId# 130 Disable nodeId# 178 Delete nodeId# 160 Enable nodeId# 178 Delete nodeId# 174 Delete nodeId# 184 Enable nodeId# 207 Add nodeId# 242 Disable nodeId# 214 Delete nodeId# 212 Add nodeId# 243 Add nodeId# 244 Delete nodeId# 226 Disable nodeId# 207 Pick Disable nodeId# 176 Enable nodeId# 214 Enable nodeId# 207 Add nodeId# 245 Delete nodeId# 240 Delete nodeId# 12 Disable nodeId# 230 Disable nodeId# 233 Pick Delete nodeId# 224 Pick Pick Enable nodeId# 195 Add nodeId# 246 Enable nodeId# 176 Add nodeId# 247 Delete nodeId# 216 Enable nodeId# 230 Pick Delete nodeId# 130 Disable nodeId# 219 Add nodeId# 248 Pick Add nodeId# 249 Add nodeId# 250 Delete nodeId# 123 Add nodeId# 251 Enable nodeId# 233 Add nodeId# 252 Pick Enable nodeId# 219 Disable nodeId# 209 Delete nodeId# 220 Delete nodeId# 132 Delete nodeId# 115 Delete nodeId# 207 Delete nodeId# 192 Add nodeId# 253 Pick Add nodeId# 254 Disable nodeId# 178 Pick Pick Pick Pick Disable nodeId# 250 Add nodeId# 255 Add nodeId# 256 Enable nodeId# ... 
0141 Disable nodeId# 19890 Pick Add nodeId# 20217 Add nodeId# 20218 Enable nodeId# 20147 Add nodeId# 20219 Enable nodeId# 19890 Add nodeId# 20220 Disable nodeId# 20199 Pick Disable nodeId# 20216 Disable nodeId# 20208 Delete nodeId# 20215 Pick Pick Delete nodeId# 20105 Delete nodeId# 20140 Disable nodeId# 19913 Disable nodeId# 20180 Pick Add nodeId# 20221 Add nodeId# 20222 Add nodeId# 20223 Enable nodeId# 19913 Add nodeId# 20224 Pick Pick Pick Disable nodeId# 20138 Pick Disable nodeId# 20191 Disable nodeId# 20219 Delete nodeId# 20203 Pick Add nodeId# 20225 Delete nodeId# 20166 Pick Add nodeId# 20226 Enable nodeId# 20191 Delete nodeId# 20212 Pick Enable nodeId# 20141 Enable nodeId# 20180 Pick Add nodeId# 20227 Pick Add nodeId# 20228 Disable nodeId# 20147 Disable nodeId# 20228 Add nodeId# 20229 Add nodeId# 20230 Pick Enable nodeId# 20208 Enable nodeId# 20199 Pick Enable nodeId# 20216 Disable nodeId# 20213 Pick Pick Enable nodeId# 20228 Pick Delete nodeId# 20190 Disable nodeId# 20178 Disable nodeId# 20214 Pick Disable nodeId# 20218 Pick Pick Disable nodeId# 20197 Enable nodeId# 20218 Pick Add nodeId# 20231 Disable nodeId# 20143 Enable nodeId# 20197 Add nodeId# 20232 Enable nodeId# 20219 Add nodeId# 20233 Enable nodeId# 20214 Add nodeId# 20234 Add nodeId# 20235 Pick Delete nodeId# 20223 Pick Add nodeId# 20236 Disable nodeId# 20211 Enable nodeId# 20138 Add nodeId# 20237 Delete nodeId# 20143 Delete nodeId# 20236 Disable nodeId# 20067 Disable nodeId# 20174 Disable nodeId# 19913 Add nodeId# 20238 Disable nodeId# 20192 Disable nodeId# 20220 Enable nodeId# 20178 Pick Delete nodeId# 19913 Add nodeId# 20239 Disable nodeId# 20221 Enable nodeId# 20211 Enable nodeId# 20192 Disable nodeId# 20168 Add nodeId# 20240 Disable nodeId# 20191 Pick Enable nodeId# 20221 Disable nodeId# 20240 Delete nodeId# 20001 Enable nodeId# 20147 Delete nodeId# 20228 Disable nodeId# 20056 Disable nodeId# 20110 Disable nodeId# 20237 Disable nodeId# 20094 Disable nodeId# 20104 Enable nodeId# 20220 Enable nodeId# 20240 Delete nodeId# 20200 Disable nodeId# 20196 Delete nodeId# 20107 Delete nodeId# 20238 Disable nodeId# 20159 Disable nodeId# 20148 Delete nodeId# 20180 Enable nodeId# 20148 Delete nodeId# 20222 Enable nodeId# 20213 Add nodeId# 20241 Delete nodeId# 20240 Enable nodeId# 20104 Delete nodeId# 20094 Disable nodeId# 20103 Enable nodeId# 20103 Delete nodeId# 19977 Enable nodeId# 20110 Pick Pick Disable nodeId# 20201 Disable nodeId# 20103 Pick Add nodeId# 20242 Pick Enable nodeId# 20159 Disable nodeId# 20198 Enable nodeId# 20201 Disable nodeId# 20193 Enable nodeId# 20103 Disable nodeId# 20202 Disable nodeId# 20161 Delete nodeId# 20148 Disable nodeId# 20239 Pick Enable nodeId# 20161 Add nodeId# 20243 Enable nodeId# 20239 Disable nodeId# 20103 Add nodeId# 20244 Pick Delete nodeId# 20213 Disable nodeId# 20219 Enable nodeId# 20237 Enable nodeId# 20198 Add nodeId# 20245 Add nodeId# 20246 Pick Pick Delete nodeId# 20144 Disable nodeId# 20216 Disable nodeId# 20211 Enable nodeId# 20219 Add nodeId# 20247 Pick Delete nodeId# 20214 Disable nodeId# 20201 Pick Disable nodeId# 20138 Pick Disable nodeId# 20219 Disable nodeId# 20147 Delete nodeId# 20208 Pick Add nodeId# 20248 Disable nodeId# 20169 Delete nodeId# 20015 Pick Disable nodeId# 20209 Enable nodeId# 20216 Disable nodeId# 20151 Disable nodeId# 20226 Pick Disable nodeId# 20218 Delete nodeId# 20147 Pick Delete nodeId# 20206 Enable nodeId# 20219 Pick Enable nodeId# 20168 Disable nodeId# 20239 Enable nodeId# 20218 Disable nodeId# 20104 Add nodeId# 20249 Delete nodeId# 20225 Pick Enable 
nodeId# 20239 Delete nodeId# 20243 Delete nodeId# 20156 Disable nodeId# 20072 Add nodeId# 20250 Enable nodeId# 20138 Delete nodeId# 20239 Enable nodeId# 20211 Enable nodeId# 20067 Pick Enable nodeId# 20169 Enable nodeId# 20196 Pick Enable nodeId# 20151 Enable nodeId# 20072 Enable nodeId# 20174 Pick Delete nodeId# 20138 Delete nodeId# 20161 Delete nodeId# 20191 Disable nodeId# 20162 Add nodeId# 20251 Enable nodeId# 20193 Disable nodeId# 20023 Disable nodeId# 20195 Add nodeId# 20252 Add nodeId# 20253 Delete nodeId# 20218 Disable nodeId# 20224 Enable nodeId# 20162 Delete nodeId# 20192 Disable nodeId# 20237 Delete nodeId# 20199 Pick Enable nodeId# 20103 Enable nodeId# 20104 Pick Add nodeId# 20254 Pick Pick Pick Delete nodeId# 20072 Disable nodeId# 20141 Disable nodeId# 20232 Delete nodeId# 20227 Pick Add nodeId# 20255 Pick Disable nodeId# 20130 Pick Enable nodeId# 20224 Pick Pick Delete nodeId# 20252 Pick Enable nodeId# 20141 Delete nodeId# 20232 Add nodeId# 20256 Disable nodeId# 20217 Add nodeId# 20257 Add nodeId# 20258 Enable nodeId# 20217 Enable nodeId# 20195 Pick Pick Pick Enable nodeId# 20201 Add nodeId# 20259 Add nodeId# 20260 Enable nodeId# 20023 Delete nodeId# 20168 Enable nodeId# 20202 Disable nodeId# 20216 Enable nodeId# 20056 Delete nodeId# 20230 Delete nodeId# 20250 Add nodeId# 20261 Delete nodeId# 19890 Pick Enable nodeId# 20226 Disable nodeId# 20196 Disable nodeId# 20110 Enable nodeId# 20110 Pick Pick Enable nodeId# 20130 Delete nodeId# 20261 Pick Pick Enable nodeId# 20216 Disable nodeId# 20220 Add nodeId# 20262 Disable nodeId# 20197 Disable nodeId# 20217 Pick Pick Delete nodeId# 20174 Add nodeId# 20263 Delete nodeId# 20220 Add nodeId# 20264 Enable nodeId# 20217 Pick Pick Pick Disable nodeId# 20103 Enable nodeId# 20197 Pick Add nodeId# 20265 Disable nodeId# 20255 Disable nodeId# 20265 Pick Enable nodeId# 20209 Delete nodeId# 20056 Pick Pick Delete nodeId# 20159 Add nodeId# 20266 Enable nodeId# 20103 Enable nodeId# 20196 Add nodeId# 20267 Delete nodeId# 20198 Pick Enable nodeId# 20255 Disable nodeId# 20217 Add nodeId# 20268 Delete nodeId# 20266 Add nodeId# 20269 Enable nodeId# 20217 Add nodeId# 20270 Disable nodeId# 20256 Delete nodeId# 20221 Pick Add nodeId# 20271 Pick Pick Disable nodeId# 20209 Add nodeId# 20272 Add nodeId# 20273 Pick Enable nodeId# 20237 Enable nodeId# 20265 Add nodeId# 20274 Enable nodeId# 20256 Add nodeId# 20275 Pick Add nodeId# 20276 Pick Add nodeId# 20277 Pick Delete nodeId# 20160 Disable nodeId# 20103 Add nodeId# 20278 Disable nodeId# 20151 Add nodeId# 20279 Enable nodeId# 20209 Delete nodeId# 20242 Disable nodeId# 20257 Disable nodeId# 20264 Enable nodeId# 20103 Enable nodeId# 20151 Add nodeId# 20280 Delete nodeId# 20262 Pick Delete nodeId# 20278 Delete nodeId# 20229 Disable nodeId# 20260 Pick Enable nodeId# 20264 Disable nodeId# 20254 Delete nodeId# 20162 Enable nodeId# 20260 Pick Delete nodeId# 20270 Disable nodeId# 20141 Add nodeId# 20281 Delete nodeId# 20224 Delete nodeId# 20110 Delete nodeId# 20279 Delete nodeId# 20274 Add nodeId# 20282 Pick Pick Delete nodeId# 20103 Delete nodeId# 20254 Add nodeId# 20283 Enable nodeId# 20257 Add nodeId# 20284 Disable nodeId# 20204 Add nodeId# 20285 Pick Enable nodeId# 20204 Pick Delete nodeId# 20267 Disable nodeId# 20246 Pick Delete nodeId# 20141 Disable nodeId# 20209 Pick Delete nodeId# 20259 Disable nodeId# 20244 Add nodeId# 20286 Enable nodeId# 20209 Disable nodeId# 20241 Add nodeId# 20287 Delete nodeId# 20268 Disable nodeId# 20195 Enable nodeId# 20244 Enable nodeId# 20195 Delete nodeId# 20263 Delete nodeId# 
20104 Enable nodeId# 20246 Enable nodeId# 20241 Pick Disable nodeId# 20163 Add nodeId# 20288 Enable nodeId# 20163 Pick Delete nodeId# 20237 Pick Disable nodeId# 19972 Enable nodeId# 19972 Add nodeId# 20289 Delete nodeId# 20273 Pick Add nodeId# 20290 Disable nodeId# 20233 Pick Delete nodeId# 20287 Delete nodeId# 20269 Pick Add nodeId# 20291 Delete nodeId# 20290 Disable nodeId# 20284 Enable nodeId# 20284 Add nodeId# 20292 Add nodeId# 20293 Add nodeId# 20294 Delete nodeId# 20258 Delete nodeId# 20253 Enable nodeId# 20233 Delete nodeId# 20178 Delete nodeId# 20130 Disable nodeId# 20256 Add nodeId# 20295 Enable nodeId# 20256 Disable nodeId# 20282 Add nodeId# 20296 Pick Add nodeId# 20297 Pick Disable nodeId# 20276 Pick Enable nodeId# 20282 Disable nodeId# 20280 Disable nodeId# 20226 Pick Add nodeId# 20298 Pick Enable nodeId# 20276 Disable nodeId# 20217 Add nodeId# 20299 Add nodeId# 20300 Enable nodeId# 20226 Enable nodeId# 20217 Delete nodeId# 20272 Disable nodeId# 20196 Pick Delete nodeId# 20217 Disable nodeId# 20023 Delete nodeId# 20233 Enable nodeId# 20023 Delete nodeId# 20231 Disable nodeId# 20284 Enable nodeId# 20280 Enable nodeId# 20196 Delete nodeId# 20235 Add nodeId# 20301 Disable nodeId# 20244 Delete nodeId# 20280 Enable nodeId# 20284 Disable nodeId# 20260 Add nodeId# 20302 Pick Enable nodeId# 20260 Disable nodeId# 20288 Delete nodeId# 20288 Disable nodeId# 20291 Pick Add nodeId# 20303 Enable nodeId# 20244 Add nodeId# 20304 Add nodeId# 20305 Pick Disable nodeId# 20234 Disable nodeId# 20286 Enable nodeId# 20286 Pick Delete nodeId# 20241 Add nodeId# 20306 Enable nodeId# 20291 Delete nodeId# 20204 Pick Disable nodeId# 20300 Disable nodeId# 20248 Pick Disable nodeId# 20255 Pick Pick Delete nodeId# 20293 Enable nodeId# 20300 Add nodeId# 20307 Disable nodeId# 20169 Disable nodeId# 20151 Add nodeId# 20308 Add nodeId# 20309 Enable nodeId# 20169 Enable nodeId# 20234 Delete nodeId# 20284 Enable nodeId# 20248 Add nodeId# 20310 Add nodeId# 20311 Add nodeId# 20312 Enable nodeId# 20255 Pick Enable nodeId# 20151 Pick Add nodeId# 20313 Disable nodeId# 20308 Add nodeId# 20314 Add nodeId# 20315 Delete nodeId# 20245 Delete nodeId# 20195 Disable nodeId# 20260 Add nodeId# 20316 Disable nodeId# 20197 Pick Enable nodeId# 20308 Pick Delete nodeId# 20275 Add nodeId# 20317 Delete nodeId# 20196 Add nodeId# 20318 Delete nodeId# 20310 Disable nodeId# 20296 Disable nodeId# 20244 Delete nodeId# 20292 Disable nodeId# 20311 Add nodeId# 20319 Add nodeId# 20320 Enable nodeId# 20296 Enable nodeId# 20260 Disable nodeId# 20226 Enable nodeId# 20244 Enable nodeId# 20226 Pick Enable nodeId# 20311 Enable nodeId# 20197 Pick Pick Pick Delete nodeId# 20265 Add nodeId# 20321 Delete nodeId# 20201 Delete nodeId# 20297 Delete nodeId# 20321 Disable nodeId# 20305 Pick Delete nodeId# 20163 Pick Pick Pick Enable nodeId# 20305 Pick Delete nodeId# 20289 Add nodeId# 20322 Add nodeId# 20323 Add nodeId# 20324 Pick Pick Add nodeId# 20325 Add nodeId# 20326 Add nodeId# 20327 Delete nodeId# 20197 Delete nodeId# 20244 Pick Delete nodeId# 20296 Disable nodeId# 20301 Add nodeId# 20328 Disable nodeId# 20316 Delete nodeId# 20260 Pick Disable nodeId# 20315 Disable nodeId# 20312 Pick Pick Enable nodeId# 20316 Delete nodeId# 20276 Delete nodeId# 20211 Delete nodeId# 20294 Enable nodeId# 20315 Add nodeId# 20329 Pick Enable nodeId# 20312 Add nodeId# 20330 Delete nodeId# 20285 Add nodeId# 20331 Disable nodeId# 20023 Pick Enable nodeId# 20301 Enable nodeId# 20023 Pick Delete nodeId# 20319 Pick Delete nodeId# 20023 Pick Delete nodeId# 20202 Pick Delete nodeId# 
20308 Disable nodeId# 20291 Pick Add nodeId# 20332 Delete nodeId# 20330 Delete nodeId# 20193 Disable nodeId# 20327 Add nodeId# 20333 Delete nodeId# 20325 Add nodeId# 20334 Enable nodeId# 20327 Delete nodeId# 20169 Add nodeId# 20335 Pick Add nodeId# 20336 Add nodeId# 20337 Delete nodeId# 20264 Pick Disable nodeId# 20286 Add nodeId# 20338 Enable nodeId# 20291 Add nodeId# 20339 Disable nodeId# 20249 Delete nodeId# 20323 Add nodeId# 20340 Disable nodeId# 20311 Enable nodeId# 20311 Pick Disable nodeId# 20226 Add nodeId# 20341 Pick Add nodeId# 20342 Disable nodeId# 20301 Enable nodeId# 20286 Enable nodeId# 20226 Enable nodeId# 20301 Delete nodeId# 20333 Enable nodeId# 20249 Disable nodeId# 20338 Disable nodeId# 20316 Delete nodeId# 20249 Disable nodeId# 20305 Enable nodeId# 20316 Delete nodeId# 20257 Add nodeId# 20343 Enable nodeId# 20338 Add nodeId# 20344 Delete nodeId# 20234 Disable nodeId# 20317 Delete nodeId# 20331 Enable nodeId# 20317 Delete nodeId# 20337 Delete nodeId# 20209 Disable nodeId# 20246 Add nodeId# 20345 Add nodeId# 20346 Disable nodeId# 20300 Enable nodeId# 20305 Pick Enable nodeId# 20300 Add nodeId# 20347 Add nodeId# 20348 Delete nodeId# 20302 Enable nodeId# 20246 Delete nodeId# 20342 Delete nodeId# 20322 Delete nodeId# 20282 Pick Add nodeId# 20349 Pick Add nodeId# 20350 Delete nodeId# 20338 Delete nodeId# 20283 >> KqpScheme::CreateAndAlterTableWithPartitionBy >> test.py::test[blocks-lazy_nonstrict_with_scalar_ctx--Results] [GOOD] >> test.py::test[blocks-minmax_strings_filter--ForceBlocks] >> KqpScheme::CreateTableWithUniqConstraintPublicApi >> KqpAcl::ReadSuccess >> test.py::test[aggregate-group_by_session_only--Results] [GOOD] >> KqpScheme::DisableS3ExternalDataSource >> KqpScheme::QueryWithAlter >> TSchemeShardTest::Boot [GOOD] >> TSchemeShardTest::AlterTableKeyColumns >> KqpScheme::CreateDroppedTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestWriteVeryBigMessage [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:109:2057] recipient: [1:102:2135] 2025-06-03T10:24:32.492783Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:32.492819Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927938 is [1:154:2174] sender: [1:155:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:180:2057] recipient: [1:14:2061] 2025-06-03T10:24:32.506958Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:32.523550Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 1 actor [1:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: 
"rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-03T10:24:32.524085Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:186:2198] 2025-06-03T10:24:32.526886Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:186:2198] 2025-06-03T10:24:32.529762Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:187:2199] 2025-06-03T10:24:32.530637Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:187:2199] 2025-06-03T10:24:32.537430Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e50c953a-f2aa71b0-4f93395-352c51a_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.563913Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|bd52b036-90cc98d3-481f06d7-42d0c735_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.776246Z node 1 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-03T10:24:33.785116Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3271134-27488ac1-5b450ac0-edd20c9_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.856246Z node 1 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:288:2057] recipient: [1:100:2134] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:291:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:292:2057] recipient: [1:290:2285] Leader for TabletID 72057594037927937 is [1:293:2286] sender: [1:294:2057] recipient: [1:290:2285] 2025-06-03T10:24:33.863301Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:33.863325Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:24:33.863427Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:342:2327] 2025-06-03T10:24:33.863813Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:343:2328] 2025-06-03T10:24:33.867592Z node 1 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-03T10:24:33.867614Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [1:342:2327] 2025-06-03T10:24:33.868236Z node 1 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:33.868250Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [1:343:2328] Leader for TabletID 72057594037927937 is [1:293:2286] sender: [1:373:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:109:2057] recipient: [2:102:2135] 2025-06-03T10:24:34.174619Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:34.174645Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927938 is [2:154:2174] sender: [2:155:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:180:2057] recipient: [2:14:2061] 2025-06-03T10:24:34.178862Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:34.179044Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-03T10:24:34.179149Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:186:2198] 2025-06-03T10:24:34.179608Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [2:186:2198] 2025-06-03T10:24:34.179877Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:187:2199] 2025-06-03T10:24:34.180238Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [2:187:2199] 2025-06-03T10:24:34.181569Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|be96486c-f9ff13b5-cadd051a-a52f1a9e_0 generated for 
partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:34.234911Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|fc3ed7a5-7f0821f4-611aff7e-ad16536f_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:34.325657Z node 2 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-03T10:24:34.337827Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f981ea18-f3e25c6a-64924b37-2e4df9f2_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:34.398795Z node 2 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 !Reboot 72057594037927937 (actor [2:108:2139]) on event NKikimr::TEvPersQueue::TEvOffsets ! Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:287:2057] recipient: [2:100:2134] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:290:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:291:2057] recipient: [2:289:2284] Leader for TabletID 72057594037927937 is [2:292:2285] sender: [2:293:2057] recipient: [2:289:2284] 2025-06-03T10:24:34.410113Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:34.410139Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:24:34.410303Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:341:2326] 2025-06-03T10:24:34.410960Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:342:2327] 2025-06-03T10:24:34.416066Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:34.416094Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:341:2326] 2025-06-03T10:24:34.416875Z node 2 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:24:34.416893Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:342:2327] !Reboot 72057594037927937 (actor [2:108:2139]) rebooted! !Reboot 72057594037927937 (actor [2:108:2139]) tablet resolver refreshed! 
new actor is[2:292:2285] Leader for TabletID 72057594037927937 is [2:292:2285] sender: [2:398:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:292:2285] sender: [2:401:2057] recipient: [2:100:2134] Leader for TabletID 72057594037927937 is [2:292:2285] sender: [2:404:2057] recipient: [2:403:2359] Leader for TabletID 72057594037927937 is [2:292:2285] sender: [2:405:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:406:2360] sender: [2:407:2057] recipient: [2:403:2359] 2025-06-03T10:24:35.648267Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:35.648295Z node 2 :PE ... partition 0 generation 4 [53:431:2391] 2025-06-03T10:25:06.026172Z node 53 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:25:06.026202Z node 53 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 4 [53:432:2392] !Reboot 72057594037927937 (actor [53:293:2286]) rebooted! !Reboot 72057594037927937 (actor [53:293:2286]) tablet resolver refreshed! new actor is[53:380:2348] Leader for TabletID 72057594037927937 is [53:380:2348] sender: [53:487:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:104:2057] recipient: [54:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:104:2057] recipient: [54:102:2135] Leader for TabletID 72057594037927937 is [54:108:2139] sender: [54:109:2057] recipient: [54:102:2135] 2025-06-03T10:25:07.499490Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.499516Z node 54 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:150:2057] recipient: [54:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [54:150:2057] recipient: [54:148:2170] Leader for TabletID 72057594037927938 is [54:154:2174] sender: [54:155:2057] recipient: [54:148:2170] Leader for TabletID 72057594037927937 is [54:108:2139] sender: [54:180:2057] recipient: [54:14:2061] 2025-06-03T10:25:07.502565Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.502733Z node 54 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 54 actor [54:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 54 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 54 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 54 Important: false } 2025-06-03T10:25:07.502821Z node 54 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 
0 [54:186:2198] 2025-06-03T10:25:07.503262Z node 54 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [54:186:2198] 2025-06-03T10:25:07.503518Z node 54 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:187:2199] 2025-06-03T10:25:07.503805Z node 54 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [54:187:2199] 2025-06-03T10:25:07.504842Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d3f27b0c-7cffb08e-ed6481ae-16288304_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.516300Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|aa97da45-1e1714a8-28d234d0-762b9752_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.557412Z node 54 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-03T10:25:07.566945Z node 54 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d9b3a1e2-f1a0a4f6-a0f56b41-7c155039_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.612158Z node 54 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [54:108:2139] sender: [54:288:2057] recipient: [54:100:2134] Leader for TabletID 72057594037927937 is [54:108:2139] sender: [54:290:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:108:2139] sender: [54:292:2057] recipient: [54:291:2285] Leader for TabletID 72057594037927937 is [54:293:2286] sender: [54:294:2057] recipient: [54:291:2285] 2025-06-03T10:25:07.620550Z node 54 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.620576Z node 54 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:25:07.620685Z node 54 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [54:342:2327] 2025-06-03T10:25:07.621111Z node 54 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [54:343:2328] 2025-06-03T10:25:07.624397Z node 54 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:25:07.624429Z node 54 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [54:342:2327] 2025-06-03T10:25:07.624999Z node 54 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
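
Interleaved with the partition lifecycle, ownerinfo.cpp:30 mints write-ownership cookies such as default|d3f27b0c-…_0 followed by …_1 for the same owner "default": each re-registration of a writer on a partition yields a fresh random identifier plus a per-owner sequence suffix, which lets the partition distinguish and reject writes from a stale owner instance. The helper below is a hypothetical sketch of such cookie minting, inferred only from the cookie format visible in the log, not taken from ownerinfo.cpp.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <random>
    #include <sstream>
    #include <string>

    // Hypothetical mint: "<owner>|<random hex>_<per-owner counter>".
    // The counter grows on every re-registration, so "..._1" supersedes "..._0".
    std::string MintCookie(const std::string& owner,
                           std::map<std::string, uint64_t>& counters) {
        static std::mt19937_64 rng{std::random_device{}()};
        std::ostringstream out;
        out << owner << '|' << std::hex << rng() << std::dec
            << '_' << counters[owner]++;
        return out.str();
    }

    int main() {
        std::map<std::string, uint64_t> counters;
        std::cout << MintCookie("default", counters) << '\n';  // e.g. default|9f3c..._0
        std::cout << MintCookie("default", counters) << '\n';  // e.g. default|41aa..._1
        return 0;
    }
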
2025-06-03T10:25:07.625013Z node 54 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [54:343:2328] Leader for TabletID 72057594037927937 is [54:293:2286] sender: [54:373:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:104:2057] recipient: [55:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:104:2057] recipient: [55:102:2135] Leader for TabletID 72057594037927937 is [55:108:2139] sender: [55:109:2057] recipient: [55:102:2135] 2025-06-03T10:25:07.752544Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.752576Z node 55 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:150:2057] recipient: [55:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [55:150:2057] recipient: [55:148:2170] Leader for TabletID 72057594037927938 is [55:154:2174] sender: [55:155:2057] recipient: [55:148:2170] Leader for TabletID 72057594037927937 is [55:108:2139] sender: [55:180:2057] recipient: [55:14:2061] 2025-06-03T10:25:07.755758Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.755911Z node 55 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 55 actor [55:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 55 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 55 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 55 Important: false } 2025-06-03T10:25:07.755994Z node 55 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:186:2198] 2025-06-03T10:25:07.756427Z node 55 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [55:186:2198] 2025-06-03T10:25:07.756678Z node 55 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:187:2199] 2025-06-03T10:25:07.756992Z node 55 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [55:187:2199] 2025-06-03T10:25:07.758238Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|1ad941a4-f6cb7652-ce6073b7-7590153e_0 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.770989Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2644a3b2-77a8e482-df103ec3-c5faf72e_1 generated for partition 1 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.807922Z node 55 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. 
Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 2025-06-03T10:25:07.816484Z node 55 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c6bdbc30-5f5e02a2-927b7701-18372a53_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:07.858591Z node 55 :PERSQUEUE NOTICE: read.h:361: Have to remove new data from cache. Topic rt3.dc1--asdfgs--topic, tablet id72057594037927937, cookie 0 Leader for TabletID 72057594037927937 is [55:108:2139] sender: [55:288:2057] recipient: [55:100:2134] Leader for TabletID 72057594037927937 is [55:108:2139] sender: [55:291:2057] recipient: [55:14:2061] Leader for TabletID 72057594037927937 is [55:108:2139] sender: [55:292:2057] recipient: [55:290:2285] Leader for TabletID 72057594037927937 is [55:293:2286] sender: [55:294:2057] recipient: [55:290:2285] 2025-06-03T10:25:07.866397Z node 55 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:07.866424Z node 55 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:25:07.866578Z node 55 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [55:342:2327] 2025-06-03T10:25:07.867215Z node 55 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [55:343:2328] 2025-06-03T10:25:07.871429Z node 55 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:25:07.871465Z node 55 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [55:342:2327] 2025-06-03T10:25:07.872047Z node 55 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:25:07.872063Z node 55 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [55:343:2328] Leader for TabletID 72057594037927937 is [55:293:2286] sender: [55:373:2057] recipient: [55:14:2061] |59.6%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part5/pytest >> test.py::test[aggregate-group_by_session_only--Results] [GOOD] >> TSchemeShardTest::AlterTableKeyColumns [GOOD] >> TSchemeShardTest::AlterTableFollowers >> KqpOlapScheme::TtlRunInterval >> test.py::test[aggregate-avg_and_sum_by_value--Results] [GOOD] >> test.py::test[aggregate-error_type--Results] >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain |59.6%| [TA] $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |59.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |59.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |59.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest >> TSubDomainTest::UserAttributes >> KqpScheme::CreateAndAlterTableWithPartitionBy [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitionSizeCompat >> KqpScheme::CreateTableWithUniqConstraintPublicApi [GOOD] >> KqpScheme::CreateTableWithVectorIndex |59.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data/unittest >> KqpAcl::ReadSuccess [GOOD] >> KqpAcl::WriteSuccess >> TSchemeShardTest::AlterTableFollowers [GOOD] >> TSchemeShardTest::AlterTableSizeToSplit >> KqpScheme::CreateDroppedTable [GOOD] >> KqpScheme::CreateDropTableMultipleTime >> TSubDomainTest::CreateDummyTabletsInDifferentDomains >> TSubDomainTest::UserAttributes [GOOD] >> TSubDomainTest::UserAttributesApplyIf >> KqpOlapScheme::TtlRunInterval [GOOD] >> KqpOlapScheme::WithoutDefaultColumnFamily >> TSubDomainTest::LsLs >> test.py::test[aggregate-list_after_group-default.txt-ForceBlocks] [GOOD] >> test.py::test[aggregate-list_after_group-default.txt-Results] >> TSchemeShardTest::AlterTableSizeToSplit [GOOD] >> TSchemeShardTest::AlterTableSplitSchema >> KqpScheme::CreateAndAlterTableWithPartitionSizeCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsUncompat >> ConvertMiniKQLValueToYdbValueTest::SimpleInt32 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleInt64 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDate [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzDateTime [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleTzTimeStamp [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleDecimal [GOOD] >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] >> KqpScheme::CreateTableWithVectorIndex [GOOD] >> KqpScheme::CreateTableWithVectorIndexCovered >> test.py::test[aggregate-error_type--Results] [GOOD] >> test.py::test[aggregate-group_by_expr--Results] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDate [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzDateTime [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleTzTimeStamp [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleInt32TypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] >> TSubDomainTest::UserAttributesApplyIf [GOOD] >> TSubDomainTest::LsLs [GOOD] >> TSubDomainTest::LsAltered >> KqpOlapScheme::WithoutDefaultColumnFamily [GOOD] >> KqpOlapScheme::UnknownColumnFamily |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::SimpleUuid [GOOD] >> TSchemeShardTest::AlterTableSplitSchema [GOOD] >> TSchemeShardTest::AlterTableSettings >> KqpScheme::DisableS3ExternalDataSource [GOOD] >> KqpScheme::DoubleCreateExternalDataSource >> TSubDomainTest::CreateTableInsideAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::SimpleUuid [GOOD] >> KqpAcl::WriteSuccess [GOOD] >> KqpAcl::RecursiveCreateTableShouldSuccess >> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> 
TSubDomainTest::UserAttributesApplyIf [GOOD] Test command err: 2025-06-03T10:25:09.448387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667200537464789:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.505054Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001188/r3tmp/tmp4vYjfn/pdisk_1.dat 2025-06-03T10:25:09.545001Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667200537464602:2079] 1748946309438404 != 1748946309438407 2025-06-03T10:25:09.557491Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:13383 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:09.579070Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667200537464650:2096] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:09.581000Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667200537465140:2252] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:09.581039Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667200537464971:2148], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:09.581051Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667200537464971:2148], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:09.581114Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:09.581630Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667200537464572:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667200537465145:2253] 2025-06-03T10:25:09.581663Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667200537464572:2049] Subscribe: subscriber# [1:7511667200537465145:2253], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:09.581667Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667200537464575:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667200537465146:2253] 2025-06-03T10:25:09.581677Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667200537464575:2052] Subscribe: subscriber# [1:7511667200537465146:2253], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:09.581687Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667200537464578:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667200537465147:2253] 2025-06-03T10:25:09.581690Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667200537464578:2055] Subscribe: subscriber# [1:7511667200537465147:2253], path# /dc-1, domainOwnerId# 72057594046644480, 
capabilities# AckNotifications: true 2025-06-03T10:25:09.581696Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667200537465145:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537464572:2049] 2025-06-03T10:25:09.581702Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667200537465146:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537464575:2052] 2025-06-03T10:25:09.581703Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667200537464572:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667200537465145:2253] 2025-06-03T10:25:09.581707Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667200537464575:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667200537465146:2253] 2025-06-03T10:25:09.581708Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667200537465147:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537464578:2055] 2025-06-03T10:25:09.581711Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667200537464578:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667200537465147:2253] 2025-06-03T10:25:09.581716Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537465142:2253] 2025-06-03T10:25:09.581723Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537465143:2253] 2025-06-03T10:25:09.581735Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667200537465141:2253][/dc-1] Set up state: owner# [1:7511667200537464971:2148], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:09.581784Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667200537465144:2253] 2025-06-03T10:25:09.581793Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667200537465141:2253][/dc-1] Path was already updated: owner# [1:7511667200537464971:2148], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:09.581801Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667200537465145:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: 
sender# [1:7511667200537465142:2253], cookie# 1 2025-06-03T10:25:09.581823Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667200537465146:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667200537465143:2253], cookie# 1 2025-06-03T10:25:09.581829Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667200537465147:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667200537465144:2253], cookie# 1 2025-06-03T10:25:09.581835Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667200537464572:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667200537465145:2253], cookie# 1 2025-06-03T10:25:09.581840Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667200537464575:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667200537465146:2253], cookie# 1 2025-06-03T10:25:09.581844Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667200537464578:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667200537465147:2253], cookie# 1 2025-06-03T10:25:09.581850Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667200537465145:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537464572:2049], cookie# 1 2025-06-03T10:25:09.581854Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667200537465146:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537464575:2052], cookie# 1 2025-06-03T10:25:09.581857Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667200537465147:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537464578:2055], cookie# 1 2025-06-03T10:25:09.581862Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537465142:2253], cookie# 1 2025-06-03T10:25:09.581869Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667200537465141:2253][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:09.581873Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537465143:2253], cookie# 1 2025-06-03T10:25:09.581890Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667200537465141:2253][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:09.581895Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667200537465141:2253][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667200537465144:2253], cookie# 1 2025-06-03T10:25:09.581898Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667200537465141:2253][/dc-1] Unexpected sync response: sender# [1:7511667200537465144:2253], cookie# 1 2025-06-03T10:25:09.592710Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667200537464971:2148], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 
PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-06-03T10:25:09.592822Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667200537464971:2148], notify ... ifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967253:2049] 2025-06-03T10:25:10.546336Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:7511667207921967985:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967256:2052] 2025-06-03T10:25:10.546341Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:7511667207921967986:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967259:2055] 2025-06-03T10:25:10.546347Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967968:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967981:2356] 2025-06-03T10:25:10.546354Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967968:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967982:2356] 2025-06-03T10:25:10.546360Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][2:7511667207921967968:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Set up state: owner# [2:7511667207921967566:2113], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.546364Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967968:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers Version: 0 }: sender# [2:7511667207921967983:2356] 2025-06-03T10:25:10.546368Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:7511667207921967968:2356][/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers] Ignore empty state: owner# [2:7511667207921967566:2113], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.546372Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967253:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967978:2355] 2025-06-03T10:25:10.546374Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967253:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967984:2356] 2025-06-03T10:25:10.546378Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967256:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967979:2355] 2025-06-03T10:25:10.546381Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967256:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967985:2356] 2025-06-03T10:25:10.546384Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967259:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967980:2355] 2025-06-03T10:25:10.546387Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [2:7511667207921967259:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [2:7511667207921967986:2356] 2025-06-03T10:25:10.546396Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-03T10:25:10.546411Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7511667207921967967:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:10.546431Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667207921967566:2113], cacheItem# { Subscriber: { Subscriber: [2:7511667207921967967:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:10.546437Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: 
/dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 } 2025-06-03T10:25:10.546443Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7511667207921967968:2356] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:10.546453Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667207921967566:2113], cacheItem# { Subscriber: { Subscriber: [2:7511667207921967968:2356] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:10.546470Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667207921967987:2357], recipient# [2:7511667207921967965:2307], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:10.546847Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967966:2354][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [2:7511667207921967969:2354] 2025-06-03T10:25:10.546861Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967966:2354][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [2:7511667207921967970:2354] 2025-06-03T10:25:10.546865Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][2:7511667207921967966:2354][/dc-1/.metadata/workload_manager/delayed_requests] Set up state: owner# [2:7511667207921967566:2113], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.546873Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:7511667207921967966:2354][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [2:7511667207921967971:2354] 2025-06-03T10:25:10.546877Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:7511667207921967966:2354][/dc-1/.metadata/workload_manager/delayed_requests] Ignore empty state: owner# [2:7511667207921967566:2113], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 
0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.546887Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-03T10:25:10.546896Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [2:7511667207921967566:2113], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7511667207921967966:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:10.546905Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667207921967566:2113], cacheItem# { Subscriber: { Subscriber: [2:7511667207921967966:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:10.546922Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667207921967988:2358], recipient# [2:7511667207921967964:2306], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> test.py::test[window-win_func_aggr_stat--Results] [GOOD] >> TSchemeShardTest::AlterTableSettings [GOOD] >> TSchemeShardTest::AssignBlockStoreVolume >> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--Results] >> KqpOlapScheme::UnknownColumnFamily [GOOD] >> KqpOlapScheme::TwoSimilarColumnFamilies >> TSchemeShardTest::AssignBlockStoreVolume [GOOD] >> TSchemeShardTest::AssignBlockStoreVolumeDuringAlter >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsCompat >> TSubDomainTest::LsAltered [GOOD] >> KqpScheme::CreateTableWithVectorIndexCovered [GOOD] >> KqpScheme::CreateTableWithVectorIndexCaseIncentive >> KqpScheme::QueryWithAlter [GOOD] >> KqpScheme::RenameTable |59.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/benchmarks_init/py3test >> test_generator.py::TestTpcdsGenerator::test_s1_state_and_parts [GOOD] |59.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> 
TSchemeShardTest::AssignBlockStoreVolumeDuringAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter |59.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] >> TSubDomainTest::CreateTableInsidetThenStopTenantAndForceDeleteSubDomain [GOOD] >> TSubDomainTest::CreateTableInsideSubDomain >> test.py::test[blocks-minmax_strings_filter--ForceBlocks] [GOOD] >> test.py::test[blocks-minmax_strings_filter--Results] |59.7%| [TA] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/ydb-core-tx-datashard-ut_write |59.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/arrow/ydb-core-kqp-ut-arrow >> test.py::test[aggregate-list_after_group-default.txt-Results] [GOOD] >> test.py::test[aggregate-list_nullable--ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::LsAltered [GOOD] Test command err: 2025-06-03T10:25:10.274552Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667206670303018:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:10.274720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001184/r3tmp/tmp14GSHv/pdisk_1.dat 2025-06-03T10:25:10.343447Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667206670302859:2079] 1748946310272612 != 1748946310272615 2025-06-03T10:25:10.346092Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:17638 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:25:10.371065Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667206670303096:2088] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:10.373237Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667206670303395:2250] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:10.373269Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667206670303140:2114], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:10.373278Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667206670303140:2114], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:10.373377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:10.373796Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206670302835:2055] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206670303402:2251] 2025-06-03T10:25:10.373803Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206670302829:2049] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206670303400:2251] 2025-06-03T10:25:10.373821Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206670302829:2049] Subscribe: subscriber# [1:7511667206670303400:2251], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.373821Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206670302835:2055] Subscribe: subscriber# [1:7511667206670303402:2251], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.373838Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206670302832:2052] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206670303401:2251] 2025-06-03T10:25:10.373843Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206670302832:2052] Subscribe: subscriber# [1:7511667206670303401:2251], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.373855Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206670303400:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670302829:2049] 2025-06-03T10:25:10.373860Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206670303402:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670302835:2055] 2025-06-03T10:25:10.373864Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206670302829:2049] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206670303400:2251] 2025-06-03T10:25:10.373864Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206670303401:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670302832:2052] 2025-06-03T10:25:10.373869Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206670302835:2055] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206670303402:2251] 2025-06-03T10:25:10.373874Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206670302832:2052] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206670303401:2251] 2025-06-03T10:25:10.373877Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670303397:2251] 2025-06-03T10:25:10.373883Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670303399:2251] 2025-06-03T10:25:10.373893Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667206670303396:2251][/dc-1] Set up state: owner# [1:7511667206670303140:2114], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.373929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206670303398:2251] 2025-06-03T10:25:10.373936Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667206670303396:2251][/dc-1] Path was already updated: owner# [1:7511667206670303140:2114], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.373944Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206670303400:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303397:2251], cookie# 1 2025-06-03T10:25:10.373947Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206670303401:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303398:2251], cookie# 1 2025-06-03T10:25:10.373950Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206670303402:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303399:2251], cookie# 1 2025-06-03T10:25:10.373955Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206670302829:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303400:2251], cookie# 1 2025-06-03T10:25:10.373961Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206670302832:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303401:2251], cookie# 1 
2025-06-03T10:25:10.373965Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206670302835:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206670303402:2251], cookie# 1 2025-06-03T10:25:10.373980Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206670303400:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670302829:2049], cookie# 1 2025-06-03T10:25:10.373983Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206670303401:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670302832:2052], cookie# 1 2025-06-03T10:25:10.373986Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206670303402:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670302835:2055], cookie# 1 2025-06-03T10:25:10.373991Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670303397:2251], cookie# 1 2025-06-03T10:25:10.373996Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667206670303396:2251][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:10.373999Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670303398:2251], cookie# 1 2025-06-03T10:25:10.374002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667206670303396:2251][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:10.374006Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667206670303396:2251][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206670303399:2251], cookie# 1 2025-06-03T10:25:10.374009Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667206670303396:2251][/dc-1] Unexpected sync response: sender# [1:7511667206670303399:2251], cookie# 1 2025-06-03T10:25:10.384690Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667206670303140:2114], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 
1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-06-03T10:25:10.384784Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667206670303140:2114], notify ... eTxId: 281474976715658 CreateStep: 1748946311000 ParentPathId: 1 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946311000 ParentPathId: 1 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 ... (TRUNCATED) TClient::Ls request: /dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946310986 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946311000 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } Children { Name: ".sys" PathId: 18446744073709551615 ... 
(TRUNCATED) 2025-06-03T10:25:11.492702Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [2:7511667206375371722:2096] Handle TEvNavigate describe path /dc-1 2025-06-03T10:25:11.494460Z node 2 :TX_PROXY DEBUG: describe.cpp:272: Actor# [2:7511667210670339464:2329] HANDLE EvNavigateScheme /dc-1 2025-06-03T10:25:11.494490Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667206375371755:2113], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:11.494511Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][2:7511667206375372015:2252][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [2:7511667206375371755:2113], cookie# 4 2025-06-03T10:25:11.494526Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7511667206375372019:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372016:2252], cookie# 4 2025-06-03T10:25:11.494532Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7511667206375372020:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372017:2252], cookie# 4 2025-06-03T10:25:11.494538Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][2:7511667206375372021:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372018:2252], cookie# 4 2025-06-03T10:25:11.494546Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7511667206375371446:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372019:2252], cookie# 4 2025-06-03T10:25:11.494556Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7511667206375371449:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372020:2252], cookie# 4 2025-06-03T10:25:11.494562Z node 2 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [2:7511667206375371452:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [2:7511667206375372021:2252], cookie# 4 2025-06-03T10:25:11.494570Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7511667206375372019:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375371446:2049], cookie# 4 2025-06-03T10:25:11.494573Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7511667206375372020:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375371449:2052], cookie# 4 2025-06-03T10:25:11.494578Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][2:7511667206375372021:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375371452:2055], cookie# 4 2025-06-03T10:25:11.494584Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][2:7511667206375372015:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375372016:2252], cookie# 4 2025-06-03T10:25:11.494590Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][2:7511667206375372015:2252][/dc-1] 
Sync is in progress: cookie# 4, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:11.494594Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][2:7511667206375372015:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375372017:2252], cookie# 4 2025-06-03T10:25:11.494597Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][2:7511667206375372015:2252][/dc-1] Sync is done: cookie# 4, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:11.494602Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][2:7511667206375372015:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 5 Partial: 0 }: sender# [2:7511667206375372018:2252], cookie# 4 2025-06-03T10:25:11.494604Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][2:7511667206375372015:2252][/dc-1] Unexpected sync response: sender# [2:7511667206375372018:2252], cookie# 4 2025-06-03T10:25:11.494612Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [2:7511667206375371755:2113], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:11.494628Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [2:7511667206375371755:2113], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [2:7511667206375372015:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946310986 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:11.494640Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667206375371755:2113], cacheItem# { Subscriber: { Subscriber: [2:7511667206375372015:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 4 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946310986 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 4 IsSync: true Partial: 0 } 2025-06-03T10:25:11.494693Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667210670339465:2330], recipient# [2:7511667210670339464:2329], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.494699Z node 2 :TX_PROXY DEBUG: describe.cpp:356: Actor# [2:7511667210670339464:2329] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 
ErrorCount# 0 2025-06-03T10:25:11.494719Z node 2 :TX_PROXY DEBUG: describe.cpp:435: Actor# [2:7511667210670339464:2329] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:11.494903Z node 2 :TX_PROXY DEBUG: describe.cpp:448: Actor# [2:7511667210670339464:2329] Handle TEvDescribeSchemeResult Forward to# [2:7511667210670339463:2328] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 63 Record# Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946310986 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } StoragePools { Name: "/dc-1:test" Kind: "test" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 >> TSchemeShardTest::AssignBlockStoreCheckVersionInAlter [GOOD] >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter >> KqpOlapScheme::TwoSimilarColumnFamilies [GOOD] >> KqpOlapTypes::Decimal >> ConvertMiniKQLValueToYdbValueTest::SimpleBool [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalString [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty [GOOD] >> ConvertMiniKQLValueToYdbValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertMiniKQLValueToYdbValueTest::List [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] >> KqpScheme::DoubleCreateExternalDataSource [GOOD] >> KqpScheme::DoubleCreateExternalTable >> ConvertMiniKQLValueToYdbValueTest::Void [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Struct [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Tuple [GOOD] >> ConvertMiniKQLValueToYdbValueTest::Variant [GOOD] >> ConvertTableDescription::StorageSettings [GOOD] >> ConvertTableDescription::ColumnFamilies [GOOD] >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBool [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleBoolTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimal [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleDecimalTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalString [GOOD] >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] >> TBtreeIndexTPartLarge::History [GOOD] >> TFlatTableLongTxLarge::LargeDeltaChain >> KqpAcl::RecursiveCreateTableShouldSuccess [GOOD] >> KqpConstraints::AddSerialColumnForbidden >> TSchemeShardTest::AssignBlockStoreCheckFillGenerationInAlter [GOOD] >> TSchemeShardTest::BlockStoreVolumeLimits >> 
test.py::test[aggregate-group_by_expr_semi_join--Results] [GOOD] >> test.py::test[aggregate-group_by_gs_few_empty--Results] |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLValueToYdbValueTest::Dict [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestReadAtTimestamp_10 [GOOD] Test command err: 2025-06-03T10:24:48.887089Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667110688791729:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:48.887179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:49.019811Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667110070872828:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:49.173262Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002138/r3tmp/tmpEqrNQc/pdisk_1.dat 2025-06-03T10:24:49.185507Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:49.199299Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:49.403342Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:49.405271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:49.405285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:49.406102Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:49.406115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:49.426284Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:49.426315Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:49.427563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17142, node 1 2025-06-03T10:24:49.697599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002138/r3tmp/yandexYMDeVf.tmp 2025-06-03T10:24:49.697615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002138/r3tmp/yandexYMDeVf.tmp 2025-06-03T10:24:49.729466Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002138/r3tmp/yandexYMDeVf.tmp 2025-06-03T10:24:49.729620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-03T10:24:49.755309Z INFO: TTestServer started on Port 4909 GrpcPort 17142 TClient is connected to server localhost:4909 PQClient connected to localhost:17142 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:49.856896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:49.885029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:24:49.887691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:51.755178Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667123573694369:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:51.755462Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:51.755575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667123573694381:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:51.759856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:24:51.788024Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667123573694383:2335], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:24:51.870845Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667123573694487:2662] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } waiting... 2025-06-03T10:24:51.907985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:24:51.917618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715663, at schemeshard: 72057594046644480 2025-06-03T10:24:52.349004Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667123573694515:2339], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:52.378362Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZTRlMTVlYzctOTgyMTdmMi1jZDU0Mzk2Ni04ODQ3NTk3Yg==, ActorId: [1:7511667123573694367:2330], ActorState: ExecuteState, TraceId: 01jwtn53191nhz0tqf847xx23k, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:52.378857Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:52.417197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:24:52.542390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:52.677972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:52.832634Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtn53zsf3kepfyyv08xe3e5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGY4M2JmNmEtNGJiNTc1NjctNWQ4ODExMDUtZjc4ZDY4NjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511667127868662347:3071] 2025-06-03T10:24:53.887010Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667110688791729:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:53.887044Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:24:53.997411Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667110070872828:2182];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:53.998524Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok >>>>> Prepare scheme WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:24:57.927285Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667114983759115:2152], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:24:57.927363Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667114983759115:2152], notify# NKiki ... 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.820487Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667212142830560:4750], recipient# [3:7511667212142830558:2808], result# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByPath Operation: OpUnknown RedirectRequired: true ShowPrivatePath: false SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.820691Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667177783088208:2139], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:11.820721Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667177783088208:2139], cacheItem# { Subscriber: { Subscriber: [3:7511667182078056400:2734] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946304560 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:10:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:11.820762Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle 
TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667177783088208:2139], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:11.820772Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667177783088208:2139], cacheItem# { Subscriber: { Subscriber: [3:7511667182078056668:2942] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946304595 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: TableId: [72057594046644480:12:0] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:11.820831Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667212142830563:4751], recipient# [3:7511667177783088241:2291], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Cluster TableId: [72057594046644480:10:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.820833Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667212142830564:4752], recipient# [3:7511667177783088241:2291], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/PQ/Config/V2/Versions TableId: [72057594046644480:12:1] RequestType: ByTableId Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.820912Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667177783088208:2139], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:11.820921Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667177783088208:2139], cacheItem# { Subscriber: { Subscriber: [3:7511667177783088218:2142] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 28 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946304119 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: 
[OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:11.820950Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667212142830565:4753], recipient# [3:7511667177783088241:2291], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 2 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:11.859140Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2760: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [3:7511667177783088208:2139], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:25:11.859173Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [3:7511667177783088208:2139], cacheItem# { Subscriber: { Subscriber: [3:7511667182078056668:2942] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946304595 PathId: [OwnerId: 72057594046644480, LocalPathId: 12] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:11.859187Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [3:7511667177783088208:2139], cacheItem# { Subscriber: { Subscriber: [3:7511667182078056400:2734] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 20 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946304560 PathId: [OwnerId: 72057594046644480, LocalPathId: 10] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:11.859268Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667212142830568:4754], recipient# [3:7511667212142830567:2805], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 12] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable 
PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 10] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Utf8 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:25:11.859934Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1948: ActorId: [3:7511667212142830566:2805] TxId: 281474976720687. Ctx: { TraceId: 01jwtn5pm7bn0dhecyxagkmyp4, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=NDY1MGQwZWEtYjRmZjAxNWMtMTNhZTE2ODQtZmFmMTgwMjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-03T10:25:11.860193Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7511667212142830570:2805], TxId: 281474976720687, task: 3. Ctx: { SessionId : ydb://session/3?node_id=3&id=NDY1MGQwZWEtYjRmZjAxNWMtMTNhZTE2ODQtZmFmMTgwMjg=. CustomerSuppliedId : . TraceId : 01jwtn5pm7bn0dhecyxagkmyp4. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7511667212142830566:2805], status: UNAVAILABLE, reason: {
: Error: Terminate execution } >> KqpScheme::CreateTableWithVectorIndexCaseIncentive [GOOD] >> KqpScheme::CreateTableWithVectorIndexNoFeatureFlag >> TSchemeShardTest::BlockStoreVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreNonreplVolumeLimits |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbPermissionNameToACLAttrs::SimpleConvertGood [GOOD] |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::PgValue [GOOD] >> KqpScheme::RenameTable [GOOD] >> KqpScheme::PathWithNoRoot >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] >> KqpScheme::CreateAndAlterTableWithMinMaxPartitionsCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithBloomFilterUncompat >> ConvertMiniKQLTypeToYdbTypeTest::TTzDateTime [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzTimeStamp [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::UuidType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantTuple [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::VariantStruct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Void [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] |59.7%| [TA] {RESULT} $(B)/ydb/library/yql/tests/sql/dq_file/part11/test-results/pytest/{meta.json ... results_accumulator.log} >> LdapAuthProviderTest_LdapsScheme::LdapRefreshRemoveUserBad [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError >> TSchemeShardTest::BlockStoreNonreplVolumeLimits [GOOD] >> TSchemeShardTest::BlockStoreSystemVolumeLimits >> KqpScheme::DoubleCreateExternalTable [GOOD] >> KqpScheme::DoubleCreateResourcePool >> TSchemeShardTest::BlockStoreSystemVolumeLimits [GOOD] >> TSchemeShardTest::AlterTableWithCompactionStrategies >> ConvertYdbValueToMiniKQLValueTest::Void [GOOD] >> ConvertYdbValueToMiniKQLValueTest::SimpleUuidTypeMissmatch [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Struct [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Tuple [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Variant [GOOD] >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] |59.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::Tuple [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-03T10:25:09.219204Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667201975040926:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.219260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001211/r3tmp/tmpefautX/pdisk_1.dat 2025-06-03T10:25:09.346836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:09.346861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:09.354612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:09.359060Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:8216 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:25:09.437799Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667201975041005:2115] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:09.440086Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667201975041464:2432] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:09.440124Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667201975041029:2129], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:09.440136Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667201975041029:2129], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:09.440190Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:09.440639Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667201975040684:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667201975041469:2433] 2025-06-03T10:25:09.440660Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667201975040684:2050] Subscribe: subscriber# [1:7511667201975041469:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:09.440675Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667201975040687:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667201975041470:2433] 2025-06-03T10:25:09.440679Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667201975040687:2053] Subscribe: subscriber# [1:7511667201975041470:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:09.440684Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667201975040690:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667201975041471:2433] 2025-06-03T10:25:09.440688Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667201975040690:2056] Subscribe: subscriber# [1:7511667201975041471:2433], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:09.440700Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667201975041469:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975040684:2050] 2025-06-03T10:25:09.440713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667201975041470:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975040687:2053] 2025-06-03T10:25:09.440718Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667201975041471:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975040690:2056] 2025-06-03T10:25:09.440726Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: 
[main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975041466:2433] 2025-06-03T10:25:09.440732Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975041467:2433] 2025-06-03T10:25:09.440744Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667201975041465:2433][/dc-1] Set up state: owner# [1:7511667201975041029:2129], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:09.440781Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667201975041468:2433] 2025-06-03T10:25:09.440788Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667201975041465:2433][/dc-1] Path was already updated: owner# [1:7511667201975041029:2129], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:09.440796Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667201975041469:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041466:2433], cookie# 1 2025-06-03T10:25:09.440799Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667201975041470:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041467:2433], cookie# 1 2025-06-03T10:25:09.440803Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667201975041471:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041468:2433], cookie# 1 2025-06-03T10:25:09.440809Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667201975040684:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667201975041469:2433] 2025-06-03T10:25:09.440813Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667201975040684:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041469:2433], cookie# 1 2025-06-03T10:25:09.440818Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667201975040687:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667201975041470:2433] 2025-06-03T10:25:09.440821Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667201975040687:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041470:2433], cookie# 1 2025-06-03T10:25:09.440825Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667201975040690:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667201975041471:2433] 
2025-06-03T10:25:09.440828Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667201975040690:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667201975041471:2433], cookie# 1 2025-06-03T10:25:09.441340Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667201975041469:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975040684:2050], cookie# 1 2025-06-03T10:25:09.441347Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667201975041470:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975040687:2053], cookie# 1 2025-06-03T10:25:09.441350Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667201975041471:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975040690:2056], cookie# 1 2025-06-03T10:25:09.441356Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975041466:2433], cookie# 1 2025-06-03T10:25:09.441364Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667201975041465:2433][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:09.441368Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975041467:2433], cookie# 1 2025-06-03T10:25:09.441372Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667201975041465:2433][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:09.441377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667201975041465:2433][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667201975041468:2433], cookie# 1 2025-06-03T10:25:09.441379Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667201975041465:2433][/dc-1] Unexpected sync response: sender# [1:7511667201975041468:2433], cookie# 1 2025-06-03T10:25:09.459669Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667201975041029:2129], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 
1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLi ... 2057594046644480 }: sender# [5:7511667213479375916:2725] 2025-06-03T10:25:12.567505Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7511667213479374777:2056] Upsert description: path# /dc-1/.metadata/workload_manager/delayed_requests 2025-06-03T10:25:12.567510Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7511667213479374777:2056] Subscribe: subscriber# [5:7511667213479375916:2725], path# /dc-1/.metadata/workload_manager/delayed_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:12.567512Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375914:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7511667213479374771:2050] 2025-06-03T10:25:12.567519Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375915:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7511667213479374774:2053] 2025-06-03T10:25:12.567520Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7511667213479374777:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/workload_manager/running_requests DomainOwnerId: 72057594046644480 }: sender# [5:7511667213479375922:2726] 2025-06-03T10:25:12.567522Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7511667213479374777:2056] Upsert description: path# /dc-1/.metadata/workload_manager/running_requests 2025-06-03T10:25:12.567527Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375916:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7511667213479374777:2056] 2025-06-03T10:25:12.567527Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7511667213479374777:2056] Subscribe: subscriber# [5:7511667213479375922:2726], path# /dc-1/.metadata/workload_manager/running_requests, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:12.567533Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375909:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7511667213479375911:2725] 2025-06-03T10:25:12.567535Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374777:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375916:2725] 2025-06-03T10:25:12.567546Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375920:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479374771:2050] 2025-06-03T10:25:12.567546Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375909:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 
0 }: sender# [5:7511667213479375912:2725] 2025-06-03T10:25:12.567552Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375921:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479374774:2053] 2025-06-03T10:25:12.567553Z node 5 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][5:7511667213479375909:2725][/dc-1/.metadata/workload_manager/delayed_requests] Set up state: owner# [5:7511667213479374932:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:12.567557Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][5:7511667213479375922:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479374777:2056] 2025-06-03T10:25:12.567559Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375909:2725][/dc-1/.metadata/workload_manager/delayed_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/delayed_requests Version: 0 }: sender# [5:7511667213479375913:2725] 2025-06-03T10:25:12.567564Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][5:7511667213479375909:2725][/dc-1/.metadata/workload_manager/delayed_requests] Ignore empty state: owner# [5:7511667213479374932:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:12.567566Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375910:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479375917:2726] 2025-06-03T10:25:12.567570Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374771:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375914:2725] 2025-06-03T10:25:12.567575Z node 5 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375910:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479375918:2726] 2025-06-03T10:25:12.567576Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374771:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375920:2726] 2025-06-03T10:25:12.567581Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374774:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375915:2725] 2025-06-03T10:25:12.567582Z node 5 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][5:7511667213479375910:2726][/dc-1/.metadata/workload_manager/running_requests] Set up state: owner# [5:7511667213479374932:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:12.567585Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374774:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375921:2726] 2025-06-03T10:25:12.567588Z node 5 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][5:7511667213479375910:2726][/dc-1/.metadata/workload_manager/running_requests] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/workload_manager/running_requests Version: 0 }: sender# [5:7511667213479375919:2726] 2025-06-03T10:25:12.567593Z node 5 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][5:7511667213479375910:2726][/dc-1/.metadata/workload_manager/running_requests] Ignore empty state: owner# [5:7511667213479374932:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:12.567594Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [5:7511667213479374932:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 } 2025-06-03T10:25:12.567598Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667213479374777:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [5:7511667213479375922:2726] 2025-06-03T10:25:12.567607Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [5:7511667213479374932:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [5:7511667213479375909:2725] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:12.567628Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7511667213479374932:2127], cacheItem# { Subscriber: { Subscriber: [5:7511667213479375909:2725] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:12.567633Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [5:7511667213479374932:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-03T10:25:12.567639Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [5:7511667213479374932:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [5:7511667213479375910:2726] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:12.567647Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [5:7511667213479374932:2127], cacheItem# { Subscriber: { Subscriber: [5:7511667213479375910:2726] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 
Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:12.567669Z node 5 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [5:7511667213479375923:2727], recipient# [5:7511667213479375908:2313], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TSchemeShardTest::AlterTableWithCompactionStrategies [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false >> ConvertYdbPermissionNameToACLAttrs::TestEqualGranularAndDeprecatedAcl [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty [GOOD] >> ConvertYdbValueToMiniKQLValueTest::OptionalOptionalEmpty2 [GOOD] >> ConvertYdbValueToMiniKQLValueTest::List [GOOD] >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] >> KqpConstraints::AddSerialColumnForbidden [GOOD] >> KqpConstraints::AddColumnWithDefaultForbidden >> ConvertMiniKQLTypeToYdbTypeTest::SimpleType [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::TTzDate [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Optional [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::List [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Struct [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::Dict [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] |59.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::VariantIndexUnderflow [GOOD] >> KqpOlapTypes::Decimal [GOOD] >> KqpOlapTypes::AttributeNegative >> CellsFromTupleTest::CellsFromTupleSuccess [GOOD] >> CellsFromTupleTest::CellsFromTupleSuccessPg [GOOD] >> CellsFromTupleTest::CellsFromTupleFails [GOOD] >> CellsFromTupleTest::CellsFromTupleFailsPg [GOOD] >> CompressionTests::Zstd [GOOD] >> CompressionTests::Unsupported [GOOD] >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> KqpScheme::PathWithNoRoot [GOOD] >> KqpScheme::RenameTableWithVectorIndex >> KqpScheme::CreateTableWithVectorIndexNoFeatureFlag [GOOD] >> KqpScheme::CreateTableWithVectorIndexCoveredPublicApi >> KqpScheme::CreateAndAlterTableWithBloomFilterUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithBloomFilterCompat >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite >> test.py::test[blocks-minmax_strings_filter--Results] [GOOD] >> test.py::test[blocks-mod_uint64--ForceBlocks] |59.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertYdbValueToMiniKQLValueTest::Dict [GOOD] |59.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::PgType [GOOD] >> KqpScheme::DoubleCreateResourcePool 
[GOOD] >> KqpScheme::DoubleCreateResourcePoolClassifier+UseSink |59.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/ydb_convert/ut/unittest >> ConvertMiniKQLTypeToYdbTypeTest::DecimalType [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-false [GOOD] >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true >> DataShardWrite::WriteImmediateBadRequest >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite >> DataShardWrite::UpsertImmediateManyColumns >> KqpConstraints::AddColumnWithDefaultForbidden [GOOD] >> KqpConstraints::AddNonColumnDoesnotReturnInternalError >> KqpOlapTypes::AttributeNegative [GOOD] |59.8%| [TA] $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoWithError [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetDefaultFilter [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithOneLoginPlaceholder [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithSearchAttribute [GOOD] >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] >> test.py::test[aggregate-group_by_expr--Results] [GOOD] >> test.py::test[aggregate-group_by_expr_only_join--Results] >> KqpScheme::RenameTableWithVectorIndex [GOOD] >> KqpScheme::NEG_CreateTableWithUnsupportedStoreType >> KqpScheme::CreateTableWithVectorIndexCoveredPublicApi [GOOD] |59.8%| [TA] $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... results_accumulator.log} >> KqpScheme::CreateAndAlterTableWithBloomFilterCompat [GOOD] >> TDatabaseResolverTests::PostgreSQL [GOOD] >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoGood [GOOD] >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapTypes::AttributeNegative [GOOD] Test command err: Trying to start YDB, gRPC: 18097, MsgBus: 23920 2025-06-03T10:25:08.893477Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667198822432588:2214];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c4/r3tmp/tmpEtk95U/pdisk_1.dat 2025-06-03T10:25:08.937808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:08.969065Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18097, node 1 2025-06-03T10:25:08.991815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.991849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.993392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:09.009961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:09.009971Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:09.009974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:09.010025Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23920 TClient is connected to server localhost:23920 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:09.180999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.184756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:09.196925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.236711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.311242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.346981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.507518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667203117401344:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.507544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.569319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.578468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.590539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.601590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.619049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.630754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.647994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.666858Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667203117402000:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.666881Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.666980Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667203117402005:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.667896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:09.671375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:09.671460Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667203117402007:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:09.725873Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667203117402058:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.878029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.892798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.897278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 30583, MsgBus: 9504 2025-06-03T10:25:10.213580Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667204076002972:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:10.213601Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c4/r3tmp/tmpPCS4bC/pdisk_1.dat 2025-06-03T10:25:10.248530Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30583, node 2 2025-06-03T10:25:10.279523Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:10.279539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:10.279542Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:10.279600Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9504 2025-06-03T10:25:10.328912Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:10.328966Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:10.329736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9504 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathSt ... 
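
The KQP_WORKLOAD_SERVICE lines around this point repeat a single pattern: [TPoolFetcherActor] reports NOT_FOUND for the default resource pool, an ESchemeOpCreateResourcePool operation is proposed, [TPoolCreatorActor] schedules a retry on the "Transaction ... completed, doublechecking" error, and a concurrent creator is answered with "path exist, request accepts it". A minimal C++ sketch of that create-if-missing flow under those assumptions; EPoolStatus and both callbacks are illustrative stand-ins, not YDB's actual API:

    #include <chrono>
    #include <functional>
    #include <thread>

    // Illustrative statuses standing in for the outcomes seen in the log.
    enum class EPoolStatus { Ok, NotFound, AlreadyExists, Retryable };

    // fetchPool/createPool are hypothetical callbacks modelling the scheme
    // operations performed by TPoolFetcherActor and TPoolCreatorActor.
    inline bool EnsureDefaultPool(const std::function<EPoolStatus()>& fetchPool,
                                  const std::function<EPoolStatus()>& createPool,
                                  int maxAttempts = 5) {
        if (fetchPool() == EPoolStatus::Ok)
            return true;                                  // pool already visible
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            const EPoolStatus st = createPool();
            // "path exist, request accepts it": a concurrent creator won the
            // race; treat AlreadyExists the same as a successful create.
            if (st == EPoolStatus::Ok || st == EPoolStatus::AlreadyExists) {
                // "doublechecking": re-read until the new pool is observable.
                if (fetchPool() == EPoolStatus::Ok)
                    return true;
            }
            // "Scheduled retry for error": back off and try again.
            std::this_thread::sleep_for(std::chrono::milliseconds(50 << attempt));
        }
        return false;
    }

Treating AlreadyExists the same as a successful create is what lets the racing sessions in these tests converge: every loser of the race simply re-reads until the winner's pool becomes visible, so the TX_PROXY "path exist" errors are expected noise here.
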
15659:3, at schemeshard: 72057594046644480 2025-06-03T10:25:13.010412Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511667219479912038:2353], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:25:13.078974Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511667219479912089:2385] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:13.168813Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313065, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.237227Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313177, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.298848Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313268, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.365591Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313324, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.435409Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313387, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.488694Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313464, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.554581Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313520, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.612033Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313583, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.664444Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313639, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.729843Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313695, txId: 18446744073709551615] shutting down 2025-06-03T10:25:13.798887Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946313758, txId: 18446744073709551615] shutting down Trying to start YDB, gRPC: 27370, MsgBus: 26930 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c4/r3tmp/tmppdcjS1/pdisk_1.dat 2025-06-03T10:25:14.245397Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:14.261349Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27370, node 6 2025-06-03T10:25:14.278177Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-03T10:25:14.278197Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:14.278200Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:14.278271Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26930 2025-06-03T10:25:14.317870Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:14.317910Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:14.318416Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26930 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:25:14.441391Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.446113Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:14.458571Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.484211Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:14.527005Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.570459Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 
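
The SCHEME_BOARD_SUBSCRIBER entries for /dc-1 at the start of this excerpt show a majority-quorum handshake: with size# 3 replica subscribers and half# 1, the main subscriber logs "Sync is in progress" after the first TEvSyncVersionResponse, declares "Sync is done" once successes# 2 exceeds half, and dismisses the late third reply as "Unexpected sync response". A small sketch of that counting logic; TSyncQuorum and its members are invented names for illustration, not the actual subscriber.cpp types:

    #include <cstdint>
    #include <iostream>

    // Invented type modelling the counters printed by subscriber.cpp:
    // "Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, ...".
    struct TSyncQuorum {
        std::uint64_t Cookie = 1;     // request id, "cookie# 1"
        std::uint32_t Size = 3;       // replica subscribers, "size# 3"
        std::uint32_t Half = 1;       // Size / 2, "half# 1"
        std::uint32_t Successes = 0;
        std::uint32_t Failures = 0;
        bool Done = false;

        // Count one TEvSyncVersionResponse; returns true once quorum is reached.
        bool HandleResponse(bool success) {
            if (Done) {
                // Replies after quorum are logged as "Unexpected sync response".
                std::cout << "Unexpected sync response: cookie# " << Cookie << "\n";
                return true;
            }
            (success ? Successes : Failures) += 1;
            // Failure-majority branch is an assumption; the trace only
            // exercises the success path (failures stay at 0).
            if (Successes > Half || Failures > Half) {
                Done = true;          // "Sync is done: ... successes# 2"
                std::cout << "Sync is done: cookie# " << Cookie
                          << ", successes# " << Successes << "\n";
            }
            return Done;
        }
    };

    int main() {
        TSyncQuorum q;
        q.HandleResponse(true);   // successes# 1 -> still in progress
        q.HandleResponse(true);   // successes# 2 > half# 1 -> done
        q.HandleResponse(true);   // late third reply -> "Unexpected sync response"
    }
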
2025-06-03T10:25:14.721152Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667225111744134:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:14.721200Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:14.736796Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.770776Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.785541Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.800489Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.827851Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.854002Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.880644Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.914735Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667225111744786:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:14.914767Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:14.914855Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667225111744791:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:14.917912Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:14.922346Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:14.922435Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667225111744793:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:14.996129Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667225111744844:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] >> DataShardWrite::UpsertImmediate >> KqpScanArrowFormat::SingleKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> TLdapUtilsSearchFilterCreatorTest::GetFilterWithFewLoginPlaceholders [GOOD] Test command err: 2025-06-03T10:24:53.278159Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667134452719952:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:53.278178Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001544/r3tmp/tmpwfsRCJ/pdisk_1.dat 2025-06-03T10:24:54.201958Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667134452719801:2079] 1748946293255222 != 1748946293255225 2025-06-03T10:24:54.205836Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:54.205861Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:54.209754Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:54.211073Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22847, node 1 2025-06-03T10:24:54.540036Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:54.540057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:54.540060Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:54.540135Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:54.969660Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:24:54.972460Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:24:54.972469Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:24:54.972634Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15467, port: 15467 2025-06-03T10:24:54.976381Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:55.066614Z node 1 :LDAP_AUTH_PROVIDER DEBUG: 
ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:55.113695Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:24:55.113899Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:24:55.113910Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.161684Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.209501Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:24:55.212739Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****CHuA (3F609231) () has now valid token of ldapuser@ldap 2025-06-03T10:24:58.277512Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667134452719952:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:58.277540Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:24:59.305601Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****CHuA (3F609231) 2025-06-03T10:24:59.408363Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:15467, port: 15467 2025-06-03T10:24:59.408419Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:24:59.475979Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:24:59.481719Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldap://localhost:15467 return no entries 2025-06-03T10:24:59.481816Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****CHuA (3F609231) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. 
LDAP search for filter uid=ldapuser on server ldap://localhost:15467 return no entries)' 2025-06-03T10:25:04.321765Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****CHuA (3F609231) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001544/r3tmp/tmpQfRjhQ/pdisk_1.dat 2025-06-03T10:25:05.551508Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:05.553243Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.557871Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667185963239052:2079] 1748946305512521 != 1748946305512524 TServer::EnableGrpc on GrpcPort 20519, node 2 2025-06-03T10:25:05.581573Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.581590Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.581592Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.581649Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.613882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.613921Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.614392Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.701386Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.702220Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.702236Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.702392Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13790, port: 13790 2025-06-03T10:25:05.702413Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.771338Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:05.771696Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. Server is busy 2025-06-03T10:25:05.771819Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****dsMg (E45D87D6) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. 
Server is busy)' 2025-06-03T10:25:05.771882Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.771888Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.772120Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13790, port: 13790 2025-06-03T10:25:05.772140Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.789888Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:05.790124Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. Server is busy 2025-06-03T10:25:05.790215Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****dsMg (E45D87D6) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. Server is busy)' 2025-06-03T10:25:07.512575Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****dsMg (E45D87D6) 2025-06-03T10:25:07.512680Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:07.512689Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:07.512875Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13790, port: 13790 2025-06-03T10:25:07.512913Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:07.540407Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:07.540623Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. Server is busy 2025-06-03T10:25:07.540761Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****dsMg (E45D87D6) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldap://localhost:13790. 
Server is busy)' 2025-06-03T10:25:10.517458Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****dsMg (E45D87D6) 2025-06-03T10:25:10.517558Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:10.517565Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:10.517847Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13790, port: 13790 2025-06-03T10:25:10.517896Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:10.527956Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:10.573499Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:10.573772Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:10.573784Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.622854Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.665748Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.666130Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dsMg (E45D87D6) () has now valid token of ldapuser@ldap 2025-06-03T10:25:14.533464Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****dsMg (E45D87D6) 2025-06-03T10:25:14.533532Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:13790, port: 13790 2025-06-03T10:25:14.533562Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:14.554384Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:14.601519Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:14.601762Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:14.601793Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: 
(|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.645930Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.689519Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.689937Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****dsMg (E45D87D6) () has now valid token of ldapuser@ldap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithVectorIndexCoveredPublicApi [GOOD] Test command err: Trying to start YDB, gRPC: 20214, MsgBus: 12408 2025-06-03T10:25:08.181270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667196812263787:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.181475Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00170d/r3tmp/tmpEuBQzD/pdisk_1.dat 2025-06-03T10:25:08.252848Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20214, node 1 2025-06-03T10:25:08.272087Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.272103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.272105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:08.272151Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:08.282789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.282842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.284479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12408 TClient is connected to server localhost:12408 WaitRootIsUp 'Root'... 
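
The LDAP_AUTH_PROVIDER trace above resolves nested group membership level by level: first a search with the Active Directory matching-rule filter (member:1.2.840.113556.1.4.1941:=uid=ldapuser,...), then, as the "Try to get nested groups - tree traversal" fallback, one batched query per level of the form (|(entryDn=group1)(entryDn=group2)) requesting memberOf, stopping when a level yields no unseen parent groups. A hedged sketch of that traversal; BuildEntryDnFilter and the search callback are illustrative, not ldap_auth_provider.cpp's real interfaces:

    #include <functional>
    #include <set>
    #include <string>
    #include <vector>

    // One traversal level is batched into a single disjunction, matching the
    // "(|(entryDn=cn=managers,...)(entryDn=cn=developers,...))" filters above.
    std::string BuildEntryDnFilter(const std::vector<std::string>& groupDns) {
        std::string filter = "(|";
        for (const auto& dn : groupDns) {
            filter += "(entryDn=" + dn + ")";
        }
        return filter + ")";
    }

    // Breadth-first walk over memberOf: 'search' stands in for an LDAP query
    // returning the parent groups (memberOf values) matched by the filter.
    std::set<std::string> ResolveNestedGroups(
            std::vector<std::string> level,
            const std::function<std::vector<std::string>(const std::string&)>& search) {
        std::set<std::string> seen(level.begin(), level.end());
        while (!level.empty()) {
            std::vector<std::string> next;
            for (const auto& parent : search(BuildEntryDnFilter(level))) {
                if (seen.insert(parent).second) {
                    next.push_back(parent);   // only unseen groups go to the next level
                }
            }
            level = std::move(next);          // ends when a level adds nothing new
        }
        return seen;                          // user's transitive group set
    }

In the trace the matching-rule query appears to return only the direct groups and is immediately followed by "Try to get nested groups - tree traversal", so this batched level-by-level fallback is the path actually exercised here.
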
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:25:08.341785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.345331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.368592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.436688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.462074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.473562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.575661Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667196812265385:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.575687Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.631001Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.640509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.649752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.664100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.677826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.693058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.709795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.726783Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667196812266037:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.726816Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.727052Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667196812266042:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.728544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:08.732904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667196812266044:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:08.833512Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667196812266095:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.046165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 26172, MsgBus: 19999 2025-06-03T10:25:09.404634Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667202517364356:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.404671Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00170d/r3tmp/tmp1ASvJ9/pdisk_1.dat 2025-06-03T10:25:09.422878Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26172, node 2 2025-06-03T10:25:09.434982Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:09.434999Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:09.435001Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:09.435065Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19999 TClient is connected to server localhost:19999 2025-06-03T10:25:09.504907Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:09.504947Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:09.506040Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 Pro ... 
de 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:13.746144Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:13.766333Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667219264482173:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:13.766359Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:13.766369Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667219264482178:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:13.766989Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:13.773007Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511667219264482180:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:13.854930Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511667219264482231:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:14.031701Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511667223559449816:3570] txid# 281474976715672, issues: { message: "Vector index support is disabled" severity: 1 } Trying to start YDB, gRPC: 27847, MsgBus: 10766 2025-06-03T10:25:14.506370Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667223036507888:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:14.506750Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00170d/r3tmp/tmpp1VkYf/pdisk_1.dat 2025-06-03T10:25:14.530458Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27847, node 6 2025-06-03T10:25:14.545731Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:14.545749Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:14.545751Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:14.545807Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10766 2025-06-03T10:25:14.617905Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:14.617943Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:14.621259Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10766 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
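Note: every suite in this run logs the same workload-manager bootstrap sequence, and it is expected noise rather than a failure: TPoolFetcherActor reports NOT_FOUND for /Root/.metadata/workload_manager/pools/default, an ESchemeOpCreateResourcePool is proposed, TPoolCreatorActor schedules a retry on "Transaction ... completed, doublechecking", and a racing second creator loses benignly with the TX_PROXY message "path exist, request accepts it". A minimal Python sketch of that create-if-missing loop; fetch_pool and create_pool are hypothetical callables standing in for the actor messages.

    import time

    class NotFound(Exception): pass
    class AlreadyExists(Exception): pass

    def ensure_default_pool(fetch_pool, create_pool, retries=5, backoff=0.1):
        # Idempotent bootstrap: fetch, create on NOT_FOUND, tolerate the race.
        for attempt in range(retries):
            try:
                return fetch_pool("default")        # TPoolFetcherActor
            except NotFound:
                try:
                    create_pool("default")          # ESchemeOpCreateResourcePool
                except AlreadyExists:
                    pass                            # "path exist, request accepts it"
            time.sleep(backoff * (attempt + 1))     # "Scheduled retry ... doublechecking"
        raise TimeoutError("default resource pool did not become visible")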
2025-06-03T10:25:14.639585Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.644771Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:14.647069Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.667249Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.706698Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.726809Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:15.009799Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227331476770:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.009839Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.019750Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.040117Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.103111Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.113660Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.126010Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.157505Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.172306Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.204157Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227331477429:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.204197Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.204421Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227331477434:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.205615Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:15.210203Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:15.210351Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667227331477436:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:15.282668Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667227331477487:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:15.785907Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 >> DataShardWrite::ExecSQLUpsertImmediate-EvWrite [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::PostgreSQL_PermissionDenied [GOOD] Test command err: 2025-06-03T10:25:16.426392Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed PostgreSQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-postgresql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-postgresql.viewer`. ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAndAlterTableWithBloomFilterCompat [GOOD] Test command err: Trying to start YDB, gRPC: 3190, MsgBus: 23509 2025-06-03T10:25:08.191082Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667197964543425:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.191124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001727/r3tmp/tmpt9tC4y/pdisk_1.dat 2025-06-03T10:25:08.263347Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3190, node 1 2025-06-03T10:25:08.285475Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.285491Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.285494Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:08.285557Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:08.292902Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.292937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.294044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23509 
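Note: the vector-index verdicts above differ only by feature flag: node 1 finishes KqpScheme::CreateTableWithVectorIndexCoveredPublicApi with [GOOD], while node 5 is rejected with "Vector index support is disabled". The sketch below shows the kind of covered vector-index DDL such a test exercises, issued through the ydb Python SDK; the table path and the WITH option values follow the public YDB documentation and should be read as assumptions, not as the test's exact statement.

    import ydb

    def create_vector_table(driver):
        # execute_scheme runs DDL; retry_operation_sync retries transient errors.
        pool = ydb.SessionPool(driver)
        try:
            pool.retry_operation_sync(lambda session: session.execute_scheme("""
                CREATE TABLE `/Root/VectorTable` (
                    Key Uint64,
                    Emb String,
                    Data String,
                    PRIMARY KEY (Key),
                    INDEX vector_idx GLOBAL USING vector_kmeans_tree
                        ON (Emb) COVER (Data)
                        WITH (distance="cosine", vector_type="uint8", vector_dimension=2)
                );
            """))
        finally:
            pool.stop()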
TClient is connected to server localhost:23509 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:08.356962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.371121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.390757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.413659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.430611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.607338Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667197964545022:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.607410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.651417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.659495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.715057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.727219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.743111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.754752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.769564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.787204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667197964545675:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.787242Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.787260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667197964545680:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.788199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:08.795972Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667197964545682:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:08.873130Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667197964545733:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.123438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 20700, MsgBus: 14213 2025-06-03T10:25:09.369423Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667200872271012:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.370180Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001727/r3tmp/tmpyu9ALy/pdisk_1.dat 2025-06-03T10:25:09.403137Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667200872270903:2079] 1748946309368737 != 1748946309368740 2025-06-03T10:25:09.403913Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20700, node 2 2025-06-03T10:25:09.411196Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:09.411212Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:09.411215Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:09.411284Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14213 2025-06-03T10:25:09.479994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:09.480030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:14213 2025-06-03T10:25:09.481251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricate ... t_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:14.564591Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23738 TClient is connected to server localhost:23738 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:14.636994Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:14.637020Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:14.637457Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.638119Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:25:14.644877Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.657958Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.680344Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:14.756090Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:14.775067Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:15.141825Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227368176240:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.141854Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.158466Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.186580Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.207868Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.235124Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.263263Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.292086Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.371483Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.410417Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227368176892:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.410451Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.410671Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667227368176897:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.412057Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:15.416853Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:15.416967Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667227368176899:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:15.494455Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667227368176950:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:15.883697Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/TableWithBloomFilter TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithBloomFilter" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1748946315991 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithBloomFilter" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) 2025-06-03T10:25:15.962875Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/TableWithBloomFilter TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithBloomFilter" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1748946315991 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithBloomFilter" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } ... 
(TRUNCATED) 2025-06-03T10:25:15.979949Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/TableWithBloomFilter TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithBloomFilter" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1748946315991 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithBloomFilter" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) >> DataShardWrite::WriteImmediateBadRequest [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations >> test.py::test[aggregate-list_nullable--ForceBlocks] [GOOD] >> test.py::test[aggregate-list_nullable--Results] >> KqpScheme::NEG_CreateTableWithUnsupportedStoreType [GOOD] >> KqpScheme::OlapSharding_KeyOnly >> DataShardWrite::ExecSQLUpsertImmediate+EvWrite [GOOD] >> DataShardWrite::DeleteImmediate >> TSubDomainTest::CreateDummyTabletsInDifferentDomains [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardTest::BackupBackupCollection-WithIncremental-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:25:08.158008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:25:08.158036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:08.158042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:08.158048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:08.158054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:25:08.158059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:08.158073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-03T10:25:08.158087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:08.158198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:08.158277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:08.172768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:08.172804Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:08.177281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:08.177427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:08.177475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:08.180048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:08.180130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:08.180254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:08.180309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:08.181239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:08.181322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:08.181700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:08.181715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:08.181725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:08.181735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:08.181742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:08.181768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.183494Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 
2025-06-03T10:25:08.210202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:08.210332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.210415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:08.210474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:08.210487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.213972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:08.214025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:08.214102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.214119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:08.214125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:25:08.214136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:08.217140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.217178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:08.217190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:08.217974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.217994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:08.218005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:08.218014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:08.218879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:08.219654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:08.219709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:08.219929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:08.219964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:08.219974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:08.220060Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:25:08.220074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:08.220115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:25:08.220129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:25:08.220769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:08.220782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:08.220838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 31 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544265Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544286Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" took 25us result status StatusSuccess 2025-06-03T10:25:16.544357Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA" PathDescription { Self { Name: "DirA" PathId: 29 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 28 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 } ChildrenExist: true } Children { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 29 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544470Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-06-03T10:25:16.544496Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" took 31us result status StatusSuccess 2025-06-03T10:25:16.544605Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/Table2" PathDescription { Self { Name: "Table2" PathId: 32 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 32 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544737Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544757Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" took 23us result status StatusSuccess 2025-06-03T10:25:16.544844Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB" PathDescription { Self { Name: "DirB" PathId: 30 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 29 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 30 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544961Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:25:16.544986Z node 16 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" took 30us result status StatusSuccess 2025-06-03T10:25:16.545085Z node 16 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.backups/collections/MyCollection1/19700101000000Z_incremental/DirA/DirB/Table3" PathDescription { Self { Name: "Table3" PathId: 33 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 109 CreateStep: 5000010 ParentPathId: 30 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false 
ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 32 PathsLimit: 10000 ShardsInside: 18 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 33 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] >> KqpScheme::OlapSharding_KeyOnly [GOOD] >> KqpScanArrowFormat::AggregateCountStar ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_v1/ut/new_schemecache_ut/unittest >> TPersQueueNewSchemeCacheTest::TestWriteStat1stClassTopicAPI [GOOD] Test command err: 2025-06-03T10:24:42.878416Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667086487413935:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:42.878472Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:24:43.310748Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002173/r3tmp/tmpFTUDcR/pdisk_1.dat 2025-06-03T10:24:43.373991Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:24:43.429589Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:43.634162Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667085428137071:2071] 1748946282987513 != 1748946282987510 2025-06-03T10:24:43.634921Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:43.650040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.650077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.651715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 
2025-06-03T10:24:43.652423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:43.652438Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:43.653436Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:43.653719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10455, node 1 2025-06-03T10:24:43.898017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002173/r3tmp/yandexRnlEAK.tmp 2025-06-03T10:24:43.898033Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002173/r3tmp/yandexRnlEAK.tmp 2025-06-03T10:24:43.898345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002173/r3tmp/yandexRnlEAK.tmp 2025-06-03T10:24:43.898401Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:43.960919Z INFO: TTestServer started on Port 1988 GrpcPort 10455 TClient is connected to server localhost:1988 PQClient connected to localhost:10455 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:24:44.087063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:24:44.110577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:24:44.359682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667095077349546:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.359712Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.359793Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667095077349573:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:44.360529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:24:44.383583Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667095077349575:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:24:44.423323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.445597Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667095077349725:2776] txid# 281474976710664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:24:44.482514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:24:44.496092Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667095077349762:2354], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:24:44.497434Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NDE0NzZkM2ItNmJjNjVmMDItN2I3ODFhNjYtZTJkNTZjOTQ=, ActorId: [1:7511667095077349543:2335], ActorState: ExecuteState, TraceId: 01jwtn4vt4d7tp191ac87qds45, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:24:44.499456Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:24:44.587363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:24:44.689945Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jwtn4w3a2ccfq81vh6rsnzds, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDc2NzJkYjQtZmEyNmUwMWQtZjc5YTcwODMtNzczOTFhOGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511667095077350103:3061] 2025-06-03T10:24:47.881485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667086487413935:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:24:47.881546Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:24:49.793222Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667090782381484:2143], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:24:49.793329Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667090782381484:2143], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root PathId: Partial: 0 } 2025-06-03T10:24:49.793350Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667090782381484:2143], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /Root PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667090782381936:2443] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 14 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946284141 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:24:49.793363Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667090782381484:2143], cacheItem# { Subscriber: { Subscriber: [1:7511667090782381936:2443] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 14 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateSte ... _size_bytes","bin":"20480"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"204800"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"2097152"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"5120"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"51200"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"524288"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"5242880"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"67108864"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.message_size_bytes","bin":"99999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.messages"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"0"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"1"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"10"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"100"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"1000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"10000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"20"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"2500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"5"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"50"},"value":0,"kind":"RATE"},{"
labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"5000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.partition_throttled_milliseconds","bin":"999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.write.uncompressed_bytes"},"value":0,"kind":"RATE"}]} ===Request counters with query: /counters/counters=datastreams/database=%2FRoot/cloud_id=somecloud/folder_id=somefolder/database_id=root/topic=account2%2Ftopic2/consumer=some@random@consumer/json counters: {"sensors":[{"labels":{"name":"api.grpc.topic.stream_read.bytes"},"value":0,"kind":"RATE"},{"labels":{"name":"api.grpc.topic.stream_read.messages"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.bytes"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"100"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"1000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"10000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"180000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"200"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"2000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"30000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"500"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"5000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"60000"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.lag_milliseconds","bin":"999999"},"value":0,"kind":"RATE"},{"labels":{"name":"topic.read.messages"},"value":0,"kind":"RATE"}]} ===Request counters with query: /counters/counters=pqproxy/subsystem=userAgents/json counters: {"sensors":[{"labels":{"sensor":"BytesReadByUserAgent","consumer":"some@random@consumer","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":396,"kind":"RATE"},{"labels":{"topic":"\/Root\/account2\/topic2","sensor":"BytesWrittenByUserAgent","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":460,"kind":"RATE"}]} ===Request counters with query: /counters/counters=pqproxy/subsystem=userAgents/json counters: {"sensors":[{"labels":{"sensor":"BytesReadByUserAgent","consumer":"some@random@consumer","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":396,"kind":"RATE"},{"labels":{"topic":"\/Root\/account2\/topic2","sensor":"BytesWrittenByUserAgent","sdk_build_info":"ydb-cpp-sdk\/dev","protocol":"topic","user_agent":"test-client\/v0.1","host":""},"value":460,"kind":"RATE"}]} 2025-06-03T10:25:16.994087Z :INFO: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] Closing read session. 
Close timeout: 0.000000s 2025-06-03T10:25:16.994115Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:account2/topic2:0:5:3:0 -:account2/topic2:3:4:0:0 -:account2/topic2:4:3:0:0 -:account2/topic2:1:2:0:0 -:account2/topic2:2:1:0:0 2025-06-03T10:25:16.994128Z :INFO: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 36 BytesRead: 40 MessagesRead: 4 BytesReadCompressed: 40 BytesInflightUncompressed: 30 BytesInflightCompressed: 0 BytesInflightTotal: 30 MessagesInflight: 3 } 2025-06-03T10:25:16.994154Z :NOTICE: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:25:16.994170Z :DEBUG: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] [] Returning serverBytesSize = 0 to budget 2025-06-03T10:25:16.994220Z :DEBUG: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] [] Abort session to cluster 2025-06-03T10:25:16.994440Z :NOTICE: [/Root] [/Root] [db04857a-ff1a5129-6f315cd5-95570ff1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:25:16.997402Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_7768492440752929328_v1 grpc read done: success# 0, data# { } 2025-06-03T10:25:16.997422Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_7768492440752929328_v1 grpc read failed 2025-06-03T10:25:16.997433Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_7768492440752929328_v1 grpc closed 2025-06-03T10:25:16.997458Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer some@random@consumer session some@random@consumer_7_1_7768492440752929328_v1 is DEAD 2025-06-03T10:25:16.997826Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037894] Destroy direct read session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997836Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [7:7511667233700174105:2501] destroyed 2025-06-03T10:25:16.997843Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037894] Destroy direct read session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997847Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [7:7511667233700174104:2500] destroyed 2025-06-03T10:25:16.997869Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997872Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997913Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037896] Destroy direct read session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997933Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037896] server disconnected, pipe [7:7511667233700174106:2502] destroyed 2025-06-03T10:25:16.997943Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037895] Destroy direct read session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997950Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037895] server disconnected, pipe [7:7511667233700174103:2499] destroyed 2025-06-03T10:25:16.997954Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037895] Destroy direct read session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997958Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037895] server disconnected, pipe [7:7511667233700174102:2498] destroyed 2025-06-03T10:25:16.997970Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997974Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.997977Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:16.998161Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037897][topic2] pipe [7:7511667233700174095:2495] 
disconnected; active server actors: 1 2025-06-03T10:25:16.998173Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037897][topic2] pipe [7:7511667233700174095:2495] client some@random@consumer disconnected session some@random@consumer_7_1_7768492440752929328_v1 2025-06-03T10:25:17.001512Z :INFO: [/Root] TraceId [] SessionId [123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0] MessageGroupId [123] Write session: close. Timeout 0.000000s 2025-06-03T10:25:17.001550Z :INFO: [/Root] TraceId [] SessionId [123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0] MessageGroupId [123] Write session will now close 2025-06-03T10:25:17.001564Z :DEBUG: [/Root] TraceId [] SessionId [123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0] MessageGroupId [123] Write session: aborting 2025-06-03T10:25:17.001907Z :INFO: [/Root] TraceId [] SessionId [123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0] MessageGroupId [123] Write session: gracefully shut down, all writes complete 2025-06-03T10:25:17.001923Z :DEBUG: [/Root] TraceId [] SessionId [123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0] MessageGroupId [123] Write session: destroy 2025-06-03T10:25:17.003357Z node 7 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: 123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0 grpc read done: success: 0 data: 2025-06-03T10:25:17.003372Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: 123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0 grpc read failed 2025-06-03T10:25:17.003386Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: 123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0 grpc closed 2025-06-03T10:25:17.003395Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: 123|4ef299cb-39f6b0d8-18a9e02-93d0016d_0 is DEAD 2025-06-03T10:25:17.003713Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037896 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:25:17.005572Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037896] server disconnected, pipe [7:7511667233700174090:2488] destroyed 2025-06-03T10:25:17.005602Z node 8 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037896, Partition: 0, State: StateIdle] TPartition::DropOwner. >> DataShardWrite::UpsertImmediateManyColumns [GOOD] >> DataShardWrite::UpsertPrepared+Volatile >> KqpScheme::DoubleCreateResourcePoolClassifier+UseSink [GOOD] >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink >> DataShardWrite::UpsertImmediate [GOOD] >> DataShardWrite::WriteImmediateSeveralOperations [GOOD] >> DataShardWrite::ReplaceImmediate >> DataShardWrite::UpsertPreparedManyTables+Volatile |59.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut >> KqpScanArrowFormat::AllTypesColumns |59.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |59.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |59.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx >> KqpScanArrowFormat::SingleKey [GOOD] |59.8%| [TA] {RESULT} $(B)/ydb/tests/functional/benchmarks_init/test-results/py3test/{meta.json ... results_accumulator.log} |59.8%| [LD] {RESULT} $(B)/ydb/core/cms/console/ut/ydb-core-cms-console-ut |59.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/tx/ydb-core-kqp-ut-tx |59.9%| [TA] {RESULT} $(B)/ydb/core/ydb_convert/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpScanArrowFormat::JoinWithParams >> KqpScanArrowInChanels::AggregateNoColumn >> TDatabaseResolverTests::Ydb_Serverless [GOOD] >> test.py::test[window-win_func_aggr_with_qualified_all_no_simple_columns--Results] [GOOD] >> test.py::test[window-win_func_first_last_over_nonopt-default.txt-Results] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::OlapSharding_KeyOnly [GOOD] Test command err: Trying to start YDB, gRPC: 23918, MsgBus: 24442 2025-06-03T10:25:08.408074Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667199120741214:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.408169Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017d0/r3tmp/tmp4cJ9oo/pdisk_1.dat 2025-06-03T10:25:08.476147Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667199120741184:2079] 1748946308407882 != 1748946308407885 2025-06-03T10:25:08.479110Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23918, node 1 2025-06-03T10:25:08.494707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.494719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.494720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:08.494763Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:08.509555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.509591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.510642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24442 TClient is connected to server localhost:24442 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:08.563256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.578325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.600782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.633800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:08.650886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.893142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667199120742817:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.893203Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.948685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.959826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.978019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.988663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.010519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.022191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.036341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.062197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667203415710768:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.062225Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.062356Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667203415710773:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.063449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:09.066875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:09.066978Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667203415710775:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:09.126712Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667203415710826:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:10.340603Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976716002:0, at schemeshard: 72057594046644480 2025-06-03T10:25:10.369716Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7511667207710682496:2517], TxId: 281474976716003, task: 1. Ctx: { TraceId : 01jwtn5n6d5bpgy128txvzjypj. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NTIwYTA2NjYtMzczOGQzN2MtZGU0NzU0ODUtNDlkNjA5OWI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 requested, table schemaversion 2 (shard# 72075186224037895 node# 1 state# Ready) } } 2025-06-03T10:25:10.372863Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7511667207710682496:2517], TxId: 281474976716003, task: 1. Ctx: { TraceId : 01jwtn5n6d5bpgy128txvzjypj. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=NTIwYTA2NjYtMzczOGQzN2MtZGU0NzU0ODUtNDlkNjA5OWI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 requested, table schemaversion 2 (shard# 72075186224037895 node# 1 state# Ready) } }. 2025-06-03T10:25:10.373151Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTIwYTA2NjYtMzczOGQzN2MtZGU0NzU0ODUtNDlkNjA5OWI=, ActorId: [1:7511667203415711099:2517], ActorState: ExecuteState, TraceId: 01jwtn5n6d5bpgy128txvzjypj, Create QueryResponse for error on request, msg: 2025-06-03T10:25:10.373397Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1546: SelfId: [1:7511667207710682499:2521], TxId: 281474976716004, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzhlMzAzMDktZTM3ZTcxYmMtY2Y4MGM3ZTEtOTNiNjU3ODk=. CustomerSuppliedId : . TraceId : 01jwtn5n6bff5eh142jxwn1dfn. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Source[0] fatal error: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 requested, table schemaversion 2 (shard# 72075186224037895 node# 1 state# Ready) } } 2025-06-03T10:25:10.373406Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7511667207710682499:2521], TxId: 281474976716004, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=NzhlMzAzMDktZTM3ZTcxYmMtY2Y4MGM3ZTEtOTNiNjU3ODk=. CustomerSuppliedId : . TraceId : 01jwtn5n6bff5eh142jxwn1dfn. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: ABORTED DEFAULT_ERROR: {
: Error: Read request aborted subissue: {
: Error: Wrong schemaversion 1 ... 2025-06-03T10:25:14.952703Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.970063Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:14.995481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.014473Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.037048Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.070680Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667227025309393:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.070707Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.070791Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667227025309398:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:15.071785Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:15.075369Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:15.075465Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667227025309400:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:15.166053Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667227025309451:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:15.636917Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 16242, MsgBus: 26584 2025-06-03T10:25:16.267524Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667229895853740:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:16.269817Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017d0/r3tmp/tmpZEQwqw/pdisk_1.dat 2025-06-03T10:25:16.302434Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:16.304067Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667229895853558:2079] 1748946316255554 != 1748946316255557 TServer::EnableGrpc on GrpcPort 16242, node 5 2025-06-03T10:25:16.333792Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:16.333808Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:16.333810Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:16.333866Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26584 2025-06-03T10:25:16.377789Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.377826Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.381770Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26584 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:25:16.481688Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.484108Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:16.781833Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667229895854214:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:16.781865Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Trying to start YDB, gRPC: 27351, MsgBus: 4089 2025-06-03T10:25:17.091827Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667235734003029:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:17.092140Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017d0/r3tmp/tmp7DHg1G/pdisk_1.dat 2025-06-03T10:25:17.110009Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27351, node 6 2025-06-03T10:25:17.126465Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:17.126480Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:17.126483Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:17.126543Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4089 2025-06-03T10:25:17.191079Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:17.191126Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:17.192239Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4089 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:17.211712Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.538468Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667235734003592:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.538494Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.543885Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667235734003612:2293] txid# 281474976715658, issues: { message: "sharding column name have to been primary key column: Value1" severity: 1 } >> DataShardWrite::DeleteImmediate [GOOD] >> DataShardWrite::CancelImmediate >> KqpScanArrowInChanels::AllTypesColumns >> test.py::test[aggregate-list_nullable--Results] [GOOD] >> test.py::test[aggregate-percentiles_containers--ForceBlocks] |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless [GOOD] >> DataShardWrite::ReplaceImmediate [GOOD] >> DataShardWrite::ReplaceImmediate_DefaultValue >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError >> DataShardWrite::UpsertPreparedManyTables+Volatile [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] >> KqpScanArrowFormat::JoinWithParams [GOOD] >> KqpScanArrowInChanels::AggregateCountStar >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable [GOOD] >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 >> DataShardWrite::UpsertPrepared+Volatile [GOOD] >> DataShardWrite::UpsertPrepared-Volatile >> test.py::test[blocks-mod_uint64--ForceBlocks] [GOOD] >> test.py::test[blocks-mod_uint64--Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ResolveTwoDataStreamsFirstError [GOOD] Test command err: 2025-06-03T10:25:19.558890Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgb1 via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgb1': Status: 404 Response body: {"message":"Database not found"} |59.9%| [TA] $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DataShardWrite::CancelImmediate [GOOD] >> DataShardWrite::DeletePrepared+Volatile >> DataShardWrite::ReplaceImmediate_DefaultValue [GOOD] >> DataShardWrite::UpdateImmediate >> TDatabaseResolverTests::MySQL >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite-Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile >> TDatabaseResolverTests::MySQL [GOOD] >> TDatabaseResolverTests::MySQL_PermissionDenied >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] >> KqpScanArrowInChanels::AggregateCountStar [GOOD] >> KqpScanArrowInChanels::AggregateByColumn >> DataShardWrite::DeletePrepared+Volatile [GOOD] >> DataShardWrite::UpsertPreparedManyTables-Volatile [GOOD] >> DataShardWrite::DeletePrepared-Volatile >> DataShardWrite::UpsertPreparedNoTxCache+Volatile >> BsControllerConfig::ManyPDisksRestarts [GOOD] >> BsControllerConfig::MergeBoxes >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink [GOOD] >> DataShardWrite::UpsertPrepared-Volatile [GOOD] >> DataShardWrite::UpsertNoLocksArbiter >> KqpScanArrowFormat::AllTypesColumns [GOOD] >> KqpScanArrowFormat::AllTypesColumnsCellvec >> DataShardWrite::UpdateImmediate [GOOD] >> DataShardWrite::RejectOnChangeQueueOverflow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::MySQL_PermissionDenied [GOOD] Test command err: 2025-06-03T10:25:20.907054Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed MySQL database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-mysql/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. 
>> KqpScanArrowFormat::AggregateCountStar [GOOD] >> KqpScanArrowFormat::AggregateByColumn >> TDatabaseResolverTests::ClickHouseNative [GOOD] >> TDatabaseResolverTests::ClickHouseHttp [GOOD] >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> KqpScanArrowInChanels::AllTypesColumns [GOOD] >> KqpScanArrowInChanels::SingleKey >> KqpScanArrowInChanels::AggregateNoColumn [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DoubleCreateResourcePoolClassifier-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 14351, MsgBus: 18159 2025-06-03T10:25:08.403800Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667198430273909:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.403832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001739/r3tmp/tmpwLUjPt/pdisk_1.dat 2025-06-03T10:25:08.474541Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14351, node 1 2025-06-03T10:25:08.490539Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.490557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.490559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:08.490625Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:08.505271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.505342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.506374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18159 TClient is connected to server localhost:18159 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:08.555684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.560966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.583097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.613159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.633528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.816654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667198430275504:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.816702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.874198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.884527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.898843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.917640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.936838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.964514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.981866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.997735Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667198430276161:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.997764Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.997820Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667198430276166:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.998613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:09.009854Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667198430276168:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:09.071128Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667202725243515:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.296449Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667202725243785:3571] txid# 281474976710672, issues: { message: "(NKikimr::NExternalSource::TExternalSourceException) External source with type ObjectStorage is disabled. Please contact your system administrator to enable it" severity: 1 } 2025-06-03T10:25:09.408625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:1, at schemeshard: 72057594046644480 2025-06-03T10:25:09.541122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710676:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.619293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710679:1, at schemeshard: 72057594046644480 2025-06-03T10:25:09.717052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710682:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.816323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710685:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.895356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710688:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.970875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:25:09.987531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:25:10.291747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710703:0, at schemeshard: 72057594046644480 2025-06-03T10:25:10.310425Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667207020212178:4111] txid# 281474976710704, issues: { message: "(NKikimr::NExternalSource::TExternalSourceException) External source with type ObjectStorage is disabled. 
Please contact your system administrator to enable it" severity: 1 } 2025-06-03T10:25:10.315504Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=YzlkMWIzNGYtZjRmMDM2MmYtNjVjYTg1NzQtNDQ0MmU2NWE=, ActorId: [1:7511667207020212170:2779], ActorState: ExecuteState, TraceId: 01jwtn5n528p9fy8wd950m2x2v, Create QueryRespon ... ent::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:18.234682Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.238658Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:18.250624Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.305287Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.358054Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.442440Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.613563Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667239538703023:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:18.613611Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:18.620975Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.650960Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.667996Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.696144Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.737837Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.766523Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.790825Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.818640Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667239538703678:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:18.818683Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:18.818867Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667239538703683:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:18.819738Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:18.823112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:18.823198Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667239538703685:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:18.878597Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667239538703736:3391] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:19.094509Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:25:19.239190Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.324537Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:25:19.437225Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.557691Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.671394Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715689:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.411837Z node 6 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [6:7511667248128639375:2744], TxId: 281474976715706, task: 1. Ctx: { SessionId : ydb://session/3?node_id=6&id=MjVlNDlkMzItYWIyZTNhZWEtZGQ5MDg1ZmMtYTFlYTljYTg=. TraceId : 01jwtn5yzc3kxvkgbkp3bt9677. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-03T10:25:20.411933Z node 6 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [6:7511667248128639376:2745], TxId: 281474976715706, task: 2. Ctx: { TraceId : 01jwtn5yzc3kxvkgbkp3bt9677. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=6&id=MjVlNDlkMzItYWIyZTNhZWEtZGQ5MDg1ZmMtYTFlYTljYTg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [6:7511667248128639372:2719], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:25:20.411988Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=6&id=MjVlNDlkMzItYWIyZTNhZWEtZGQ5MDg1ZmMtYTFlYTljYTg=, ActorId: [6:7511667248128639297:2719], ActorState: ExecuteState, TraceId: 01jwtn5yzc3kxvkgbkp3bt9677, Create QueryResponse for error on request, msg: 2025-06-03T10:25:20.413395Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jwtn5yz56s1rjyywd6yyr6t7" } } } } ;request=session_id: "ydb://session/3?node_id=6&id=MjVlNDlkMzItYWIyZTNhZWEtZGQ5MDg1ZmMtYTFlYTljYTg=" tx_control { tx_id: "01jwtn5yz56s1rjyywd6yyr6t7" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers`\nSELECT database,name,config,rank FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "database" type { type_id: UTF8 } } members { name: "name" type { type_id: UTF8 } } members { name: "config" type { type_id: JSON_DOCUMENT } } members { name: "rank" type { type_id: INT64 } } } } } } value { items { items { text_value: "/Root" } items { text_value: "MyResourcePoolClassifier" } items { text_value: "{\"resource_pool\":\"test_pool\"}" } items { int64_value: 1 } } } } } ; >> test.py::test[blocks-mod_uint64--Results] [GOOD] >> test.py::test[blocks-pg_to_numbers--ForceBlocks] >> TDatabaseResolverTests::DataStreams_Serverless [GOOD] >> TDatabaseResolverTests::DataStreams_PermissionDenied >> KqpConstraints::AddNonColumnDoesnotReturnInternalError [GOOD] |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouseHttp [GOOD] >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache+Volatile [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile >> DataShardWrite::DeletePrepared-Volatile [GOOD] >> DataShardWrite::DelayedVolatileTxAndEvWrite |59.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Dedicated [GOOD] >> KqpScanArrowInChanels::AggregateByColumn [GOOD] |59.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut |59.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut >> DataShardWrite::RejectOnChangeQueueOverflow [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter >> KqpScanArrowFormat::AllTypesColumnsCellvec [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared-EvWrite+Volatile [GOOD] >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile >> KqpScanArrowFormat::AggregateNoColumnNoRemaps >> KqpScanArrowInChanels::SingleKey [GOOD] >> KqpScanArrowInChanels::JoinWithParams ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::DataStreams_PermissionDenied [GOOD] Test command err: 2025-06-03T10:25:21.948083Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed DataStreams database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url 
'/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': you have no permission to resolve database id into database endpoint. ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::AddNonColumnDoesnotReturnInternalError [GOOD] Test command err: Trying to start YDB, gRPC: 18096, MsgBus: 1674 2025-06-03T10:25:08.243885Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667199166627204:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.244035Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001731/r3tmp/tmp6DVY9F/pdisk_1.dat 2025-06-03T10:25:08.315934Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18096, node 1 2025-06-03T10:25:08.338379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.338394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.338397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:08.338465Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:08.343633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.343697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.344671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1674 TClient is connected to server localhost:1674 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:08.410541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:25:08.427003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.449694Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.468419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.480349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.685848Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667199166628649:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.685885Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.752914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.761774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.776490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.790079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.804169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.818654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.832711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:08.850800Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667199166629301:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.850835Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667199166629306:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.850853Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:08.851666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:08.859121Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667199166629308:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:08.966569Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667199166629359:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.243559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.267018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.267441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.267922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.473769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.518004Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jwtn5mbh5ka78zhqq3py9pnd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2E5YjVlMzMtYTUwNWI0OGYtMTIwMzYzNGMtMzRiZGUxYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Trying to start YDB, gRPC: 3051, MsgBus: 8445 2025-06-03T10:25:09.810638Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667201720567608:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.810665Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001731/r3tmp/tmpaHbYkF/pdisk_1.dat 2025-06-03T10:25:09.826683Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3051, node 2 2025-06-03T10:25:09.840133Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:09.840159Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:09.840163Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:09.840220Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8445 TClient is connected to server localhost:8445 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 ... e: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:16.682712Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.683639Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:25:16.766303Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:16.973032Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:17.262634Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.505172Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.838873Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:1717:3312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.838958Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.843875Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.037334Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.292669Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.548495Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:18.830035Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.095744Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.429448Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.694282Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:2385:3807], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.694328Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.694392Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:2390:3812], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.695523Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:19.852597Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:2392:3814], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:19.895114Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:2450:3853] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:20.103495Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.514495Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.208448Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:3059:4306], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:37: Error: At function: Member
:2:37: Error: Member not found: Value3. Did you mean Value? 2025-06-03T10:25:21.209320Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=6&id=NDY0NDE1ZTItYmUzOTYwOTctYjVhN2ZjNWMtM2I3Y2MzMmM=, ActorId: [6:2715:4059], ActorState: ExecuteState, TraceId: 01jwtn5zskdh8r02y6a0vzmgma, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: GENERIC_ERROR:
: Error: Type annotation, code: 1030
:2:17: Error: At function: RemovePrefixMembers, At function: PersistableRepr, At function: SqlProject, At tuple, At function: SqlProjectItem, At lambda
:2:37: Error: At function: Member
:2:37: Error: Member not found: Value3. Did you mean Value? 2025-06-03T10:25:21.224092Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:3068:4315], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Column is under build operation, write operation is not allowed to column: Value3 for table: /Root/AddNonColumnDoesnotReturnInternalError, code: 2017 2025-06-03T10:25:21.224867Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=6&id=NDY0NDE1ZTItYmUzOTYwOTctYjVhN2ZjNWMtM2I3Y2MzMmM=, ActorId: [6:2715:4059], ActorState: ExecuteState, TraceId: 01jwtn5zt4052yaqc3y3bay1y1, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: BAD_REQUEST:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Column is under build operation, write operation is not allowed to column: Value3 for table: /Root/AddNonColumnDoesnotReturnInternalError, code: 2017 2025-06-03T10:25:21.238816Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:3077:4324], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:2:94: Error: At function: KiUpdateTable!
:2:94: Error: Column 'Value3' is under the build operation '/Root/AddNonColumnDoesnotReturnInternalError'., code: 2017 2025-06-03T10:25:21.239301Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=6&id=NDY0NDE1ZTItYmUzOTYwOTctYjVhN2ZjNWMtM2I3Y2MzMmM=, ActorId: [6:2715:4059], ActorState: ExecuteState, TraceId: 01jwtn5ztkcr44z3z1s47qa638, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: BAD_REQUEST:
: Error: Type annotation, code: 1030
:2:94: Error: At function: KiUpdateTable!
:2:94: Error: Column 'Value3' is under the build operation '/Root/AddNonColumnDoesnotReturnInternalError'., code: 2017 2025-06-03T10:25:21.256000Z node 6 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [6:3086:4333], status: GENERIC_ERROR, issues:
: Error: Execution, code: 1060
:2:89: Error: At tuple, At tuple, At function: KqlDeleteRows, At function: Map, At function: Filter
:2:29: Error: At lambda
:2:88: Error: At function: ==
:2:82: Error: At function: Member, At function: Member, At function: Member
:2:82: Error: Member not found: Value3. Did you mean Value? 2025-06-03T10:25:21.256556Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=6&id=NDY0NDE1ZTItYmUzOTYwOTctYjVhN2ZjNWMtM2I3Y2MzMmM=, ActorId: [6:2715:4059], ActorState: ExecuteState, TraceId: 01jwtn5zv2ewsv7spasa5mahf6, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: GENERIC_ERROR:
: Error: Execution, code: 1060
:2:89: Error: At tuple, At tuple, At function: KqlDeleteRows, At function: Map, At function: Filter
:2:29: Error: At lambda
:2:88: Error: At function: ==
:2:82: Error: At function: Member, At function: Member, At function: Member
:2:82: Error: Member not found: Value3. Did you mean Value? 2025-06-03T10:25:21.313989Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 >> KqpScanArrowFormat::AggregateByColumn [GOOD] >> KqpScanArrowFormat::AggregateNoColumn >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoWithError [GOOD] >> KqpScanArrowInChanels::AggregateNoColumnNoRemaps [GOOD] >> test.py::test[aggregate-group_by_expr_only_join--Results] [GOOD] >> DataShardWrite::UpsertPreparedNoTxCache-Volatile [GOOD] >> TDatabaseResolverTests::DataStreams_Dedicated >> DataShardWrite::DelayedVolatileTxAndEvWrite [GOOD] >> KqpScanArrowInChanels::AggregateWithFunction >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer >> DataShardWrite::WriteCommitVersion >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit >> test.py::test[aggregate-group_by_rollup_grouping_hum_bind--Results] >> TDatabaseResolverTests::DataStreams_Dedicated [GOOD] >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateByColumn [GOOD] Test command err: Trying to start YDB, gRPC: 9810, MsgBus: 11523 2025-06-03T10:25:16.792833Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667230953537504:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:16.792864Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018c6/r3tmp/tmpAPfLW3/pdisk_1.dat 2025-06-03T10:25:16.866554Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667230953537483:2079] 1748946316792660 != 1748946316792663 2025-06-03T10:25:16.866820Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9810, node 1 2025-06-03T10:25:16.885552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:16.885566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:16.885569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:16.885624Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11523 2025-06-03T10:25:16.937663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.937701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.940312Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11523 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:16.998568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.008980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:17.018246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.055768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.091114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.103956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:17.289446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667235248506409:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.289484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.347249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.359387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.420366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.431915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.441725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.456969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.469932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.486198Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667235248507063:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.486236Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.486253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667235248507068:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:17.487211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:17.497382Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667235248507070:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:17.577759Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667235248507121:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:17.847713Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946317867, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 20038, MsgBus: 16577 2025-06-03T10:25:18.189126Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667239365552547:2150];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018c6/r3tmp/tmprmzpyM/pdisk_1.dat 2025-06-03T10:25:18.209376Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:18.225367Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667239365552422:2079] 1748946318187016 != 1748946318187019 2025-06-03T10:25:18.225435Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20038, node 2 2025-06-03T10:25:18.241515Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:18.241529Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:18.241531Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:18.241588Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16577 2025-06-03T10:25:18.287034Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:18.287070Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:18.289128Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16577 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } Child ... 
rd__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.306659Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.323910Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.347872Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667247884916698:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.347901Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.347948Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667247884916703:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.348800Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:20.351299Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667247884916705:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:20.434038Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667247884916756:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:20.801095Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946320709, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 17125, MsgBus: 11291 2025-06-03T10:25:21.061488Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667254610243847:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.061510Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018c6/r3tmp/tmpiGH63S/pdisk_1.dat 2025-06-03T10:25:21.080638Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:21.081226Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667254610243828:2079] 1748946321061396 != 1748946321061399 TServer::EnableGrpc on GrpcPort 17125, node 4 2025-06-03T10:25:21.092625Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:21.092641Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:21.092645Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:21.092702Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11291 TClient is connected to server localhost:11291 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:21.168279Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:21.168312Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:21.169122Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.169399Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:25:21.176668Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:21.188880Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:21.213641Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:21.232980Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:21.500890Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667254610245460:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.500923Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.511059Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.520074Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.529931Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.544029Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.557906Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.572161Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.586328Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.602292Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667254610246111:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.602323Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667254610246116:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.602328Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.603301Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:21.613079Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667254610246118:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:21.676692Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667254610246169:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:22.126897Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946321955, txId: 281474976715672] shutting down >> KqpScanArrowInChanels::JoinWithParams [GOOD] >> DataShardWrite::UpsertBrokenLockArbiter [GOOD] >> DataShardWrite::PreparedDistributedWritePageFault >> DataShardWrite::ExecSQLUpsertPrepared+EvWrite+Volatile [GOOD] >> DataShardWrite::InsertImmediate >> LdapAuthProviderTest_StartTls::LdapFetchGroupsFromAdLdapServer [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD >> test.py::test[aggregate-group_by_gs_few_empty--Results] [GOOD] >> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-Results] >> DataShardWrite::UpsertNoLocksArbiter [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter >> KqpScanArrowFormat::AggregateNoColumnNoRemaps [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::ClickHouse_PermissionDenied [GOOD] Test command err: 2025-06-03T10:25:24.005788Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed ClickHouse database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-clickhouse/v1/clusters/etn021us5r9rhld1vgbh/hosts': you have no permission to resolve database id into database endpoint. Please check that your service account has role `managed-clickhouse.viewer`. 
>> TDatabaseResolverTests::Greenplum_MasterNode >> KqpScanArrowInChanels::AggregateWithFunction [GOOD] >> KqpScanArrowFormat::AggregateWithFunction >> KqpScanArrowInChanels::AggregateEmptySum >> DataShardWrite::WriteCommitVersion [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::JoinWithParams [GOOD] Test command err: Trying to start YDB, gRPC: 16333, MsgBus: 16602 2025-06-03T10:25:19.419269Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667246658868609:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:19.419554Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018ba/r3tmp/tmppesHry/pdisk_1.dat 2025-06-03T10:25:19.613252Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:19.615579Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667246658868445:2079] 1748946319412785 != 1748946319412788 TServer::EnableGrpc on GrpcPort 16333, node 1 2025-06-03T10:25:19.665493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:19.665507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:19.665509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:19.665554Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:19.676468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:19.676796Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:19.678024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16602 TClient is connected to server localhost:16602 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:19.916862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.920359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:19.931098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:20.044720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:20.229085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:20.316315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:20.738644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667250953837411:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.738681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.902425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.931234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.988945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.998341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.012642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.026937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.040014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:21.058505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667255248805362:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.058529Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.058580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667255248805367:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:21.059356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:21.066879Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667255248805369:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:21.166738Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667255248805420:3405] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:21.332879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 864000000000 2025-06-03T10:25:21.414449Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946321458, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 4392, MsgBus: 28930 2025-06-03T10:25:21.660197Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667251790596911:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.660239Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018ba/r3tmp/tmpxB9PNi/pdisk_1.dat 2025-06-03T10:25:21.677972Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:21.678177Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667251790596891:2079] 1748946321660039 != 1748946321660042 TServer::EnableGrpc on GrpcPort 4392, node 2 2025-06-03T10:25:21.687989Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:21.688004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:21.688007Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:21.688087Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28930 TClient is connected to server localhost:28930 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: tru ... 
1474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:22.138940Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:22.156548Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667256085566469:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:22.156577Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:22.156636Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667256085566474:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:22.157970Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:22.165938Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667256085566476:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:22.244445Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667256085566527:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:22.491583Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946322522, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 19277, MsgBus: 1357 2025-06-03T10:25:22.959398Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667259418218221:2202];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018ba/r3tmp/tmpYZKp09/pdisk_1.dat 2025-06-03T10:25:22.961915Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:22.978201Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:22.978708Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667259418218056:2079] 1748946322951786 != 1748946322951789 TServer::EnableGrpc on GrpcPort 19277, node 3 2025-06-03T10:25:23.004331Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:23.004345Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:23.004349Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:23.004405Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1357 TClient is connected to server localhost:1357 2025-06-03T10:25:23.062006Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:23.062028Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:23.063001Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:23.067359Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:23.069572Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:23.141382Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:23.165855Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:23.201851Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:23.228114Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.381739Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667263713186981:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.381769Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.388661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.400973Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.424334Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.437490Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.460376Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.471337Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.486165Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.503257Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667263713187635:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.503288Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.503380Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667263713187640:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.504240Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:23.509819Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667263713187642:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:23.608363Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667263713187693:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:23.843137Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946323887, txId: 281474976715672] shutting down 2025-06-03T10:25:23.935037Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946323922, txId: 281474976715674] shutting down |59.9%| [TA] {RESULT} $(B)/ydb/services/persqueue_v1/ut/new_schemecache_ut/test-results/unittest/{meta.json ... results_accumulator.log} |59.9%| [LD] {RESULT} $(B)/ydb/core/grpc_streaming/ut/ydb-core-grpc_streaming-ut >> TDatabaseResolverTests::Greenplum_MasterNode [GOOD] >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsDisableRequestToAD [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood >> TDatabaseResolverTests::Ydb_Serverless_Timeout >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::WriteCommitVersion [GOOD] Test command err: 2025-06-03T10:25:16.107386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:16.107515Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:16.107560Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001889/r3tmp/tmpw4KA9Q/pdisk_1.dat 2025-06-03T10:25:16.245271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.266010Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:16.267354Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946315401724 != 1748946315401728 2025-06-03T10:25:16.312001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.312049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.322822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:16.403924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.426005Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:16.426342Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:16.426482Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:25:16.426572Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:16.438261Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:16.438486Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:16.438525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:16.438662Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:25:16.438669Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:25:16.438675Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:25:16.438734Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:16.438749Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:16.438760Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:25:16.449615Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:16.454324Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:25:16.454426Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:16.454457Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:25:16.454461Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:25:16.454465Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:25:16.454470Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:16.454545Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.454550Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.454658Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:25:16.454683Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:25:16.454803Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:16.454815Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:16.454825Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:25:16.454831Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:16.454836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:16.454842Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:25:16.454849Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:16.454864Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.454871Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.454879Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:25:16.454904Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:25:16.454908Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:16.454931Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:25:16.454986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:25:16.454999Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:25:16.455021Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:25:16.455031Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:16.455035Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:25:16.455041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:25:16.455046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.455140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:16.455147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:25:16.455152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:25:16.455156Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.455171Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:25:16.455175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:25:16.455179Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:25:16.455183Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:25:16.455189Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:16.455487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:25:16.455501Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:25:16.466330Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:25:16.466359Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.466367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.466380Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:25:16.466393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:16.613599Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.613633Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... 1474976715666] at 72075186224037890 is Executed 2025-06-03T10:25:25.150325Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1510:281474976715666] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:25.150331Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1510:281474976715666] at 72075186224037890 to execution unit ExecuteWrite 2025-06-03T10:25:25.150335Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1510:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-03T10:25:25.150339Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [1510:281474976715666] at 72075186224037890 2025-06-03T10:25:25.150343Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:694: Send commit decision from 72075186224037890 to 72075186224037889 2025-06-03T10:25:25.150347Z node 7 :TX_DATASHARD TRACE: datashard_kqp.cpp:725: Will wait for volatile decision from 72075186224037889 to 72075186224037890 2025-06-03T10:25:25.150360Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [1510:281474976715666] at 72075186224037890, row count=1 2025-06-03T10:25:25.150378Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037888 consumer 72075186224037888 seqno 1 txId 281474976715660 2025-06-03T10:25:25.150385Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:180: Deleted RS at 72075186224037890 source 72075186224037890 dest 72075186224037889 consumer 72075186224037889 seqno 2 txId 281474976715660 2025-06-03T10:25:25.150396Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-03T10:25:25.150402Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1510:281474976715666] at 72075186224037890 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:25.150406Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1510:281474976715666] at 72075186224037890 executing on unit ExecuteWrite 2025-06-03T10:25:25.150410Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1510:281474976715666] at 72075186224037890 to execution unit CompleteWrite 2025-06-03T10:25:25.150414Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1827: Trying to execute [1510:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-03T10:25:25.150427Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1510:281474976715666] at 72075186224037890 is DelayComplete 2025-06-03T10:25:25.150432Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1510:281474976715666] at 72075186224037890 executing on unit CompleteWrite 2025-06-03T10:25:25.150435Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1510:281474976715666] at 72075186224037890 to execution unit CompletedOperations 2025-06-03T10:25:25.150438Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [1510:281474976715666] at 72075186224037890 on unit CompletedOperations 2025-06-03T10:25:25.150443Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [1510:281474976715666] at 72075186224037890 is Executed 2025-06-03T10:25:25.150446Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1510:281474976715666] at 72075186224037890 executing on unit CompletedOperations 2025-06-03T10:25:25.150449Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [1510:281474976715666] at 72075186224037890 has finished 2025-06-03T10:25:25.150453Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:25.150456Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-03T10:25:25.150459Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-03T10:25:25.150462Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-03T10:25:25.150567Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [7:707:2590], Recipient [7:703:2588]: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-03T10:25:25.150573Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:25:25.150577Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-03T10:25:25.150585Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-03T10:25:25.150595Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [7:703:2588], Recipient [7:707:2590]: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-03T10:25:25.150598Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:25:25.150602Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 
72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-03T10:25:25.150606Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-03T10:25:25.150640Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1510} 2025-06-03T10:25:25.150711Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1510} 2025-06-03T10:25:25.150736Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:25:25.150741Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1510:281474976715666] at 72075186224037889 on unit ExecuteWrite 2025-06-03T10:25:25.150746Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037889 from 72075186224037889 to 72075186224037890 txId 281474976715666 2025-06-03T10:25:25.150751Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1510:281474976715666] at 72075186224037889 on unit CompleteWrite 2025-06-03T10:25:25.150762Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:25:25.150775Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-03T10:25:25.150819Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [7:703:2588], Recipient [7:707:2590]: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-03T10:25:25.150823Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:25:25.150826Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037890 source 72075186224037889 dest 72075186224037890 producer 72075186224037889 txId 281474976715666 2025-06-03T10:25:25.150831Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037890 got read set: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-03T10:25:25.150914Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:25:25.150920Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1510:281474976715666] at 72075186224037890 on unit ExecuteWrite 2025-06-03T10:25:25.150924Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 3 at 72075186224037890 from 72075186224037890 to 72075186224037889 txId 281474976715666 2025-06-03T10:25:25.150930Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1510:281474976715666] at 72075186224037890 on unit CompleteWrite 2025-06-03T10:25:25.150938Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1510 : 281474976715666] from 72075186224037890 at tablet 72075186224037890 send result to client [7:1068:2828] 2025-06-03T10:25:25.150943Z node 7 :TX_DATASHARD 
DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:25:25.150949Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-03T10:25:25.150996Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [7:707:2590], Recipient [7:703:2588]: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-03T10:25:25.151000Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:25:25.151003Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037890 dest 72075186224037889 producer 72075186224037890 txId 281474976715666 2025-06-03T10:25:25.151009Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037890 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037890 ReadSet.Size()# 2 Seqno# 3 Flags# 0} 2025-06-03T10:25:25.151020Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:755: Complete volatile write [1510 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [7:1068:2828] 2025-06-03T10:25:25.151035Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037890 2025-06-03T10:25:25.151096Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:25:25.151109Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:25:25.151170Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [7:707:2590], Recipient [7:703:2588]: {TEvReadSet step# 1510 txid# 281474976715666 TabletSource# 72075186224037889 TabletDest# 72075186224037890 SetTabletConsumer# 72075186224037890 Flags# 0 Seqno# 3} 2025-06-03T10:25:25.151176Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:25:25.151181Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037890 consumer 72075186224037890 txId 281474976715666 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Greenplum_PermissionDenied [GOOD] Test command err: 2025-06-03T10:25:25.424945Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Greenplum database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'mdb.api.cloud.yandex.net:443', url '/managed-greenplum/v1/clusters/etn021us5r9rhld1vgbh/master-hosts': you have no permission to resolve database id into database endpoint. 
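The TX_DATASHARD trace above walks through a volatile distributed write: shards 72075186224037889 and 72075186224037890 each send a commit decision to the peer as a TEvReadSet, wait for the peer's decision, and complete the write only once both decisions have arrived and been acked. The zero-size Flags# 7 readsets appear to be expectation messages, while the later Seqno# 3 readsets carry the actual decision payload. A toy C++ sketch of that handshake (an illustration only, not YDB's implementation; the struct and method names below are invented, and both readset kinds are collapsed into a single OnReadSet call):

// Toy model (not YDB code) of the volatile commit-decision exchange
// visible in the trace above: each participant sends its decision to the
// peer and commits only once every expected decision has arrived.
#include <cstdint>
#include <iostream>
#include <map>
#include <set>

enum class EDecision { Unknown, Commit, Abort };

struct TShard {
    uint64_t TabletId = 0;
    std::set<uint64_t> WaitingFor;            // peers we expect a decision from
    std::map<uint64_t, EDecision> Received;   // peer tablet id -> decision

    // Mirrors "Send commit decision from X to Y" / "Will wait for volatile
    // decision from Y to X" in the trace.
    void ExpectDecisionFrom(uint64_t peer) { WaitingFor.insert(peer); }

    // Mirrors "Receive RS at X source Y": record the peer's decision and
    // report whether this shard may now complete the transaction.
    bool OnReadSet(uint64_t from, EDecision d) {
        Received[from] = d;
        WaitingFor.erase(from);
        return WaitingFor.empty();
    }

    bool AllCommit() const {
        for (const auto& [peer, d] : Received) {
            (void)peer;
            if (d != EDecision::Commit) return false;
        }
        return true;
    }
};

int main() {
    TShard a{72075186224037889}, b{72075186224037890};
    a.ExpectDecisionFrom(b.TabletId);
    b.ExpectDecisionFrom(a.TabletId);

    // Decisions cross on the wire, as in the TEvReadSet pairs above.
    bool aReady = a.OnReadSet(b.TabletId, EDecision::Commit);
    bool bReady = b.OnReadSet(a.TabletId, EDecision::Commit);

    if (aReady && a.AllCommit()) std::cout << a.TabletId << " commits\n";
    if (bReady && b.AllCommit()) std::cout << b.TabletId << " commits\n";
}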
>> DataShardWrite::InsertImmediate [GOOD] >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] >> KqpScanArrowFormat::AggregateWithFunction [GOOD] >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/actors/ut/unittest >> TDatabaseResolverTests::Ydb_Serverless_Timeout [GOOD] Test command err: 2025-06-03T10:25:25.929012Z node 1 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle(HttpIncomingResponse): error=Error while trying to resolve managed Ydb database with id etn021us5r9rhld1vgbh via HTTP request to: endpoint 'ydbc.ydb.cloud.yandex.net:8789', url '/ydbc/cloud-prod/database?databaseId=etn021us5r9rhld1vgbh': Connection timeout >> TSubDomainTest::StartAndStopTenanNode ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::CoordinatorRunAtSubdomainNodeWhenAvailable2 [GOOD] Test command err: 2025-06-03T10:25:10.068834Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667206587365743:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:10.068857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001185/r3tmp/tmpVurZv2/pdisk_1.dat 2025-06-03T10:25:10.152456Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:26812 WaitRootIsUp 'dc-1'... 
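Both TDatabaseResolverTests failures quoted above follow the same pattern: a managed database id is expanded into a provider-specific HTTP endpoint and path, and any HTTP-level failure (permission denied for Greenplum, connection timeout for serverless YDB) is folded into one descriptive error line. A rough sketch of that shape, using only the endpoints, paths, and message text quoted in the log (the function names and struct here are assumptions, not the actual resolver API):

// Assumed shape, not the real TDatabaseResolver interface. Endpoints and
// URL templates are copied verbatim from the two test errors above.
#include <iostream>
#include <string>

struct TResolveRequest {
    std::string Endpoint;
    std::string Url;
};

TResolveRequest MakeRequest(const std::string& kind, const std::string& dbId) {
    if (kind == "Greenplum")
        return {"mdb.api.cloud.yandex.net:443",
                "/managed-greenplum/v1/clusters/" + dbId + "/master-hosts"};
    // Default: serverless YDB, as in TDatabaseResolverTests::Ydb_Serverless_Timeout.
    return {"ydbc.ydb.cloud.yandex.net:8789",
            "/ydbc/cloud-prod/database?databaseId=" + dbId};
}

// Folds the request and the underlying cause into the single error line
// format seen in the FQ_DATABASE_RESOLVER output.
std::string WrapError(const std::string& kind, const std::string& dbId,
                      const TResolveRequest& req, const std::string& cause) {
    return "Error while trying to resolve managed " + kind +
           " database with id " + dbId + " via HTTP request to: endpoint '" +
           req.Endpoint + "', url '" + req.Url + "': " + cause;
}

int main() {
    const std::string dbId = "etn021us5r9rhld1vgbh";
    auto req = MakeRequest("Greenplum", dbId);
    std::cout << WrapError("Greenplum", dbId, req,
                           "you have no permission to resolve database id "
                           "into database endpoint.")
              << "\n";
}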
TClient::Ls request: dc-1 2025-06-03T10:25:10.166909Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667206587365969:2140] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:10.169111Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667206587366384:2424] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:10.169164Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667206587365993:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:10.169176Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667206587365993:2153], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:10.169259Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:10.169884Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206587365618:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206587366391:2425] 2025-06-03T10:25:10.169917Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206587365618:2057] Subscribe: subscriber# [1:7511667206587366391:2425], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.169955Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206587366391:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587365618:2057] 2025-06-03T10:25:10.169968Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587366388:2425] 2025-06-03T10:25:10.169994Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206587365618:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206587366391:2425] 2025-06-03T10:25:10.170172Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206587365615:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206587366390:2425] 2025-06-03T10:25:10.170177Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206587365615:2054] Subscribe: subscriber# [1:7511667206587366390:2425], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.170222Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206587366390:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587365615:2054] 2025-06-03T10:25:10.170227Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587366387:2425] 2025-06-03T10:25:10.170239Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: 
[main][1:7511667206587366385:2425][/dc-1] Set up state: owner# [1:7511667206587365993:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.170279Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206587366389:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366386:2425], cookie# 1 2025-06-03T10:25:10.170283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206587366390:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366387:2425], cookie# 1 2025-06-03T10:25:10.170286Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667206587366391:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366388:2425], cookie# 1 2025-06-03T10:25:10.170373Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206587365615:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206587366390:2425] 2025-06-03T10:25:10.170377Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206587365615:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366390:2425], cookie# 1 2025-06-03T10:25:10.170484Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667206587365612:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667206587366389:2425] 2025-06-03T10:25:10.170493Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667206587365612:2051] Subscribe: subscriber# [1:7511667206587366389:2425], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:10.170503Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206587365612:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366389:2425], cookie# 1 2025-06-03T10:25:10.170509Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667206587365618:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667206587366391:2425], cookie# 1 2025-06-03T10:25:10.170667Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206587366390:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587365615:2054], cookie# 1 2025-06-03T10:25:10.170674Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667206587366389:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587365612:2051] 2025-06-03T10:25:10.170677Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206587366389:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587365612:2051], cookie# 1 2025-06-03T10:25:10.170681Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667206587366391:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587365618:2057], cookie# 1 2025-06-03T10:25:10.170687Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: 
[main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587366387:2425], cookie# 1 2025-06-03T10:25:10.170693Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667206587366385:2425][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:10.170698Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667206587366386:2425] 2025-06-03T10:25:10.170708Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667206587366385:2425][/dc-1] Path was already updated: owner# [1:7511667206587365993:2153], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:10.170713Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587366386:2425], cookie# 1 2025-06-03T10:25:10.170716Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667206587366385:2425][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:10.170721Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667206587366385:2425][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667206587366388:2425], cookie# 1 2025-06-03T10:25:10.170723Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667206587366385:2425][/dc-1] Unexpected sync response: sender# [1:7511667206587366388:2425], cookie# 1 2025-06-03T10:25:10.170772Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667206587365612:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667206587366389:2425] 2025-06-03T10:25:10.170916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:10.170937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:10.172706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:10.181193Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667206587365993:2153], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 
SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsL ... 44073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:24.757744Z node 12 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [12:7511667267057404886:2264], recipient# [12:7511667267057404885:2328], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.775615Z node 14 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [14:7511667257930253967:2111], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.775663Z node 14 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [14:7511667257930253967:2111], cacheItem# { Subscriber: { Subscriber: [14:7511667262225221279:2116] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:24.775687Z node 14 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [14:7511667266520188642:2136], recipient# [14:7511667266520188641:2308], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.777330Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7511667256588923325:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.777374Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7511667256588923325:2110], cacheItem# { Subscriber: { Subscriber: [13:7511667260883890683:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:24.777398Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [13:7511667265178858080:2187], recipient# [13:7511667265178858079:2311], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.777441Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [11:7511667257976031152:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.777481Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7511667257976031152:2105], cacheItem# { Subscriber: { Subscriber: [11:7511667262270998527:2122] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:24.777500Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7511667266565965855:2137], recipient# [11:7511667266565965854:2309], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.797380Z node 15 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [15:7511667257512757993:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:24.797432Z node 15 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [15:7511667257512757993:2105], cacheItem# { Subscriber: { Subscriber: [15:7511667261807725369:2121] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:24.797470Z node 15 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [15:7511667266102692773:2188], recipient# [15:7511667266102692772:2312], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:25.545542Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [13:7511667256588923325:2110], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:25.545601Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [13:7511667256588923325:2110], cacheItem# { Subscriber: { Subscriber: [13:7511667260883890658:2116] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:25.545627Z node 13 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [13:7511667269473825378:2188], recipient# [13:7511667269473825377:2312], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:25.708329Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# 
[11:7511667257976031152:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:25.708397Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [11:7511667257976031152:2105], cacheItem# { Subscriber: { Subscriber: [11:7511667262270998503:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:25.708427Z node 11 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [11:7511667270860933153:2138], recipient# [11:7511667270860933152:2310], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> KqpScanArrowFormat::AggregateNoColumn [GOOD] >> KqpScanArrowFormat::AggregateEmptySum >> LdapAuthProviderTest_StartTls::LdapFetchGroupsWithCustomGroupAttributeGood [GOOD] >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateWithFunction [GOOD] Test command err: Trying to start YDB, gRPC: 5109, MsgBus: 12594 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bc/r3tmp/tmpbKYyvK/pdisk_1.dat 2025-06-03T10:25:18.309953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:18.339224Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667238544633912:2079] 1748946318211250 != 1748946318211253 2025-06-03T10:25:18.341245Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5109, node 1 2025-06-03T10:25:18.361593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:18.361610Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:18.361613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:18.361657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:18.394072Z node 1 
:HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:18.394110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:18.397840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12594 TClient is connected to server localhost:12594 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:18.588775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.605174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:18.620799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.890968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:19.132533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.252722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.261247Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667247134570155:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.261376Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.271555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.319792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.387940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.446783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.458202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.526351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.593861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.673600Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667247134570823:2468], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.673632Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.674067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667247134570828:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.675916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:20.683584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:20.683700Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667247134570830:2472], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:20.759295Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667247134570881:3408] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:21.053103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 864000000000 2025-06-03T10:25:21.130486Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946321171, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 19466, MsgBus: 15662 2025-06-03T10:25:21.377153Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667253759293979:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.377194Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bc/r3tmp/tmptQKKKU/pdisk_1.dat 2025-06-03T10:25:21.397132Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:21.399490Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667253759293959:2079] 1748946321377008 != 1748946321377011 TServer::EnableGrpc on GrpcPort 19466, node 2 2025-06-03T10:25:21.408227Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:21.408242Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:21.408244Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:21.408299Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15662 TClient is connected to server localhost:15662 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 Crea ... 
eration part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.473130Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.490537Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.507172Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:23.568229Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667264034302810:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.568255Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.568346Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667264034302815:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:23.569026Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:23.574981Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667264034302817:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:23.670336Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667264034302871:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:24.545101Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946323992, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 8493, MsgBus: 14712 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bc/r3tmp/tmpvWF2Jn/pdisk_1.dat 2025-06-03T10:25:24.867797Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 8493, node 4 2025-06-03T10:25:24.894425Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:24.894889Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:24.894900Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:24.894902Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:24.894954Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14712 2025-06-03T10:25:24.954138Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:24.954164Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:24.959242Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14712 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
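One detail worth noting across these transcripts is the fixed entry prefix: an ISO-8601 timestamp, the node number, :COMPONENT, a severity, then a source file:line location. When triaging long logs like this one it helps to parse that prefix mechanically; below is a small standalone helper (a triage utility sketch, not part of ya or YDB) that assumes every entry follows the format shown above. Filtering parsed entries by Severity == "ERROR" quickly surfaces the resolver and schemereq failures quoted in this section.

// Parses lines of the form:
//   2025-06-03T10:25:25.424945Z node 2 :FQ_DATABASE_RESOLVER ERROR: database_resolver.cpp:175: message
#include <iostream>
#include <optional>
#include <regex>
#include <string>

struct TLogEntry {
    std::string Timestamp, Component, Severity, Location, Message;
    int Node = 0;
};

std::optional<TLogEntry> ParseLine(const std::string& line) {
    // Severity set taken from the levels that actually occur in this log.
    static const std::regex re(
        R"((\S+Z) node (\d+) :(\w+) (TRACE|DEBUG|INFO|NOTICE|WARN|ERROR): (\S+:\d+): (.*))");
    std::smatch m;
    if (!std::regex_match(line, m, re)) return std::nullopt;
    return TLogEntry{m[1], m[3], m[4], m[5], m[6], std::stoi(m[2])};
}

int main() {
    auto e = ParseLine(
        "2025-06-03T10:25:25.424945Z node 2 :FQ_DATABASE_RESOLVER ERROR: "
        "database_resolver.cpp:175: TraceId: traceId ResponseProcessor::Handle");
    if (e)
        std::cout << e->Severity << " from " << e->Component
                  << " on node " << e->Node << " at " << e->Location << "\n";
}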
2025-06-03T10:25:25.042368Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.051225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:25.066423Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.098757Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.131991Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.149947Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.324994Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270422672314:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.325025Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.327663Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.396466Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.409246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.424871Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.438860Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.450295Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.464763Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.539528Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270422672976:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.539565Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.539696Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270422672981:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.540609Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:25.545474Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667270422672983:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:25.606697Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667270422673034:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:25.997003Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946325854, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::PreparedDistributedWritePageFault [GOOD] Test command err: 2025-06-03T10:25:17.310645Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:17.310745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:17.310777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00187e/r3tmp/tmpePpQ1T/pdisk_1.dat 2025-06-03T10:25:17.430022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.448972Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:17.450371Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946316759729 != 1748946316759733 2025-06-03T10:25:17.492828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:17.492893Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:17.503703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:17.578294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:17.598668Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:17.598994Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:17.599116Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:25:17.599206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:17.611009Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:17.611292Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:17.611332Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:17.611571Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:25:17.611583Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:25:17.611591Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:25:17.611681Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:17.611712Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:17.611727Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:25:17.622146Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:17.627863Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:25:17.627986Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:17.628024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:25:17.628030Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:25:17.628037Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:25:17.628046Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:17.628134Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:17.628144Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:17.628282Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:25:17.628317Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:25:17.628461Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:17.628472Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:17.628485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:25:17.628492Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:17.628497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:17.628504Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:25:17.628511Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:17.628528Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:17.628535Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:17.628543Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:25:17.628569Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:25:17.628575Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:17.628604Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:25:17.628668Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:25:17.628684Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:25:17.628705Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:25:17.628715Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:17.628721Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:25:17.628729Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:25:17.628734Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:17.628824Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:17.628831Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:25:17.628836Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:25:17.628840Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:17.628855Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:25:17.628859Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:25:17.628866Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:25:17.628871Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:25:17.628877Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:17.629969Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:25:17.629996Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:25:17.640358Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:25:17.640396Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:17.640404Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:17.640417Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:25:17.640431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:17.795437Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:17.795472Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... Add [3500:1234567890011] at 72075186224037888 to execution unit LoadWriteDetails 2025-06-03T10:25:26.012550Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadTxDetails 2025-06-03T10:25:26.012647Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-03T10:25:26.012673Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:190: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-03T10:25:26.012681Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-03T10:25:26.012700Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:683: LoadWriteDetails at 72075186224037888 loaded writeOp from db 3500:1234567890011 keys extracted: 1 2025-06-03T10:25:26.012705Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012709Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadWriteDetails 2025-06-03T10:25:26.012714Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:26.012718Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:25:26.012739Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:1234567890011] is the new logically complete end at 72075186224037888 2025-06-03T10:25:26.012745Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:1234567890011] is the new logically incomplete end at 72075186224037888 2025-06-03T10:25:26.012749Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:1234567890011] at 72075186224037888 2025-06-03T10:25:26.012756Z node 7 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012760Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:26.012764Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit BuildWriteOutRS 2025-06-03T10:25:26.012769Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit BuildWriteOutRS 2025-06-03T10:25:26.012782Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012786Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit BuildWriteOutRS 2025-06-03T10:25:26.012790Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit StoreAndSendWriteOutRS 2025-06-03T10:25:26.012795Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-03T10:25:26.012801Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012804Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit StoreAndSendWriteOutRS 2025-06-03T10:25:26.012809Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit PrepareWriteTxInRS 2025-06-03T10:25:26.012814Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit PrepareWriteTxInRS 2025-06-03T10:25:26.012822Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012826Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit PrepareWriteTxInRS 2025-06-03T10:25:26.012830Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit LoadAndWaitInRS 2025-06-03T10:25:26.012835Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit LoadAndWaitInRS 2025-06-03T10:25:26.012841Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.012845Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit LoadAndWaitInRS 2025-06-03T10:25:26.012849Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit ExecuteWrite 2025-06-03T10:25:26.012854Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-03T10:25:26.012860Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [3500:1234567890011] at 
72075186224037888 2025-06-03T10:25:26.013057Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:122: Tablet 72075186224037888 is not ready for [3500:1234567890011] execution 2025-06-03T10:25:26.013080Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:431: tx 1234567890011 at 72075186224037888 released its data 2025-06-03T10:25:26.013088Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Restart 2025-06-03T10:25:26.013093Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:25:26.013098Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:25:26.013102Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:26.013107Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:26.013227Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:26.013234Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit ExecuteWrite 2025-06-03T10:25:26.013239Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [3500:1234567890011] at 72075186224037888 2025-06-03T10:25:26.013318Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 1234567890011 at 72075186224037888, record: Operations { Type: OPERATION_UPSERT TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } ColumnIds: 1 ColumnIds: 2 PayloadIndex: 0 PayloadFormat: FORMAT_CELLVEC } TxId: 1234567890011 TxMode: MODE_PREPARE Locks { Op: Commit } 2025-06-03T10:25:26.013333Z node 7 :TX_DATASHARD TRACE: datashard_write_operation.cpp:190: Table /Root/table, shard: 72075186224037888, write point (Int32 : 1) 2025-06-03T10:25:26.013339Z node 7 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Int32 : 1) table: [72057594046644480:2:1] 2025-06-03T10:25:26.013351Z node 7 :TX_DATASHARD DEBUG: datashard_write_operation.cpp:524: tx 1234567890011 at 72075186224037888 restored its data 2025-06-03T10:25:26.013397Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [3500:1234567890011] at 72075186224037888, row count=1 2025-06-03T10:25:26.013413Z node 7 :TX_DATASHARD TRACE: locks.cpp:194: Lock 1234567890001 marked broken at v{min} 2025-06-03T10:25:26.013435Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-03T10:25:26.013450Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:26.013455Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit ExecuteWrite 2025-06-03T10:25:26.013460Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompleteWrite 2025-06-03T10:25:26.013466Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-03T10:25:26.013530Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is DelayComplete 2025-06-03T10:25:26.013535Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompleteWrite 2025-06-03T10:25:26.013538Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:1234567890011] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:25:26.013542Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:1234567890011] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:25:26.013547Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:1234567890011] at 72075186224037888 is Executed 2025-06-03T10:25:26.013550Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:1234567890011] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:25:26.013554Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:1234567890011] at 72075186224037888 has finished 2025-06-03T10:25:26.013557Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:26.013559Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:25:26.013562Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:26.013565Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:26.013700Z node 7 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-03T10:25:26.013850Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:26.013860Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-03T10:25:26.013875Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [3500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:791:2649] 2025-06-03T10:25:26.013885Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 >> TSchemeShardCheckProposeSize::CopyTables [GOOD] >> TSchemeShardDecimalTypesInTables::Parameterless >> TSubDomainTest::Boot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowInChanels::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 11303, MsgBus: 7676 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bb/r3tmp/tmpmWOe1W/pdisk_1.dat 2025-06-03T10:25:18.462409Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:18.553965Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667240783544881:2079] 1748946318356026 != 1748946318356029 2025-06-03T10:25:18.556427Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
2025-06-03T10:25:18.557267Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:18.557311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:18.558481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11303, node 1 2025-06-03T10:25:18.572532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:18.572547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:18.572550Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:18.572597Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7676 TClient is connected to server localhost:7676 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:18.945151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:18.949357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:18.964456Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.308057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.565898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:19.589435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.929969Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667245078513856:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.930017Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.985812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:19.999943Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.026757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.085784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.115476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.181530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.246209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.327606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667249373481816:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.327628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.327774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667249373481821:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.328609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:20.332962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:20.333038Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667249373481823:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:20.426648Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667249373481874:3405] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:21.478664Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946320905, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 22158, MsgBus: 7243 2025-06-03T10:25:21.755598Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667254041938551:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.755637Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bb/r3tmp/tmpdfY1Md/pdisk_1.dat 2025-06-03T10:25:21.772778Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:21.776105Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667254041938532:2079] 1748946321755468 != 1748946321755471 TServer::EnableGrpc on GrpcPort 22158, node 2 2025-06-03T10:25:21.787618Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:21.787637Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:21.787639Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:21.787695Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7243 TClient is connected to server localhost:7243 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:25:21.860 ... 
ARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:24.392856Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:24.407687Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:24.478404Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667265180604804:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.478429Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.478545Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667265180604809:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.479486Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:24.482743Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667265180604811:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:24.550413Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667265180604862:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:24.957968Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946324867, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 28318, MsgBus: 17043 2025-06-03T10:25:25.344229Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667270010102316:2243];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:25.344940Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018bb/r3tmp/tmpT177Dp/pdisk_1.dat 2025-06-03T10:25:25.358995Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28318, node 4 2025-06-03T10:25:25.366712Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:25.366724Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:25.366726Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:25.366773Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17043 TClient is connected to server localhost:17043 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:25.444203Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:25.444236Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:25.445360Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:25.448156Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.450774Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:25.454264Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.478291Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:25.528500Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:25.553769Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.790398Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270010103718:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.790432Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.807532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.824157Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.845379Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.858368Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.877106Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.891319Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.906874Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:25.925368Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270010104372:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.925394Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.925515Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667270010104377:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:25.926411Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:25.933314Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667270010104379:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:26.022961Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667274305071726:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:26.348864Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946326309, txId: 281474976715672] shutting down >> TModifyUserTest::ModifyUser ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_LdapsScheme::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-03T10:25:03.399335Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667177298928346:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.399448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152d/r3tmp/tmpTZ6xgJ/pdisk_1.dat 2025-06-03T10:25:03.546523Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667177298928187:2079] 1748946303396782 != 1748946303396785 2025-06-03T10:25:03.549604Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6350, node 1 2025-06-03T10:25:03.573578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.573594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.573597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.573648Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.610229Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:03.610257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:03.612149Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:03.685396Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.685561Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.685567Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.686082Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:20515, port: 20515 2025-06-03T10:25:03.686121Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: 
cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:03.737485Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:25:03.782176Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****yqPA (5275FA4D) () has now valid token of ldapuser@ldap 2025-06-03T10:25:04.060215Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667179961825450:2209];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152d/r3tmp/tmpdKleqY/pdisk_1.dat 2025-06-03T10:25:04.065216Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:04.073057Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.073583Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667179961825267:2079] 1748946304057692 != 1748946304057695 TServer::EnableGrpc on GrpcPort 28935, node 2 2025-06-03T10:25:04.085054Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.085069Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.085071Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.085140Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.164485Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.164517Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.165510Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.194418Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.196692Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.196710Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.196901Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16826, port: 16826 2025-06-03T10:25:04.196940Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.293610Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:16826. Invalid credentials 2025-06-03T10:25:04.293820Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****X4aw (F5F28A0D) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:16826. 
Invalid credentials)' 2025-06-03T10:25:04.564190Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667179582757643:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.564216Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152d/r3tmp/tmpvZPjOO/pdisk_1.dat 2025-06-03T10:25:04.582644Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.586609Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667179582757622:2079] 1748946304564003 != 1748946304564006 TServer::EnableGrpc on GrpcPort 62434, node 3 2025-06-03T10:25:04.595613Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.595626Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.595629Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.595683Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.660258Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.661758Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.661774Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.661926Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:25690, port: 25690 2025-06-03T10:25:04.661947Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.669454Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.669494Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.670399Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.717630Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:25690. Invalid credentials 2025-06-03T10:25:04.717923Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****nucg (E41EAE38) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldaps://localhost:25690. 
Invalid credentials)' test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152d/r3tmp/tmpOXFhta/pdisk_1.dat 2025-06-03T10:25:05.154089Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667183986231148:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:05.170698Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:05.171030Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:05.171306Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667183986230936:2079] 1748946305151504 != 1748946305151507 TServer::EnableGrpc on GrpcPort 6406, node 4 2025-06-03T10:25:05.179856Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.179882Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.179884Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.179936Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.254525Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.254559Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.255525Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.299919Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.302114Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.302127Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.302268Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:28677, port: 28677 2025-06-03T10:25:05.302285Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.370813Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net ... 
istributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:05.822165Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:05.822167Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:05.822224Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.894433Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.894466Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.896503Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.988111Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.989767Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.989786Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.990005Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31141, port: 31141 2025-06-03T10:25:05.990034Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:06.061578Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:06.105471Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:06.106186Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:06.106208Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:06.151171Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:06.193489Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:06.193989Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****NQzw (4EFF01DE) () has now valid token of ldapuser@ldap 2025-06-03T10:25:09.795362Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****NQzw (4EFF01DE) 2025-06-03T10:25:09.795410Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31141, port: 31141 
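The refresh that begins above re-runs the same "init: scheme: ldaps, uris: ..." step before binding. Below is a minimal sketch of what such an init amounts to with the standard OpenLDAP C client; this is an assumption for illustration, not the provider's actual wrapper in ldap_auth_provider.cpp, and the TLS option shown is a hypothetical hardening choice rather than something taken from the log.

#include <ldap.h>

// Sketch: create an LDAP handle for an ldaps:// URI like the ones logged.
// Assumes the OpenLDAP client library; error handling is abbreviated.
LDAP* InitLdapsHandle(const char* uri) {
    LDAP* ld = nullptr;
    if (ldap_initialize(&ld, uri) != LDAP_SUCCESS) {
        return nullptr;                      // malformed URI or out of memory
    }
    int version = LDAP_VERSION3;             // v3 is needed for extensible-match filters
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);
    int reqcert = LDAP_OPT_X_TLS_DEMAND;     // hypothetical: demand a valid server certificate
    ldap_set_option(ld, LDAP_OPT_X_TLS_REQUIRE_CERT, &reqcert);
    return ld;
}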
2025-06-03T10:25:09.795437Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:09.849570Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:09.901504Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:09.901716Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:09.901733Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:09.949533Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:09.993532Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:09.994051Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****NQzw (4EFF01DE) () has now valid token of ldapuser@ldap 2025-06-03T10:25:13.798103Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****NQzw (4EFF01DE) 2025-06-03T10:25:13.798179Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:31141, port: 31141 2025-06-03T10:25:13.798206Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:13.845611Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:13.897448Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:13.897755Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:13.897772Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:13.941539Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:13.985964Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:13.986665Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****NQzw (4EFF01DE) () has now valid token of ldapuser@ldap 
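Each cycle in this log is the same two-step exchange: a simple bind as the service account ("bind: bindDn: cn=robouser,..."), then one subtree search for the user's memberOf attribute. A hedged sketch with the OpenLDAP C API follows; the DNs and filter are copied from the log entries above, while the function name is invented for illustration.

#include <ldap.h>
#include <cstring>

// Sketch of one bind + user-lookup cycle. A failed bind corresponds to the
// "Could not perform initial LDAP bind" / permanent-error path in the log.
bool BindAndFetchGroups(LDAP* ld, const char* bindDn, const char* password) {
    berval cred;
    cred.bv_val = const_cast<char*>(password);
    cred.bv_len = std::strlen(password);
    if (ldap_sasl_bind_s(ld, bindDn, LDAP_SASL_SIMPLE, &cred,
                         nullptr, nullptr, nullptr) != LDAP_SUCCESS) {
        return false;
    }
    const char* attrs[] = {"memberOf", nullptr};   // the attribute requested in the log
    LDAPMessage* res = nullptr;
    int rc = ldap_search_ext_s(ld, "dc=search,dc=yandex,dc=net", LDAP_SCOPE_SUBTREE,
                               "(uid=ldapuser)", const_cast<char**>(attrs),
                               /*attrsonly=*/0, nullptr, nullptr, nullptr,
                               LDAP_NO_LIMIT, &res);
    if (res) {
        ldap_msgfree(res);
    }
    return rc == LDAP_SUCCESS;
}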
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00152d/r3tmp/tmpzwtr2q/pdisk_1.dat 2025-06-03T10:25:16.436627Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667231639015807:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:16.445222Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:16.450917Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26974, node 6 2025-06-03T10:25:16.453054Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511667231639015617:2079] 1748946316435096 != 1748946316435099 2025-06-03T10:25:16.461321Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:16.461332Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:16.461334Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:16.461385Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:16.510482Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:16.512986Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:16.512999Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:16.513206Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:63041, port: 63041 2025-06-03T10:25:16.513231Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:16.540604Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.540637Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.541700Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:16.562949Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:16.605865Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-rag (D296333E) () has now valid token of ldapuser@ldap 2025-06-03T10:25:21.436625Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7511667231639015807:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.436672Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:25:21.446377Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****-rag (D296333E) 2025-06-03T10:25:21.446420Z node 6 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:63041, port: 63041 2025-06-03T10:25:21.446444Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:21.505541Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:21.549786Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-rag (D296333E) () has now valid token of ldapuser@ldap 2025-06-03T10:25:25.450778Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****-rag (D296333E) 2025-06-03T10:25:25.450812Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:63041, port: 63041 2025-06-03T10:25:25.450846Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:25.523608Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:25.569908Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****-rag (D296333E) () has now valid token of ldapuser@ldap ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_nonSecure::LdapRefreshGroupsInfoDisableNestedGroupsGood [GOOD] Test command err: 2025-06-03T10:25:03.089701Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667175592662954:2197];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.105841Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001520/r3tmp/tmpOoXZ4J/pdisk_1.dat 2025-06-03T10:25:03.262902Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667175592662796:2079] 1748946303080281 != 1748946303080284 2025-06-03T10:25:03.266123Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4296, node 1 2025-06-03T10:25:03.284636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:03.284664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:03.287892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:03.309534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.309551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.309555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.309609Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.509386Z node 1 :TICKET_PARSER DEBUG: 
ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.509909Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.509915Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.510107Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5093, port: 5093 2025-06-03T10:25:03.510488Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:03.521093Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:25:03.566651Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****C-eQ (6C79E2FD) () has now valid token of ldapuser@ldap 2025-06-03T10:25:03.904640Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667176756677230:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:03.904663Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001520/r3tmp/tmprG5pWC/pdisk_1.dat 2025-06-03T10:25:03.919714Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:03.921893Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667176756677210:2079] 1748946303904523 != 1748946303904526 TServer::EnableGrpc on GrpcPort 6354, node 2 2025-06-03T10:25:03.931416Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:03.931438Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:03.931441Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:03.931496Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:03.997352Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:03.998668Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:03.998687Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:03.998909Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:5587, port: 5587 2025-06-03T10:25:03.998958Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=invalidRobouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.005185Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:5587. 
Invalid credentials 2025-06-03T10:25:04.005373Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****N0qA (C1F0D65F) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=invalidRobouser,dc=search,dc=yandex,dc=net on server ldap://localhost:5587. Invalid credentials)' 2025-06-03T10:25:04.010064Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.010094Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.012985Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.348103Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667178920983069:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.348122Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001520/r3tmp/tmp0JJXrK/pdisk_1.dat 2025-06-03T10:25:04.361638Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.362110Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667178920983049:2079] 1748946304347997 != 1748946304348000 TServer::EnableGrpc on GrpcPort 30854, node 3 2025-06-03T10:25:04.375175Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.375188Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.375191Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.375234Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.411511Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.414215Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.414249Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.414481Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:65496, port: 65496 2025-06-03T10:25:04.414526Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.432884Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:201: Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:65496. Invalid credentials 2025-06-03T10:25:04.433081Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****n8tg (FBE6311C) () has now permanent error message 'Could not login via LDAP (Could not perform initial LDAP bind for dn cn=robouser,dc=search,dc=yandex,dc=net on server ldap://localhost:65496. 
Invalid credentials)' 2025-06-03T10:25:04.451910Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.451936Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.453016Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.823620Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667180970117343:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:04.823647Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001520/r3tmp/tmpb816Wg/pdisk_1.dat 2025-06-03T10:25:04.839234Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:04.839560Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667180970117324:2079] 1748946304823525 != 1748946304823528 TServer::EnableGrpc on GrpcPort 25860, node 4 2025-06-03T10:25:04.849973Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:04.849984Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:04.849986Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:04.850043Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:04.929291Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:04.929342Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:04.930341Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:04.940556Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:04.944300Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:04.944315Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:04.944484Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:16075, port: 16075 2025-06-03T10:25:04.944509Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:04.946918Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, ... 
r.cpp:228: got bad distributable configuration 2025-06-03T10:25:05.540344Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:05.540428Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:05.540434Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:05.540621Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7580, port: 7580 2025-06-03T10:25:05.540644Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:05.543366Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:05.543399Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:05.544087Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:05.576670Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:05.621507Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:05.621699Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:05.621708Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.665572Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.709535Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:05.709933Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GyBA (D59423EE) () has now valid token of ldapuser@ldap 2025-06-03T10:25:10.441364Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7511667182608586842:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:10.441406Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:25:10.445357Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****GyBA (D59423EE) 2025-06-03T10:25:10.445467Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7580, port: 7580 
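The filter (member:1.2.840.113556.1.4.1941:=uid=ldapuser,...) seen above uses LDAP_MATCHING_RULE_IN_CHAIN, an Active Directory extensible-match rule that expands nested membership in a single server-side query; the "1.1" attribute list asks for no attributes, only matching DNs. When that query does not resolve everything, the provider falls back to the per-level (|(entryDn=...)...) searches that follow it in the log. A sketch of building both filter forms is below; the helper names are illustrative, and RFC 4515 escaping of special filter characters is omitted.

#include <string>
#include <vector>

// Single-query form: let the server chase nested membership itself.
std::string InChainFilter(const std::string& userDn) {
    return "(member:1.2.840.113556.1.4.1941:=" + userDn + ")";
}

// Fallback form: one OR-filter over the current level's group DNs,
// matching the "(|(entryDn=...)(entryDn=...))" entries in the log.
std::string EntryDnOrFilter(const std::vector<std::string>& groupDns) {
    std::string filter = "(|";
    for (const std::string& dn : groupDns) {
        filter += "(entryDn=" + dn + ")";
    }
    filter += ")";
    return filter;
}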
2025-06-03T10:25:10.445499Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:10.484500Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:10.529638Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:10.529874Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:10.529886Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.578963Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.625419Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:10.626517Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GyBA (D59423EE) () has now valid token of ldapuser@ldap 2025-06-03T10:25:14.449510Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****GyBA (D59423EE) 2025-06-03T10:25:14.449585Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7580, port: 7580 2025-06-03T10:25:14.449613Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:14.473325Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:14.523713Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:14.524249Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:14.524274Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.569441Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.613957Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:14.614416Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****GyBA (D59423EE) () has now valid token of ldapuser@ldap 
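The "Try to get nested groups - tree traversal" entries above show a breadth-first walk: each round issues one (|(entryDn=...)...) search for the current level's groups and feeds the returned memberOf values into the next round, stopping once no new parents appear. A minimal sketch of that loop follows, with the per-level search abstracted as a caller-supplied callback; that interface is an assumption for illustration, not YDB's actual one.

#include <functional>
#include <set>
#include <string>
#include <vector>

// Sketch of the level-by-level nested-group walk seen in the log.
// searchMemberOf stands in for one "(|(entryDn=...)...)" subtree search
// that returns the memberOf DNs of the given groups.
std::set<std::string> CollectNestedGroups(
        const std::vector<std::string>& directGroups,
        const std::function<std::vector<std::string>(
            const std::vector<std::string>&)>& searchMemberOf) {
    std::set<std::string> known(directGroups.begin(), directGroups.end());
    std::vector<std::string> frontier = directGroups;
    while (!frontier.empty()) {
        std::vector<std::string> next;
        for (const std::string& parent : searchMemberOf(frontier)) {
            if (known.insert(parent).second) {
                next.push_back(parent);   // traverse each group at most once
            }
        }
        frontier.swap(next);              // one LDAP round-trip per nesting level
    }
    return known;
}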
2025-06-03T10:25:16.057425Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667232825503332:2209];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001520/r3tmp/tmpdXLTWn/pdisk_1.dat 2025-06-03T10:25:16.074492Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:16.086900Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:16.087258Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511667232825503149:2079] 1748946316042302 != 1748946316042305 TServer::EnableGrpc on GrpcPort 2984, node 6 2025-06-03T10:25:16.109550Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:16.109566Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:16.109569Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:16.109626Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:16.162031Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.162065Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.163312Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:16.229379Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:16.230008Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:16.230018Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:16.230165Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29352, port: 29352 2025-06-03T10:25:16.230186Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:16.249376Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:16.293698Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****MM-Q (E10D4C5C) () has now valid token of ldapuser@ldap 2025-06-03T10:25:21.044762Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7511667232825503332:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.044806Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:25:21.054002Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****MM-Q (E10D4C5C) 2025-06-03T10:25:21.054071Z node 6 :LDAP_AUTH_PROVIDER 
DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29352, port: 29352 2025-06-03T10:25:21.054106Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:21.060450Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:21.101661Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****MM-Q (E10D4C5C) () has now valid token of ldapuser@ldap 2025-06-03T10:25:24.058243Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****MM-Q (E10D4C5C) 2025-06-03T10:25:24.058275Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29352, port: 29352 2025-06-03T10:25:24.058299Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:24.066851Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:24.110246Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****MM-Q (E10D4C5C) () has now valid token of ldapuser@ldap |60.0%| [TA] $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSubDomainTest::CreateTablet >> TSchemeShardDecimalTypesInTables::Parameterless [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] >> KqpLocksTricky::TestNoWrite >> KqpScanArrowFormat::AggregateEmptySum [GOOD] >> DataShardWrite::UpsertLostPrepareArbiter [GOOD] >> DataShardWrite::UpsertNoLocksArbiterRestart >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] |60.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |60.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage |60.0%| [TA] {RESULT} $(B)/ydb/core/fq/libs/actors/ut/test-results/unittest/{meta.json ... results_accumulator.log} |60.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ydb-core-blobstorage-ut_blobstorage >> KqpLocks::TwoPhaseTx >> KqpSinkLocks::EmptyRange ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::DoubleWriteUncommittedThenDoubleReadWithCommit [GOOD] Test command err: 2025-06-03T10:25:16.175183Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:16.175307Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:16.175349Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001890/r3tmp/tmpCHfsNC/pdisk_1.dat 2025-06-03T10:25:16.311433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.331227Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:16.332450Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946315517595 != 1748946315517599 2025-06-03T10:25:16.382254Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.382317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.393986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:16.484168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.510440Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:16.510748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:16.510856Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:25:16.510935Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:16.523375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:16.523680Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:16.523726Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:16.523953Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:25:16.523967Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:25:16.523976Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:25:16.524062Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:16.524090Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:16.524106Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:25:16.535169Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:16.542182Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:25:16.542334Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:16.542374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:25:16.542380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:25:16.542387Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:25:16.542395Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:16.542499Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.542508Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.542664Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:25:16.542700Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:25:16.542855Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:16.542867Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:16.542880Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:25:16.542887Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:16.542892Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:16.542899Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:25:16.542906Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:16.542922Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.542929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.542937Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:25:16.542963Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:25:16.542969Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:16.542995Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:25:16.543080Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:25:16.543096Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:25:16.543123Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:25:16.543134Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:16.543140Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:25:16.543147Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:25:16.543152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.543247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:16.543253Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:25:16.543259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:25:16.543264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.543278Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:25:16.543282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:25:16.543289Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:25:16.543294Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:25:16.543300Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:16.543649Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:25:16.543660Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:25:16.554052Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:25:16.554098Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.554108Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.554124Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:25:16.554142Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:16.707470Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.707513Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... 888 on unit CompletedOperations 2025-06-03T10:25:28.307565Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:25:28.307569Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:25:28.307572Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-03T10:25:28.307576Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:25:28.307586Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-03T10:25:28.308864Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [8:1646:2439], Recipient [8:1303:2390]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:25:28.308883Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-03T10:25:28.309436Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [8:1648:2440], Recipient [8:1303:2390]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:25:28.309448Z node 8 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } 2025-06-03T10:25:28.311819Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 278003712, Sender [7:1631:2941], Recipient [8:1576:2434] 2025-06-03T10:25:28.311840Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-03T10:25:28.311885Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435074, Sender [8:1303:2390], Recipient [8:1303:2390]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:25:28.311891Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:25:28.311919Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-03T10:25:28.311964Z node 8 
:TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } SendingShards: 72075186224037888 ReceivingShards: 72075186224037888 Op: Commit } 2025-06-03T10:25:28.311985Z node 8 :TX_DATASHARD TRACE: key_validator.cpp:33: -- AddReadRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-03T10:25:28.311993Z node 8 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-03T10:25:28.312008Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckWrite 2025-06-03T10:25:28.312021Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:25:28.312027Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckWrite 2025-06-03T10:25:28.312032Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:28.312036Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:25:28.312046Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-03T10:25:28.312068Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-03T10:25:28.312073Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:25:28.312076Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:28.312080Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteWrite 2025-06-03T10:25:28.312083Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteWrite 2025-06-03T10:25:28.312090Z node 8 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:7] at 72075186224037888 2025-06-03T10:25:28.312097Z node 8 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v1500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v1500/18446744073709551615 ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-03T10:25:28.312113Z node 8 :TX_DATASHARD TRACE: datashard_kqp.cpp:806: KqpCommitLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-03T10:25:28.312123Z node 8 :TX_DATASHARD TRACE: datashard_user_db.cpp:368: Committing changes lockId# 281474976715661 in localTid# 1001 shard# 72075186224037888 2025-06-03T10:25:28.312145Z node 8 :TX_DATASHARD DEBUG: 
execute_write_unit.cpp:414: Skip empty write operation for [0:7] at 72075186224037888 2025-06-03T10:25:28.312186Z node 8 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-03T10:25:28.312199Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:28.312205Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteWrite 2025-06-03T10:25:28.312209Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-03T10:25:28.312212Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:25:28.312230Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:28.312234Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-03T10:25:28.312238Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:25:28.312241Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:25:28.312253Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:25:28.312256Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:25:28.312261Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-03T10:25:28.314447Z node 8 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-03T10:25:28.314476Z node 8 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:7] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:25:28.314486Z node 8 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 7 at tablet 72075186224037888 send to client, propose latency: 1 ms, status: STATUS_COMPLETED 2025-06-03T10:25:28.314506Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-03T10:25:28.314524Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:28.314998Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270977, Sender [8:58:2063], Recipient [8:1303:2390]: {TEvNotifyPlanStep TabletId# 72075186224037888 PlanStep# 1501} 2025-06-03T10:25:28.315007Z node 8 :TX_DATASHARD TRACE: datashard_impl.h:3169: StateWork, processing event TEvMediatorTimecast::TEvNotifyPlanStep 2025-06-03T10:25:28.315013Z node 8 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-03T10:25:28.315019Z node 8 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 { items { int64_value: 0 } items { int64_value: 1000 } }, { items { int64_value: 1 } items { int64_value: 1001 } }, { items { 
int64_value: 2 } items { int64_value: 1002 } }, { items { int64_value: 3 } items { int64_value: 1003 } }, { items { int64_value: 4 } items { int64_value: 1004 } }, { items { int64_value: 5 } items { int64_value: 1005 } }, { items { int64_value: 6 } items { int64_value: 5001 } } { items { int64_value: 0 } items { int64_value: 2000 } }, { items { int64_value: 1 } items { int64_value: 2001 } }, { items { int64_value: 2 } items { int64_value: 2002 } }, { items { int64_value: 3 } items { int64_value: 2003 } }, { items { int64_value: 4 } items { int64_value: 2004 } }, { items { int64_value: 5 } items { int64_value: 2005 } }, { items { int64_value: 6 } items { int64_value: 5002 } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 1000 } } rows { items { int64_value: 1 } items { int64_value: 1001 } } rows { items { int64_value: 2 } items { int64_value: 1002 } } rows { items { int64_value: 3 } items { int64_value: 1003 } } rows { items { int64_value: 4 } items { int64_value: 1004 } } rows { items { int64_value: 5 } items { int64_value: 1005 } } rows { items { int64_value: 6 } items { int64_value: 5001 } } } result_sets { columns { name: "index" type { optional_type { item { type_id: INT64 } } } } columns { name: "value" type { optional_type { item { type_id: INT64 } } } } rows { items { int64_value: 0 } items { int64_value: 2000 } } rows { items { int64_value: 1 } items { int64_value: 2001 } } rows { items { int64_value: 2 } items { int64_value: 2002 } } rows { items { int64_value: 3 } items { int64_value: 2003 } } rows { items { int64_value: 4 } items { int64_value: 2004 } } rows { items { int64_value: 5 } items { int64_value: 2005 } } rows { items { int64_value: 6 } items { int64_value: 5002 } } } tx_meta { } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ldap_auth_provider/ut/unittest >> LdapAuthProviderTest_StartTls::LdapFetchGroupsUseInvalidSearchFilterBad [GOOD] Test command err: 2025-06-03T10:25:02.354645Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667172903431375:2197];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:02.361660Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001532/r3tmp/tmpnQ6EDx/pdisk_1.dat 2025-06-03T10:25:02.602269Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667172903431217:2079] 1748946302351165 != 1748946302351168 2025-06-03T10:25:02.602870Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:02.611822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:02.611856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:02.617856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13747, node 1 2025-06-03T10:25:02.649272Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable 
config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:02.649742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:02.649752Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:02.649812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:02.797555Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:02.804543Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:02.804562Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:02.805167Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16501, port: 16501 2025-06-03T10:25:02.805200Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:02.862021Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:02.914754Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:02.915169Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:02.915200Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:02.961435Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:03.009455Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: memberOf 2025-06-03T10:25:03.010169Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****0LCQ (B7421F08) () has now valid token of ldapuser@ldap 2025-06-03T10:25:07.353889Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667172903431375:2197];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:07.353935Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:25:08.351253Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****0LCQ (B7421F08) 2025-06-03T10:25:08.351384Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:16501, port: 16501 2025-06-03T10:25:08.351415Z node 1 
:LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:08.417578Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:08.417802Z node 1 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:340: LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:16501 return no entries 2025-06-03T10:25:08.418023Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****0LCQ (B7421F08) () has now permanent error message 'Could not login via LDAP (LDAP user ldapuser does not exist. LDAP search for filter uid=ldapuser on server ldaps://localhost:16501 return no entries)' 2025-06-03T10:25:12.361757Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****0LCQ (B7421F08) 2025-06-03T10:25:13.331762Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667219453263652:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:13.333937Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001532/r3tmp/tmpRpOm2L/pdisk_1.dat 2025-06-03T10:25:13.355510Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:13.355784Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667219453263551:2079] 1748946313330834 != 1748946313330837 TServer::EnableGrpc on GrpcPort 16792, node 2 2025-06-03T10:25:13.379230Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:13.379248Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:13.379251Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:13.379309Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:13.428955Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:13.428994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:13.430027Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:13.442914Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:13.444988Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:13.445005Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:13.445255Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:29259, port: 29259 2025-06-03T10:25:13.445279Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 
2025-06-03T10:25:13.529912Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:13.530890Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy 2025-06-03T10:25:13.531126Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****oDWg (2BB355AD) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy)' 2025-06-03T10:25:13.531202Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:13.531209Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:13.531453Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:29259, port: 29259 2025-06-03T10:25:13.531475Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:13.581627Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:13.581949Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy 2025-06-03T10:25:13.582200Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****oDWg (2BB355AD) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy)' 2025-06-03T10:25:16.334782Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****oDWg (2BB355AD) 2025-06-03T10:25:16.334869Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:16.334875Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:16.335211Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldaps, uris: ldaps://localhost:29259, port: 29259 2025-06-03T10:25:16.335234Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:16.426607Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:16.427364Z node 2 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy 2025-06-03T10:25:16.427591Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****oDWg (2BB355AD) () has now retryable error message 'Could not login via LDAP (Could not perform search for filter uid=ldapuser on server ldaps://localhost:29259. Server is busy)' 2025-06-03T10:25:18.336552Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:75116 ... 
190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:23.961742Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:23.961744Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:23.961787Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:24.029373Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:24.029460Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:24.029465Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:24.029644Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:29100, port: 29100 2025-06-03T10:25:24.029670Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:24.065540Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:24.117599Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:24.169513Z node 3 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:24.218010Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****WXrA (7929A026) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001532/r3tmp/tmpYP9O1C/pdisk_1.dat 2025-06-03T10:25:24.734335Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:24.762277Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:24.764067Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667268016084617:2079] 1748946324691716 != 1748946324691719 TServer::EnableGrpc on GrpcPort 27029, node 4 2025-06-03T10:25:24.837939Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:24.837978Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:24.838503Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:24.844946Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:24.844957Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:24.844959Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:24.845003Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
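
Two failure classes are visible in the TICKET_PARSER traces in this section: a transient search failure ("Server is busy") is recorded as a retryable error message, and the same ticket is checked again on the next "Refreshing ticket" cycle, whereas "LDAP user ... does not exist" and a malformed filter ("Bad search filter", later in this section) are recorded as permanent error messages.
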
2025-06-03T10:25:25.080445Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:25.080533Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:25.080538Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:25.080732Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:8203, port: 8203 2025-06-03T10:25:25.080751Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:25.121185Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:25.179899Z node 4 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: memberOf 2025-06-03T10:25:25.226018Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****HoXQ (F684493E) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001532/r3tmp/tmpWZMnTW/pdisk_1.dat 2025-06-03T10:25:25.846144Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:25.930475Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:25.933218Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511667271491566145:2079] 1748946325793587 != 1748946325793590 2025-06-03T10:25:25.940165Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:25.940201Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:25.950048Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9606, node 5 2025-06-03T10:25:25.977530Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:25.977545Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:25.977547Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:25.977597Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:26.197367Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:25:26.199686Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:26.199697Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:26.199892Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7268, port: 7268 2025-06-03T10:25:26.199913Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:26.241311Z node 5 :LDAP_AUTH_PROVIDER 
DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:26.293667Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: uid=ldapuser, attributes: groupDN 2025-06-03T10:25:26.341450Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:357: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net), attributes: 1.1 2025-06-03T10:25:26.342338Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:382: Try to get nested groups - tree traversal 2025-06-03T10:25:26.342351Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managerOfProject1,cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=project1,cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:26.385731Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=managers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)(entryDn=cn=developers,cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:26.433674Z node 5 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:404: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: (|(entryDn=cn=people,ou=groups,dc=search,dc=yandex,dc=net)), attributes: groupDN 2025-06-03T10:25:26.434046Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****HcnA (CB2D407F) () has now valid token of ldapuser@ldap test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001532/r3tmp/tmpihqjHK/pdisk_1.dat 2025-06-03T10:25:26.974279Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667274780522904:2153];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:26.986948Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:27.026434Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10793, node 6 2025-06-03T10:25:27.057681Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:27.057705Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:27.065726Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:27.157508Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:27.157519Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:27.157521Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:27.157564Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:27.313352Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 
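
The LDAP_AUTH_PROVIDER traces above show the group-fetch sequence: init, optional start TLS for ldap:// schemes, bind as the service DN, a subtree search for the user requesting the membership attribute, then a nested-group search using the Active Directory "matching rule in chain" OID 1.2.840.113556.1.4.1941 with attribute list "1.1" (no attributes), followed by a level-by-level tree traversal over entryDn filters. Below is a minimal standalone sketch, not YDB code, that reproduces the chain query with OpenLDAP's libldap; the URI, bind DN, password, and base DN are placeholders lifted from the test log.

    // Build with: c++ sketch.cpp -lldap
    #define LDAP_DEPRECATED 1  // enables ldap_simple_bind_s in OpenLDAP headers
    #include <ldap.h>
    #include <cstdio>

    int main() {
        LDAP* ld = nullptr;
        int version = LDAP_VERSION3;
        LDAPMessage* res = nullptr;
        // "1.1" requests no attributes, exactly as in the trace above.
        char* attrs[] = { const_cast<char*>("1.1"), nullptr };

        if (ldap_initialize(&ld, "ldap://localhost:7268") != LDAP_SUCCESS) return 1;
        ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);
        ldap_start_tls_s(ld, nullptr, nullptr);  // the "start TLS" step for ldap:// schemes
        ldap_simple_bind_s(ld, "cn=robouser,dc=search,dc=yandex,dc=net",
                           "placeholder-password");

        // 1.2.840.113556.1.4.1941 is the AD "matching rule in chain" OID:
        // the server expands nested group membership itself.
        int rc = ldap_search_ext_s(
            ld, "dc=search,dc=yandex,dc=net", LDAP_SCOPE_SUBTREE,
            "(member:1.2.840.113556.1.4.1941:=uid=ldapuser,dc=search,dc=yandex,dc=net)",
            attrs, 0, nullptr, nullptr, nullptr, LDAP_NO_LIMIT, &res);
        std::printf("search rc=%d entries=%d\n", rc, ldap_count_entries(ld, res));

        ldap_msgfree(res);
        ldap_unbind_ext_s(ld, nullptr, nullptr);
        return 0;
    }
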
2025-06-03T10:25:27.313424Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:25:27.313430Z node 6 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:25:27.313579Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:232: init: scheme: ldap, uris: ldap://localhost:7780, port: 7780 2025-06-03T10:25:27.314184Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:179: start TLS 2025-06-03T10:25:27.369874Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:195: bind: bindDn: cn=robouser,dc=search,dc=yandex,dc=net 2025-06-03T10:25:27.425766Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:308: search: baseDn: dc=search,dc=yandex,dc=net, scope: subtree, filter: &(uid=ldapuser)(), attributes: memberOf 2025-06-03T10:25:27.425794Z node 6 :LDAP_AUTH_PROVIDER DEBUG: ldap_auth_provider.cpp:323: Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:7780. Bad search filter 2025-06-03T10:25:27.425989Z node 6 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****_2gw (05BC8847) () has now permanent error message 'Could not login via LDAP (Could not perform search for filter &(uid=ldapuser)() on server ldap://localhost:7780. Bad search filter)' >> TSubDomainTest::Boot [GOOD] >> TSubDomainTest::CheckAccessCopyTable >> TModifyUserTest::ModifyUser [GOOD] >> TModifyUserTest::ModifyLdapUser >> KqpLocks::DifferentKeyUpdate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::ImmediateAndPlannedCommittedOpsRace [GOOD] Test command err: 2025-06-03T10:25:15.444255Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:15.444370Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:15.444410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018b1/r3tmp/tmpsxlwBb/pdisk_1.dat 2025-06-03T10:25:15.623256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.644083Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:15.653730Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946314642704 != 1748946314642708 2025-06-03T10:25:15.702255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:15.702333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:15.714004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:15.812745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:15.859648Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:15.860092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:15.860251Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:25:15.860356Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:15.905196Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:15.909746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:15.909823Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:15.910076Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:25:15.910093Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:25:15.910105Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:25:15.910220Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:15.910285Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:15.910316Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:25:15.925649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:15.932165Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:25:15.932299Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:15.932342Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:25:15.932349Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:25:15.932356Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:25:15.932365Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:15.932472Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:15.932482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:15.932625Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:25:15.932658Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:25:15.932806Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:15.932818Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:15.932830Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:25:15.932837Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:15.932842Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:15.932849Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:25:15.932856Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:15.932874Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:15.932884Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:15.932894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:25:15.932918Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:25:15.932924Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:15.932955Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:25:15.933018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:25:15.933033Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:25:15.933055Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:25:15.933066Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:15.933072Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:25:15.933079Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:25:15.933084Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:15.933175Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:15.933180Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:25:15.933185Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:25:15.933191Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:15.933208Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:25:15.933213Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:25:15.933218Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:25:15.933222Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:25:15.933229Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:15.933614Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:25:15.933630Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:25:15.944048Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:25:15.944110Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:15.944122Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:15.944140Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:25:15.944161Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:16.099323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.099362Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890011 ... observed 2 more commits after readset unblock ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR ... unblocking NKikimr::TEvBlobStorage::TEvPut from TABLET_REQ_WRITE_LOG to BS_PROXY_ACTOR 2025-06-03T10:25:28.545783Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:28.545806Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit StoreAndSendWriteOutRS 2025-06-03T10:25:28.545816Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 2 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 1234567890012 2025-06-03T10:25:28.545862Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:28.545866Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037888 on unit CompleteWrite 2025-06-03T10:25:28.545884Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037888 at tablet 72075186224037888 send result to client [7:836:2688] 2025-06-03T10:25:28.545895Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:28.545916Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-03T10:25:28.545944Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:28.545949Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [1500:1234567890011] at 72075186224037888 on unit CompleteWrite 2025-06-03T10:25:28.545954Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [1500 : 1234567890011] from 72075186224037888 at tablet 72075186224037888 send result to client [7:799:2663] 
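
The TX_DATASHARD traces in this test repeatedly walk the datashard's unit pipeline: an operation is advanced through a chain of execution units (for an immediate write: CheckWrite, BuildAndWaitDependencies, ExecuteWrite, FinishProposeWrite, CompletedOperations), each unit reports a status, and units answering DelayComplete or DelayCompleteNoMoreRestarts are finished afterwards, in the transaction's Complete phase ("Complete execution for [...] on unit ..."). A minimal sketch of that driver loop follows; the names mirror the log but this is illustrative only, not the actual YDB implementation.

    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    enum class EStatus {
        Executed, ExecutedNoMoreRestarts, DelayComplete, DelayCompleteNoMoreRestarts
    };

    struct TUnit {
        std::string Name;
        EStatus (*Execute)();  // stands in for "Trying to execute ... on unit <Name>"
    };

    int main() {
        // Unit chain as logged for an immediate write operation.
        std::deque<TUnit> plan = {
            {"CheckWrite",               [] { return EStatus::Executed; }},
            {"BuildAndWaitDependencies", [] { return EStatus::Executed; }},
            {"ExecuteWrite",             [] { return EStatus::ExecutedNoMoreRestarts; }},
            {"FinishProposeWrite",       [] { return EStatus::DelayCompleteNoMoreRestarts; }},
            {"CompletedOperations",      [] { return EStatus::Executed; }},
        };
        std::vector<std::string> delayed;  // units completed later, in the Complete phase

        while (!plan.empty()) {            // "Advance execution plan ..."
            TUnit unit = plan.front();
            plan.pop_front();
            EStatus st = unit.Execute();   // "Execution status for [...] is <st>"
            if (st == EStatus::DelayComplete || st == EStatus::DelayCompleteNoMoreRestarts) {
                delayed.push_back(unit.Name);
            }
        }
        // "TTxWrite complete": run delayed completions, then reply to the client.
        for (const auto& name : delayed) {
            std::cout << "Complete execution on unit " << name << "\n";
        }
        std::cout << "send result to client\n";
        return 0;
    }
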
2025-06-03T10:25:28.545967Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037888 {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-03T10:25:28.545971Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:28.545991Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [7:693:2583], Recipient [7:698:2585]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-03T10:25:28.546000Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:25:28.546006Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 1234567890012 2025-06-03T10:25:28.546022Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 2 Flags# 0} 2025-06-03T10:25:28.546032Z node 7 :TX_DATASHARD TRACE: operation.cpp:67: Filled readset for [2000:1234567890012] from=72075186224037888 to=72075186224037889origin=72075186224037888 2025-06-03T10:25:28.546049Z node 7 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-03T10:25:28.546066Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [7:693:2583], Recipient [7:698:2585]: {TEvReadSet step# 1500 txid# 1234567890011 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-03T10:25:28.546071Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:25:28.546076Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 1234567890011 2025-06-03T10:25:28.546100Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:28.546105Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:25:28.546113Z node 7 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 8 at tablet 72075186224037888 send to client, propose latency: 3 ms, status: STATUS_COMPLETED 2025-06-03T10:25:28.546133Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:28.546156Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [7:698:2585], Recipient [7:698:2585]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:28.546161Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:28.546167Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: 
TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:25:28.546175Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:25:28.546181Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [2000:1234567890012] at 72075186224037889 for LoadAndWaitInRS 2025-06-03T10:25:28.546187Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit LoadAndWaitInRS 2025-06-03T10:25:28.546195Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-03T10:25:28.546210Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit LoadAndWaitInRS 2025-06-03T10:25:28.546215Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit ExecuteWrite 2025-06-03T10:25:28.546221Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit ExecuteWrite 2025-06-03T10:25:28.546226Z node 7 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [2000:1234567890012] at 72075186224037889 2025-06-03T10:25:28.546238Z node 7 :TX_DATASHARD TRACE: execute_write_unit.cpp:384: Operation [2000:1234567890012] at 72075186224037889 aborting because locks are not valid 2025-06-03T10:25:28.546248Z node 7 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=1234567890012; 2025-06-03T10:25:28.546261Z node 7 :TX_DATASHARD INFO: datashard_write_operation.cpp:684: Write transaction 1234567890012 at 72075186224037889 has an error: Operation is aborting because locks are not valid 2025-06-03T10:25:28.546272Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-03T10:25:28.546275Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit ExecuteWrite 2025-06-03T10:25:28.546279Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompleteWrite 2025-06-03T10:25:28.546282Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-03T10:25:28.546345Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is DelayComplete 2025-06-03T10:25:28.546349Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompleteWrite 2025-06-03T10:25:28.546353Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2000:1234567890012] at 72075186224037889 to execution unit CompletedOperations 2025-06-03T10:25:28.546356Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2000:1234567890012] at 72075186224037889 on unit CompletedOperations 2025-06-03T10:25:28.546361Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [2000:1234567890012] at 72075186224037889 is Executed 2025-06-03T10:25:28.546364Z node 7 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2000:1234567890012] at 72075186224037889 executing on unit CompletedOperations 2025-06-03T10:25:28.546368Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [2000:1234567890012] at 72075186224037889 has finished 2025-06-03T10:25:28.546372Z node 7 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:28.546376Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-03T10:25:28.546380Z node 7 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-03T10:25:28.546383Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-03T10:25:28.546483Z node 7 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:25:28.546488Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [2000:1234567890012] at 72075186224037889 on unit CompleteWrite 2025-06-03T10:25:28.546494Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:826: Complete write [2000 : 1234567890012] from 72075186224037889 at tablet 72075186224037889 send result to client [7:836:2688] 2025-06-03T10:25:28.546500Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 72075186224037889 {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-03T10:25:28.546506Z node 7 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:25:28.546521Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [7:698:2585], Recipient [7:693:2583]: {TEvReadSet step# 2000 txid# 1234567890012 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 2} 2025-06-03T10:25:28.546525Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:25:28.546529Z node 7 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 1234567890012 >> TSubDomainTest::StartAndStopTenanNode [GOOD] >> TSubDomainTest::StartTenanNodeAndStopAtDestructor >> KqpTx::SnapshotRO >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/arrow/unittest >> KqpScanArrowFormat::AggregateEmptySum [GOOD] Test command err: Trying to start YDB, gRPC: 25056, MsgBus: 27604 2025-06-03T10:25:18.189635Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667239712086439:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:18.189764Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/u93c/0018c0/r3tmp/tmpmHl6WN/pdisk_1.dat 2025-06-03T10:25:18.364562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:18.364597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:18.369403Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667239712086271:2079] 1748946318186025 != 1748946318186028 2025-06-03T10:25:18.373009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25056, node 1 2025-06-03T10:25:18.461365Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:18.557502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:18.557517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:18.557520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:18.557563Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27604 TClient is connected to server localhost:27604 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:19.381035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.384562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:19.390196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.562030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
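The TX_DATASHARD trace above walks a single write operation through the datashard execution pipeline (LoadAndWaitInRS -> ExecuteWrite -> CompleteWrite -> CompletedOperations). Note that when lock validation fails the operation is not dropped: ExecuteWrite records the STATUS_LOCKS_BROKEN error, the operation still advances to CompleteWrite (which returns DelayComplete), and the error result plus the delayed ReadSet ack are sent during the Complete phase. A minimal illustrative sketch of that two-phase unit pipeline follows; the names and statuses are simplified stand-ins for the real NKikimr execution units, not the actual YDB code:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for a datashard execution-unit pipeline.
// Each unit returns Executed (advance immediately) or DelayComplete
// (its side effects are flushed later, in the Complete() phase).
enum class EStatus { Executed, DelayComplete };

struct TUnit {
    std::string Name;
    std::function<EStatus()> Execute;
};

int main() {
    bool locksValid = false;  // the trace above shows the broken-lock case

    std::vector<TUnit> pipeline = {
        {"LoadAndWaitInRS", [] { return EStatus::Executed; }},
        {"ExecuteWrite", [&] {
            if (!locksValid)
                std::cout << "aborting because locks are not valid\n";
            return EStatus::Executed;  // the abort is still a completed unit
        }},
        {"CompleteWrite", [] { return EStatus::DelayComplete; }},
        {"CompletedOperations", [] { return EStatus::Executed; }},
    };

    std::vector<std::string> delayed;
    for (const auto& unit : pipeline) {
        std::cout << "executing on unit " << unit.Name << "\n";
        if (unit.Execute() == EStatus::DelayComplete)
            delayed.push_back(unit.Name);  // replayed at Complete() time
    }
    // Complete phase: send the result (and delayed RS ack) to the client.
    for (const auto& name : delayed)
        std::cout << "Complete execution on unit " << name << "\n";
}
```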
2025-06-03T10:25:19.681755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.750766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:19.973089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667244007055242:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:19.973130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.069747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.090266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.160465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.179141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.245406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.307155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.391834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:20.467294Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667248302023202:2468], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.467327Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.467379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667248302023207:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:20.468571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:20.473703Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:20.473787Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667248302023209:2472], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:20.568449Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667248302023260:3410] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:21.051361Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946320926, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 10246, MsgBus: 20372 2025-06-03T10:25:21.434309Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667253461793582:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:21.434332Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018c0/r3tmp/tmpFhQ8S4/pdisk_1.dat 2025-06-03T10:25:21.449850Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:21.450094Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667253461793562:2079] 1748946321434110 != 1748946321434113 TServer::EnableGrpc on GrpcPort 10246, node 2 2025-06-03T10:25:21.459503Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:21.459520Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:21.459522Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:21.459570Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20372 TClient is connected to server localhost:20372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } ... 
shard: 72057594046644480 2025-06-03T10:25:24.174369Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:24.195443Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:24.228071Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667266138931665:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.228102Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.228190Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667266138931670:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:24.229076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:24.233582Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667266138931672:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:24.318781Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667266138931723:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:26.274945Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946325168, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 28437, MsgBus: 17906 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018c0/r3tmp/tmpqAM0UI/pdisk_1.dat 2025-06-03T10:25:27.001002Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:27.029257Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511667273420038150:2079] 1748946326973240 != 1748946326973243 2025-06-03T10:25:27.034874Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28437, node 4 2025-06-03T10:25:27.073823Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:27.073853Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:27.077664Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:27.077985Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:27.077988Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:27.077991Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:27.078031Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17906 TClient is connected to server localhost:17906 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
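The recurring WARN/ERROR block during each test's startup (pool "default" NOT_FOUND, a TPoolCreatorActor scheduled retry for "doublechecking", then a TX_PROXY error with "path exist, request accepts it") is a benign create-if-absent race: several actors fetch the default resource pool, all miss, all propose creation, and the losers accept the already-existing path as success. A loose, hypothetical model of that pattern (not the actual KQP workload service code; names are invented for illustration):

```cpp
#include <iostream>
#include <mutex>
#include <set>
#include <string>

// Illustrative create-if-absent registry. Concurrent creators treat
// "already exists" as success, then re-fetch ("doublechecking").
class TPoolRegistry {
public:
    bool Fetch(const std::string& name) {
        std::lock_guard<std::mutex> g(Lock);
        return Pools.count(name) > 0;  // NOT_FOUND on first access
    }
    // Returns true whether we created the pool or it already existed.
    bool CreateIfAbsent(const std::string& name) {
        std::lock_guard<std::mutex> g(Lock);
        auto [it, inserted] = Pools.insert(name);
        if (!inserted)
            std::cout << "path exist, request accepts it\n";
        return true;
    }
private:
    std::mutex Lock;
    std::set<std::string> Pools;
};

int main() {
    TPoolRegistry reg;
    if (!reg.Fetch("default")) {        // Failed to fetch pool default
        reg.CreateIfAbsent("default");  // races with other actors
        std::cout << "doublechecking: " << reg.Fetch("default") << "\n";
    }
}
```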
2025-06-03T10:25:27.250115Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:27.257684Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:27.274305Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:27.310700Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:27.394569Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:27.452279Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:27.737120Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667277715007090:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:27.737148Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:27.753147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.782342Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.804377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.823039Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.838755Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.855006Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.872931Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:27.917837Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667277715007743:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:27.917863Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:27.917963Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667277715007748:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:27.918853Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:27.922416Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:27.922545Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667277715007750:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:28.022248Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667282009975106:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:28.596194Z node 4 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946328430, txId: 281474976715672] shutting down >> TModifyUserTest::ModifyLdapUser [GOOD] >> TModifyUserTest::ModifyUserIsEnabled >> test.py::test[aggregate-percentiles_containers--ForceBlocks] [GOOD] >> test.py::test[aggregate-percentiles_containers--Results] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 >> KqpTx::CommitRoTx >> TSchemeShardDecimalTypesInTables::Parameters_22_9-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false >> TGRpcStreamingTest::ClientDisconnects >> TModifyUserTest::ModifyUserIsEnabled [GOOD] >> TGRpcStreamingTest::SimpleEcho >> TGRpcStreamingTest::WritesDoneFromClient >> TSubDomainTest::CreateTablet [GOOD] >> TSubDomainTest::CreateTabletForUnknownDomain >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-false [GOOD] >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true |60.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |60.0%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant |60.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_storage_tenant/ydb-core-tx-tx_proxy-ut_storage_tenant >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] >> test.py::test[window-win_func_first_last_over_nonopt-default.txt-Results] [GOOD] >> test.py::test[window-win_func_in_lib--Results] |60.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TSchemeShardDecimalTypesInTables::Parameters_35_6-EnableParameterizedDecimal-true [GOOD] >> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TModifyUserTest::ModifyUserIsEnabled [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00116d/r3tmp/tmpp0hMAr/pdisk_1.dat 2025-06-03T10:25:28.042048Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:28.212554Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667278553683994:2079] 1748946327870093 != 1748946327870096 2025-06-03T10:25:28.228035Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:28.229116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:28.229132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-03T10:25:28.231689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13134 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:28.538231Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667278553684021:2086] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:28.560471Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667282848651846:2261] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:28.560520Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667278553684271:2111], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:28.560543Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667282848651661:2147][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667278553684271:2111], cookie# 1 2025-06-03T10:25:28.560884Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667282848651682:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651679:2147], cookie# 1 2025-06-03T10:25:28.560890Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667282848651683:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651680:2147], cookie# 1 2025-06-03T10:25:28.560893Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667282848651684:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651681:2147], cookie# 1 2025-06-03T10:25:28.560902Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667278553683964:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651682:2147], cookie# 1 2025-06-03T10:25:28.560909Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667278553683967:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651683:2147], cookie# 1 2025-06-03T10:25:28.560914Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667278553683970:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667282848651684:2147], cookie# 1 2025-06-03T10:25:28.560919Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667282848651682:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278553683964:2049], cookie# 1 2025-06-03T10:25:28.560922Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667282848651683:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278553683967:2052], cookie# 1 2025-06-03T10:25:28.560924Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667282848651684:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278553683970:2055], cookie# 1 2025-06-03T10:25:28.560929Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667282848651661:2147][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667282848651679:2147], cookie# 1 2025-06-03T10:25:28.560934Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667282848651661:2147][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:28.560937Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667282848651661:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667282848651680:2147], cookie# 1 2025-06-03T10:25:28.560939Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667282848651661:2147][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:28.560943Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667282848651661:2147][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667282848651681:2147], cookie# 1 2025-06-03T10:25:28.560945Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667282848651661:2147][/dc-1] Unexpected sync response: sender# [1:7511667282848651681:2147], cookie# 1 2025-06-03T10:25:28.560952Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667278553684271:2111], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:28.567023Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667278553684271:2111], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667282848651661:2147] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:28.567066Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667278553684271:2111], cacheItem# { Subscriber: { Subscriber: [1:7511667282848651661:2147] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:28.570210Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667282848651847:2262], recipient# [1:7511667282848651846:2261], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } 
ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:28.570252Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667282848651846:2261] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:28.628166Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667282848651846:2261] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:28.629199Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667282848651846:2261] Handle TEvDescribeSchemeResult Forward to# [1:7511667282848651845:2260] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
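The SCHEME_BOARD_SUBSCRIBER lines in this test show the replica sync protocol directly: each sync round is tagged with a cookie, fans out to three replicas ("size# 3, half# 1"), is declared done once a majority of two responses arrive, and the third, late reply is discarded as "Unexpected sync response". A minimal sketch of that majority-quorum bookkeeping, under the assumption that the visible counters mean what they appear to mean (a simplified stand-in, not the actual subscriber.cpp logic):

```cpp
#include <cstdint>
#include <iostream>

// Illustrative majority-quorum sync over N scheme board replicas.
// The cookie identifies the sync round; replies for a round that has
// already finished (or for a stale cookie) are ignored.
class TSyncQuorum {
public:
    explicit TSyncQuorum(uint32_t size) : Size(size), Half(size / 2) {}

    void Start(uint64_t cookie) {
        Cookie = cookie;
        Successes = Failures = 0;
        Done = false;
    }

    void OnReply(uint64_t cookie, bool ok) {
        if (cookie != Cookie || Done) {
            std::cout << "Unexpected sync response: cookie# " << cookie << "\n";
            return;
        }
        ok ? ++Successes : ++Failures;
        if (Successes > Half) {  // with size# 3, half# 1: 2 replies suffice
            Done = true;
            std::cout << "Sync is done: successes# " << Successes << "\n";
        } else {
            std::cout << "Sync is in progress: successes# " << Successes
                      << ", size# " << Size << "\n";
        }
    }

private:
    uint32_t Size;
    uint32_t Half;
    uint64_t Cookie = 0;
    uint32_t Successes = 0;
    uint32_t Failures = 0;
    bool Done = false;
};

int main() {
    TSyncQuorum q(3);
    q.Start(1);
    q.OnReply(1, true);  // Sync is in progress: 1 of 3
    q.OnReply(1, true);  // Sync is done: majority reached
    q.OnReply(1, true);  // late third reply -> unexpected, ignored
}
```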
2025-06-03T10:25:28.652682Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667278553684021:2086] Handle TEvProposeTransaction 2025-06-03T10:25:28.652695Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667278553684021:2086] TxId# 281474976710657 ProcessProposeTransaction 2025-06-03T10:25:28.652727Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667278553684021:2086] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7511667282848651852:2266] 2025-06-03T10:25:28.729517Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667282848651852:2266] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-06-03T10:25:28.729551Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:751166728 ... E DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667289929140966:2115], cacheItem# { Subscriber: { Subscriber: [3:7511667289929141219:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 9 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946330684 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 9 IsSync: true Partial: 0 } 2025-06-03T10:25:31.082623Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667294224108644:2332], recipient# [3:7511667294224108643:2331], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-03T10:25:31.082631Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [3:7511667294224108643:2331] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:31.082639Z node 3 :TX_PROXY ERROR: schemereq.cpp:1072: Actor# [3:7511667294224108643:2331] txid# 281474976715662, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-03T10:25:31.082655Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667294224108643:2331] txid# 281474976715662, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-03T10:25:31.082659Z node 3 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [3:7511667294224108643:2331] txid# 281474976715662 SEND to# [3:7511667294224108642:2330] Source {TEvProposeTransactionStatus Status# 5} 2025-06-03T10:25:31.089775Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [3:7511667289929140725:2095] Handle TEvProposeTransaction 2025-06-03T10:25:31.089787Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [3:7511667289929140725:2095] TxId# 281474976715663 
ProcessProposeTransaction 2025-06-03T10:25:31.089797Z node 3 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [3:7511667289929140725:2095] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [3:7511667294224108646:2334] 2025-06-03T10:25:31.090453Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [3:7511667294224108646:2334] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user2" Password: "password" CanLogin: false } } } } UserToken: "\n\005user2\022\030\022\026\n\024all-users@well-known\032\322\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTUzMSwiaWF0IjoxNzQ4OTQ2MzMxLCJzdWIiOiJ1c2VyMiJ9.eiImoKQKaCtmjCYoXgglETb0FV0YXiTqs6azEhdCIIWpMmDhipTCrg9oiRYjUnBpKzqqJrvIUaS_4G0-VANQ8gzAITqRayAAqVOQLtJi8SEmLDkLc_sDIknm_BBri-j5XjrcNbZ61NY497B7twH6GflYOgk-2r6SqH_vLQMtWmS7nz2WCE2TAiz-Hp06ZMHZsihGvewQcrCBA81SwdHUNOWXPDeqAWNuDncZ-YsvHZLf8iKu9AOP7fN0z9gEmF-agDJnLmyrB16TpejOM97SlcvYO-0AOp1uadsSVDV1lLWzhSGPELpx0qWSsP12B14T2Xii_LKtyiqhHaQwe7swQQ\"\005Login*~eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTUzMSwiaWF0IjoxNzQ4OTQ2MzMxLCJzdWIiOiJ1c2VyMiJ9.**" PeerName: "" 2025-06-03T10:25:31.090474Z node 3 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [3:7511667294224108646:2334] txid# 281474976715663 Bootstrap, UserSID: user2 CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:25:31.090477Z node 3 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [3:7511667294224108646:2334] txid# 281474976715663 Bootstrap, UserSID: user2 IsClusterAdministrator: 1 2025-06-03T10:25:31.090491Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [3:7511667294224108646:2334] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:25:31.090513Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667289929140966:2115], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:31.090542Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][3:7511667289929141219:2252][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [3:7511667289929140966:2115], cookie# 10 2025-06-03T10:25:31.090555Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7511667289929141223:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141220:2252], cookie# 10 2025-06-03T10:25:31.090559Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7511667289929141224:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141221:2252], cookie# 10 2025-06-03T10:25:31.090562Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][3:7511667289929141225:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141222:2252], cookie# 10 2025-06-03T10:25:31.090569Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7511667289929140651:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141223:2252], cookie# 10 2025-06-03T10:25:31.090576Z node 3 :SCHEME_BOARD_REPLICA DEBUG: 
replica.cpp:1128: [3:7511667289929140654:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141224:2252], cookie# 10 2025-06-03T10:25:31.090581Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [3:7511667289929140657:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [3:7511667289929141225:2252], cookie# 10 2025-06-03T10:25:31.090588Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7511667289929141223:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929140651:2049], cookie# 10 2025-06-03T10:25:31.090591Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7511667289929141224:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929140654:2052], cookie# 10 2025-06-03T10:25:31.090594Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][3:7511667289929141225:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929140657:2055], cookie# 10 2025-06-03T10:25:31.090600Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][3:7511667289929141219:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929141220:2252], cookie# 10 2025-06-03T10:25:31.090606Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][3:7511667289929141219:2252][/dc-1] Sync is in progress: cookie# 10, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:31.090610Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][3:7511667289929141219:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929141221:2252], cookie# 10 2025-06-03T10:25:31.090612Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][3:7511667289929141219:2252][/dc-1] Sync is done: cookie# 10, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:31.090616Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][3:7511667289929141219:2252][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 8 Partial: 0 }: sender# [3:7511667289929141222:2252], cookie# 10 2025-06-03T10:25:31.090618Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][3:7511667289929141219:2252][/dc-1] Unexpected sync response: sender# [3:7511667289929141222:2252], cookie# 10 2025-06-03T10:25:31.090624Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667289929140966:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:31.090639Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667289929140966:2115], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [3:7511667289929141219:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946330684 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:31.090650Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667289929140966:2115], cacheItem# { 
Subscriber: { Subscriber: [3:7511667289929141219:2252] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 10 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 1748946330684 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 10 IsSync: true Partial: 0 } 2025-06-03T10:25:31.090690Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667294224108647:2335], recipient# [3:7511667294224108646:2334], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } }] } 2025-06-03T10:25:31.090696Z node 3 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [3:7511667294224108646:2334] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:31.090704Z node 3 :TX_PROXY ERROR: schemereq.cpp:1072: Actor# [3:7511667294224108646:2334] txid# 281474976715663, Access denied for user2 on path /dc-1, with access AlterSchema 2025-06-03T10:25:31.090720Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667294224108646:2334] txid# 281474976715663, issues: { message: "Access denied for user2 on path /dc-1" issue_code: 200000 severity: 1 } 2025-06-03T10:25:31.090724Z node 3 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [3:7511667294224108646:2334] txid# 281474976715663 SEND to# [3:7511667294224108645:2333] Source {TEvProposeTransactionStatus Status# 5} >> KqpLocks::TwoPhaseTx [GOOD] >> KqpLocks::MixedTxFail-useSink |60.1%| [TA] $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpLocks::DifferentKeyUpdate [GOOD] >> KqpLocks::EmptyRange >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::StartTenanNodeAndStopAtDestructor [GOOD] Test command err: 2025-06-03T10:25:26.993598Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667274326626829:2147];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:26.993669Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001181/r3tmp/tmp766AMF/pdisk_1.dat 2025-06-03T10:25:27.470977Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:27.472040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:27.472058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:27.475341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28878 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:27.569799Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667274326626983:2115] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:27.571746Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667278621594744:2427] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:27.571788Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667278621594302:2128], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:27.571800Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667278621594302:2128], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:27.571857Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:27.572324Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667274326626662:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667278621594749:2428] 2025-06-03T10:25:27.572348Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667274326626662:2050] Subscribe: subscriber# [1:7511667278621594749:2428], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:27.572364Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667274326626665:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667278621594750:2428] 2025-06-03T10:25:27.572367Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667274326626665:2053] Subscribe: subscriber# [1:7511667278621594750:2428], path# /dc-1, domainOwnerId# 72057594046644480, 
capabilities# AckNotifications: true 2025-06-03T10:25:27.572372Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667274326626668:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667278621594751:2428] 2025-06-03T10:25:27.572375Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667274326626668:2056] Subscribe: subscriber# [1:7511667278621594751:2428], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:27.572389Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667278621594749:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667274326626662:2050] 2025-06-03T10:25:27.572403Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667278621594750:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667274326626665:2053] 2025-06-03T10:25:27.572407Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667278621594751:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667274326626668:2056] 2025-06-03T10:25:27.572413Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667278621594746:2428] 2025-06-03T10:25:27.572420Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667278621594747:2428] 2025-06-03T10:25:27.572451Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667278621594745:2428][/dc-1] Set up state: owner# [1:7511667278621594302:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:27.572492Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667278621594748:2428] 2025-06-03T10:25:27.572501Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667278621594745:2428][/dc-1] Path was already updated: owner# [1:7511667278621594302:2128], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:27.572509Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667278621594749:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594746:2428], cookie# 1 2025-06-03T10:25:27.572512Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: 
subscriber.cpp:381: [replica][1:7511667278621594750:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594747:2428], cookie# 1 2025-06-03T10:25:27.572516Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667278621594751:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594748:2428], cookie# 1 2025-06-03T10:25:27.572523Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667274326626662:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667278621594749:2428] 2025-06-03T10:25:27.572528Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667274326626662:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594749:2428], cookie# 1 2025-06-03T10:25:27.572533Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667274326626665:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667278621594750:2428] 2025-06-03T10:25:27.572535Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667274326626665:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594750:2428], cookie# 1 2025-06-03T10:25:27.572538Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667274326626668:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667278621594751:2428] 2025-06-03T10:25:27.572541Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667274326626668:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667278621594751:2428], cookie# 1 2025-06-03T10:25:27.573345Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667278621594749:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667274326626662:2050], cookie# 1 2025-06-03T10:25:27.573352Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667278621594750:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667274326626665:2053], cookie# 1 2025-06-03T10:25:27.573355Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667278621594751:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667274326626668:2056], cookie# 1 2025-06-03T10:25:27.573361Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278621594746:2428], cookie# 1 2025-06-03T10:25:27.573372Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667278621594745:2428][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:27.573377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667278621594745:2428][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278621594747:2428], cookie# 1 2025-06-03T10:25:27.573380Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667278621594745:2428][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:27.573385Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667278621594745:2428][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667278621594748:2428], cookie# 1 2025-06-03T10:25:27.573387Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667278621594745:2428][/dc-1] Unexpected sync response: sender# [1:7511667278621594748:2428], cookie# 1 TClient::Ls response: 2025-06-03T10:25:27.603368Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667278621594302:2128], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsI ... TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:30.663195Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667293882361035:2528], recipient# [3:7511667293882361012:2310], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:30.663205Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667293882361036:2529], recipient# [3:7511667293882361013:2311], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:31.221862Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667293882360452:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: 
dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:31.221891Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [3:7511667293882360452:2127], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-03T10:25:31.221952Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:31.222025Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7511667293882360113:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667298177328344:2536] 2025-06-03T10:25:31.222028Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7511667293882360113:2050] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-03T10:25:31.222053Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7511667293882360113:2050] Subscribe: subscriber# [3:7511667298177328344:2536], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:31.222067Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7511667293882360116:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667298177328345:2536] 2025-06-03T10:25:31.222069Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7511667293882360116:2053] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-03T10:25:31.222074Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7511667293882360116:2053] Subscribe: subscriber# [3:7511667298177328345:2536], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:31.222083Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [3:7511667293882360119:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667298177328346:2536] 2025-06-03T10:25:31.222084Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [3:7511667293882360119:2056] Upsert description: path# /dc-1/.metadata/initialization/migrations 2025-06-03T10:25:31.222093Z node 3 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [3:7511667293882360119:2056] Subscribe: subscriber# [3:7511667298177328346:2536], path# /dc-1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:31.222103Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7511667298177328344:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667293882360113:2050] 2025-06-03T10:25:31.222108Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7511667298177328345:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667293882360116:2053] 2025-06-03T10:25:31.222112Z node 3 :SCHEME_BOARD_SUBSCRIBER 
DEBUG: subscriber.cpp:365: [replica][3:7511667298177328346:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667293882360119:2056] 2025-06-03T10:25:31.222122Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667298177328341:2536] 2025-06-03T10:25:31.222136Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667298177328342:2536] 2025-06-03T10:25:31.222144Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Set up state: owner# [3:7511667293882360452:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:31.222149Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667298177328343:2536] 2025-06-03T10:25:31.222154Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7511667298177328340:2536][/dc-1/.metadata/initialization/migrations] Ignore empty state: owner# [3:7511667293882360452:2127], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:31.222160Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7511667293882360113:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667298177328344:2536] 2025-06-03T10:25:31.222163Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7511667293882360116:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667298177328345:2536] 2025-06-03T10:25:31.222166Z node 3 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [3:7511667293882360119:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667298177328346:2536] 2025-06-03T10:25:31.222175Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667293882360452:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-03T10:25:31.222198Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667293882360452:2127], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7511667298177328340:2536] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:31.222216Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667293882360452:2127], cacheItem# { Subscriber: { Subscriber: [3:7511667298177328340:2536] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } 
Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 }
2025-06-03T10:25:31.222231Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667298177328347:2537], recipient# [3:7511667298177328339:2312], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] }
2025-06-03T10:25:31.673458Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667293882360452:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] }
2025-06-03T10:25:31.673528Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667293882360452:2127], cacheItem# { Subscriber: { Subscriber: [3:7511667293882361014:2525] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 }
2025-06-03T10:25:31.673572Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667298177328356:2539], recipient# [3:7511667298177328355:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] }
>> DataShardWrite::UpsertNoLocksArbiterRestart [GOOD]
>> DataShardWrite::UpsertLostPrepareArbiterRestart
>> TGRpcStreamingTest::ClientDisconnects [GOOD]
|60.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest
>> TSchemeShardDecimalTypesInTables::CopyTableShouldNotFailOnDisabledFeatureFlag [GOOD]
>> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters
>> TSubDomainTest::CheckAccessCopyTable [GOOD]
>> TSubDomainTest::ConsistentCopyTable
>> TGRpcStreamingTest::WritesDoneFromClient [GOOD]
>> test.py::test[blocks-pg_to_numbers--ForceBlocks] [GOOD]
>> test.py::test[blocks-pg_to_numbers--Results]
|60.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/slow/unittest >> TPQTestSlow::TestOnDiskStoredSourceIds [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:109:2057] recipient: [1:102:2135] 2025-06-03T10:24:31.272517Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:31.272548Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927938 is [1:154:2174] sender: [1:155:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:180:2057] recipient: [1:14:2061] 2025-06-03T10:24:31.280060Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:31.283273Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 1 actor [1:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-03T10:24:31.283658Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:186:2198] 2025-06-03T10:24:31.284447Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:186:2198] 2025-06-03T10:24:31.284967Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:187:2199] 2025-06-03T10:24:31.285497Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:187:2199] 2025-06-03T10:24:31.291410Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|289a4903-e5cbf69b-201dda12-bf787ff_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:31.292502Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|689df878-30bf2c8-8adc9cce-6b511cfe_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:31.373478Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|f9e70fff-416b694b-d263f3eb-61bc11d9_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:31.375960Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie 
default|52ae624-8517d33e-a64800f7-1304aa51_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:31.377497Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|cc534074-a32910b7-5ea5a84e-a2d3d41d_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:31.378806Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d12a3c6e-a99f7697-a8dbde94-ff5bb1d_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:109:2057] recipient: [2:102:2135] 2025-06-03T10:24:31.985774Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:31.985802Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927938 is [2:154:2174] sender: [2:155:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:180:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:108:2139]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:182:2057] recipient: [2:100:2134] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:185:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:186:2057] recipient: [2:184:2195] Leader for TabletID 72057594037927937 is [2:187:2196] sender: [2:188:2057] recipient: [2:184:2195] 2025-06-03T10:24:31.995220Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:31.995249Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:108:2139]) rebooted! !Reboot 72057594037927937 (actor [2:108:2139]) tablet resolver refreshed! 
new actor is[2:187:2196] Leader for TabletID 72057594037927937 is [2:187:2196] sender: [2:267:2057] recipient: [2:14:2061] 2025-06-03T10:24:33.738023Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:33.738249Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-03T10:24:33.738438Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2258] 2025-06-03T10:24:33.739077Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:273:2258] 2025-06-03T10:24:33.739525Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:274:2259] 2025-06-03T10:24:33.739978Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:274:2259] 2025-06-03T10:24:33.742046Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|eafb7817-74ca6870-56ebf975-83c5b707_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.743029Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|42e0c351-ff22ffe7-65339ae4-1b03ffed_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.747577Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|8ee2a1c5-d6ac8a75-c5840cdb-8caffef2_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.749101Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|168bd524-51933047-ed458d57-86ffb3e3_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.750434Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|35aee137-a342bdf8-642917b2-58ce73f2_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:24:33.751787Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3015e7c4-159dd454-36937e60-67d0fe8c_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:104:2057] recipient: [3:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:104:2057] recipient: [3:102:2135] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:109:2057] recipient: [3:102:2135] 2025-06-03T10:24:34.123783Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:34.123812Z node 3 :PERSQUEUE 
INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:150:2057] recipient: [3:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:150:2057] recipient: [3:148:2170] Leader for TabletID 72057594037927938 is [3:154:2174] sender: [3:155:2057] recipient: [3:148:2170] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:180:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:108:2139]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:182:2057] recipient: [3:100:2134] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:185:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:186:2057] recipient: [3:184:2195] Leader for TabletID 72057594037927937 is [3:187:2196] sender: [3:188:2057] recipient: [3:184:2195] 2025-06-03T10:24:34.134849Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:34.134878Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:108:2139]) rebooted! !Reboot 72057594037927937 (actor [3:108:2139]) tablet resolver refreshed! new actor is[3:187:2196] Leader for TabletID 72057594037927937 is [3:187:2196] sender: [3:267:2057] recipient: [3:14:2061] 2025-06-03T10:24:35.683210Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:24:35.683398Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 3 actor [3:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } 2025-06-03T10:24:35.683525Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 720575940 ... 
94037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:186:2198] 2025-06-03T10:25:30.719400Z node 47 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [47:186:2198] 2025-06-03T10:25:30.719762Z node 47 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:187:2199] 2025-06-03T10:25:30.720182Z node 47 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [47:187:2199] 2025-06-03T10:25:30.721731Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b0abf60e-e5712dd2-531b4e4a-f5d7a77_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:30.722779Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a714b85e-456f2248-df35941d-820fc1eb_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:30.726577Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|92cca991-b4ccf544-75ed9ee6-e951c55a_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:30.727693Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|636914c5-87cd7af7-e89f150e-b88d667_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:30.728960Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ddc77ded-fed351d5-41b2fc61-9e7f40d6_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:30.730028Z node 47 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|a6ada682-a11b08a3-91e1c155-b38f73f3_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default !Reboot 72057594037927937 (actor [47:108:2139]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [47:108:2139] sender: [47:285:2057] recipient: [47:100:2134] Leader for TabletID 72057594037927937 is [47:108:2139] sender: [47:288:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:108:2139] sender: [47:289:2057] recipient: [47:287:2281] Leader for TabletID 72057594037927937 is [47:290:2282] sender: [47:291:2057] recipient: [47:287:2281] 2025-06-03T10:25:30.737908Z node 47 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:30.737931Z node 47 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:25:30.738024Z node 47 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [47:339:2323] 2025-06-03T10:25:30.738413Z node 47 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [47:340:2324] 2025-06-03T10:25:30.739488Z node 47 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 
2025-06-03T10:25:30.739501Z node 47 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [47:340:2324] 2025-06-03T10:25:30.739882Z node 47 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:25:30.739891Z node 47 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [47:339:2323] !Reboot 72057594037927937 (actor [47:108:2139]) rebooted! !Reboot 72057594037927937 (actor [47:108:2139]) tablet resolver refreshed! new actor is[47:290:2282] Leader for TabletID 72057594037927937 is [47:290:2282] sender: [47:390:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:104:2057] recipient: [48:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:104:2057] recipient: [48:102:2135] Leader for TabletID 72057594037927937 is [48:108:2139] sender: [48:109:2057] recipient: [48:102:2135] 2025-06-03T10:25:32.491609Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:32.491637Z node 48 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:150:2057] recipient: [48:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [48:150:2057] recipient: [48:148:2170] Leader for TabletID 72057594037927938 is [48:154:2174] sender: [48:155:2057] recipient: [48:148:2170] Leader for TabletID 72057594037927937 is [48:108:2139] sender: [48:178:2057] recipient: [48:14:2061] 2025-06-03T10:25:32.495517Z node 48 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:32.495687Z node 48 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 48 actor [48:176:2190] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 48 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 48 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 48 Important: false } 2025-06-03T10:25:32.495801Z node 48 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [48:184:2196] 2025-06-03T10:25:32.496422Z node 48 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [48:184:2196] 2025-06-03T10:25:32.496821Z node 48 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [48:185:2197] 2025-06-03T10:25:32.497251Z node 48 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [48:185:2197] 2025-06-03T10:25:32.499788Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|e14ccb23-b7379e11-1f084dd0-cbe141dc_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:32.500648Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|51d1a945-1a7f02aa-487eb250-5e26658f_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:32.505915Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|2fb2cce2-9b0ff189-b4216e38-5370d2d4_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:32.508721Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b1cb6ba1-e872b3c3-fcfb238f-d27984e0_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:32.510950Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|d9c1ea72-6132ac2-64533370-1a28d9e9_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default 2025-06-03T10:25:32.512090Z node 48 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|3770341b-8d0e4d24-ea577ee8-f1895008_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:104:2057] recipient: [49:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:104:2057] recipient: [49:102:2135] Leader for TabletID 72057594037927937 is [49:108:2139] sender: [49:109:2057] recipient: [49:102:2135] 2025-06-03T10:25:32.782559Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:32.782588Z node 49 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:150:2057] recipient: [49:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [49:150:2057] recipient: [49:148:2170] Leader for TabletID 72057594037927938 is [49:154:2174] sender: [49:155:2057] recipient: [49:148:2170] Leader for TabletID 72057594037927937 is [49:108:2139] sender: [49:180:2057] recipient: [49:14:2061] 2025-06-03T10:25:32.786932Z node 49 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:25:32.787134Z node 49 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 49 actor [49:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 SourceIdMaxCounts: 3 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 49 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 49 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 49 Important: false } 2025-06-03T10:25:32.787291Z node 49 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [49:186:2198] 2025-06-03T10:25:32.788120Z node 49 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init 
complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [49:186:2198]
2025-06-03T10:25:32.788623Z node 49 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [49:187:2199]
2025-06-03T10:25:32.789213Z node 49 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [49:187:2199]
2025-06-03T10:25:32.791366Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|ab1de9af-597c0927-d3662a8e-3d448a49_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
2025-06-03T10:25:32.792828Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|10ede00c-aa617043-70b5d4ee-668462e1_1 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
2025-06-03T10:25:32.798136Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|c9270a74-83657aee-624e13fc-1b5baff1_2 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
2025-06-03T10:25:32.799820Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9f1a7b7f-512d9ace-a1b0e8fd-c98e881f_3 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
2025-06-03T10:25:32.801338Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|9f8b4dd0-cf1b07fa-2e369fef-b39b00e1_4 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
2025-06-03T10:25:32.803239Z node 49 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|6d04efe3-cbe51933-eff05a4b-250c003a_5 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default
>> TGRpcStreamingTest::SimpleEcho [GOOD]
>> TGRpcStreamingTest::WriteAndFinishWorks
>> TGRpcStreamingTest::ClientNeverWrites
>> KqpLocks::EmptyRange [GOOD]
>> KqpLocks::EmptyRangeAlreadyBroken
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientDisconnects [GOOD]
Test command err:
2025-06-03T10:25:31.560975Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667296288680871:2258];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:25:31.561001Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e87/r3tmp/tmpXf9Kvl/pdisk_1.dat
2025-06-03T10:25:32.213306Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667296288680652:2079] 1748946331554525 != 1748946331554528
2025-06-03T10:25:32.235778Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:25:32.235818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:25:32.237047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:25:32.237265Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:25:32.370189Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x7337bedbc380] stream done notification Name# Session ok# true peer# ipv6:[::1]:40126
2025-06-03T10:25:32.370213Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x7337bedbc380] stream accepted Name# Session ok# true peer# ipv6:[::1]:40126
2025-06-03T10:25:32.370816Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x7337bedbc380] facade attach Name# Session actor# [1:7511667300583648496:2257] peer# ipv6:[::1]:40126
2025-06-03T10:25:32.370821Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:230: Received TEvNotifiedWhenDone
2025-06-03T10:25:32.371466Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x7337bedbc380] stream finished Name# Session ok# false peer# unknown grpc status# (1) message# Request abandoned
2025-06-03T10:25:32.371738Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x7337bedbc380] deregistering request Name# Session peer# unknown (finish done)
>> KqpTx::SnapshotRO [GOOD]
>> KqpTx::SnapshotROInteractive1
>> TSchemeShardDecimalTypesInTables::CreateWithWrongParameters [GOOD]
>> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WritesDoneFromClient [GOOD]
Test command err:
2025-06-03T10:25:32.237030Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667300463078442:2200];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:25:32.237080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e7c/r3tmp/tmp9aNClT/pdisk_1.dat
2025-06-03T10:25:32.559566Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:25:32.560292Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667300463078281:2079] 1748946332167037 != 1748946332167040
2025-06-03T10:25:32.564348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:25:32.564623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:25:32.567115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:25:32.669039Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x71983edf8380] stream accepted Name# Session ok# true peer# ipv6:[::1]:44642
2025-06-03T10:25:32.669142Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x71983edf8380] facade attach Name# Session actor# [1:7511667300463078831:2258] peer# ipv6:[::1]:44642
2025-06-03T10:25:32.669156Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x71983edf8380] facade read Name# Session peer# ipv6:[::1]:44642
2025-06-03T10:25:32.669208Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x71983edf8380] read finished Name# Session ok# false data# peer# ipv6:[::1]:44642
2025-06-03T10:25:32.669226Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:302: Received TEvReadFinished, success = 0
2025-06-03T10:25:32.669265Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x71983edf8380] facade finish Name# Session peer# ipv6:[::1]:44642 grpc status# (9) message# Everything is A-OK
2025-06-03T10:25:32.671158Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x71983edf8380] stream done notification Name# Session ok# true peer# unknown
2025-06-03T10:25:32.671175Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x71983edf8380] stream finished Name# Session ok# true peer# unknown grpc status# (9) message# Everything is A-OK
2025-06-03T10:25:32.671543Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x71983edf8380] deregistering request Name# Session peer# unknown (finish done)
2025-06-03T10:25:32.671575Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:312: Received TEvNotifiedWhenDone
>> KqpTx::CommitRoTx [GOOD]
>> KqpLocks::MixedTxFail-useSink [GOOD]
>> KqpLocksTricky::TestNoLocksIssue+withSink
>> KqpTx::CommitRoTx_TLI
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::SimpleEcho [GOOD]
Test command err:
2025-06-03T10:25:32.151467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667301379702059:2234];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:25:32.151499Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e73/r3tmp/tmpaaYPmT/pdisk_1.dat
2025-06-03T10:25:32.721967Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667301379701864:2079] 1748946332111291 != 1748946332111294
2025-06-03T10:25:32.722432Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:25:32.726085Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:25:32.726109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:25:32.727843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:25:32.808349Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x530efeb1a380] stream accepted Name# Session ok# true peer# ipv6:[::1]:38400
2025-06-03T10:25:32.809410Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x530efeb1a380] facade attach Name# Session actor# [1:7511667301379702412:2257] peer# ipv6:[::1]:38400
2025-06-03T10:25:32.809422Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x530efeb1a380] facade read Name# Session peer# ipv6:[::1]:38400
2025-06-03T10:25:32.809705Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x530efeb1a380] read finished Name# Session ok# true data# peer# ipv6:[::1]:38400
2025-06-03T10:25:32.813374Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:142: Received TEvReadFinished, success = 1
2025-06-03T10:25:32.813401Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x530efeb1a380] facade write Name# Session data# peer# ipv6:[::1]:38400
2025-06-03T10:25:32.813591Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x530efeb1a380] facade finish Name# Session peer# ipv6:[::1]:38400 grpc status# (0) message#
2025-06-03T10:25:32.813621Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x530efeb1a380] write finished Name# Session ok# true peer# ipv6:[::1]:38400
2025-06-03T10:25:32.813742Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x530efeb1a380] stream done notification Name# Session ok# true peer# ipv6:[::1]:38400
2025-06-03T10:25:32.813750Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x530efeb1a380] stream finished Name# Session ok# true peer# ipv6:[::1]:38400 grpc status# (0) message#
2025-06-03T10:25:32.813770Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x530efeb1a380] deregistering request Name# Session peer# ipv6:[::1]:38400 (finish done)
|60.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
|60.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
>> KqpTx::SnapshotROInteractive1 [GOOD]
>> TSchemeShardDecimalTypesInTables::AlterWithWrongParameters [GOOD]
>> TSchemeShardInfoTypesTest::EmptyFamilies [GOOD]
>> TSchemeShardInfoTypesTest::LostId [GOOD]
>> TSchemeShardInfoTypesTest::DeduplicationOrder [GOOD]
>> TSchemeShardInfoTypesTest::MultipleDeduplications [GOOD]
>> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false
>> TGRpcStreamingTest::ReadFinish
>> TSubDomainTest::CreateTabletForUnknownDomain [GOOD]
>> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped
|60.1%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/arrow/test-results/unittest/{meta.json ... results_accumulator.log}
|60.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/blobstorage-ut_blobstorage-ut_vdisk_restart
>> KqpLocks::EmptyRangeAlreadyBroken [GOOD]
>> TGRpcStreamingTest::WriteAndFinishWorks [GOOD]
>> TGRpcStreamingTest::ClientNeverWrites [GOOD]
>> KqpTx::CommitRoTx_TLI [GOOD]
>> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-false [GOOD]
>> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true
|60.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest
>> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2
>> TSubDomainTest::ConsistentCopyTable [GOOD]
>> KqpSinkTx::OlapInvalidateOnError
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpTx::SnapshotROInteractive1 [GOOD]
Test command err:
Trying to start YDB, gRPC: 6622, MsgBus: 25107
2025-06-03T10:25:30.979457Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667290628487948:2206];send_to=[0:7307199536658146131:7762515];
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f62/r3tmp/tmp5VKuJg/pdisk_1.dat
2025-06-03T10:25:31.281719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-06-03T10:25:31.506293Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:25:31.508572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:25:31.508589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:25:31.509362Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667290628487769:2079] 1748946330948664 != 1748946330948667
2025-06-03T10:25:31.515405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 6622, node 1
2025-06-03T10:25:31.700837Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:25:31.700849Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:31.700851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:31.700892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25107 TClient is connected to server localhost:25107 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:32.379392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.402311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.576011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.746737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:32.786380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.076132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667303513391312:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.076168Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.169186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.189768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.229704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.269496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.295787Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.341514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.417727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.505482Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667303513391976:2468], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.505540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.505758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667303513391981:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.506769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:33.513452Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:33.513672Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667303513391983:2472], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:33.619081Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667303513392034:3408] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:34.170209Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZjM1OTFmMTgtYmJhMjViZjAtMjQ0MWJmOTctMzQyMjllNzg=, ActorId: [1:7511667307808359593:2509], ActorState: ExecuteState, TraceId: 01jwtn6cee58sxwvfx3rdt2abd, Create QueryResponse for error on request, msg:
:3:25: Error: Operation 'Upsert' can't be performed in read only transaction, code: 2008 Trying to start YDB, gRPC: 11462, MsgBus: 12924 2025-06-03T10:25:34.575782Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667307707343282:2213];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f62/r3tmp/tmpevtLBS/pdisk_1.dat 2025-06-03T10:25:34.583080Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:34.613195Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11462, node 2 2025-06-03T10:25:34.628385Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:34.628398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:34.628400Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:34.628442Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12924 2025-06-03T10:25:34.690117Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:34.690144Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:34.693683Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12924 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:34.731471Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:34.734954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:34.746714Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:34.775004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:34.842403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:34.862345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.034226Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667312002312024:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.034260Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.037225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.065139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.080480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.091165Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.109167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.121190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.139691Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.168232Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667312002312678:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.168261Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.168402Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667312002312683:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.169267Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:35.174950Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667312002312685:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:35.274836Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667312002312736:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::WriteAndFinishWorks [GOOD] Test command err: 2025-06-03T10:25:35.170536Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667311796168237:2271];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e57/r3tmp/tmpf1djgl/pdisk_1.dat 2025-06-03T10:25:35.238971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:35.335017Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:35.337374Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667311796168003:2079] 1748946335133606 != 1748946335133609 2025-06-03T10:25:35.368018Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x53a57efe0380] stream accepted Name# Session ok# true peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368167Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x53a57efe0380] facade attach Name# Session actor# [1:7511667311796168546:2254] peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368176Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x53a57efe0380] facade write Name# Session data# peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368287Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:396: [0x53a57efe0380] facade write Name# Session data# peer# ipv6:[::1]:44186 grpc status# (0) message# 2025-06-03T10:25:35.368665Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x53a57efe0380] write finished Name# Session ok# true peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368760Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x53a57efe0380] stream done notification Name# Session ok# true peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368765Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x53a57efe0380] write finished Name# Session ok# true peer# ipv6:[::1]:44186 2025-06-03T10:25:35.368769Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x53a57efe0380] stream finished Name# Session ok# true peer# ipv6:[::1]:44186 grpc status# (0) message# 2025-06-03T10:25:35.368785Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x53a57efe0380] deregistering request Name# Session peer# ipv6:[::1]:44186 (finish done) 2025-06-03T10:25:35.368854Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-06-03T10:25:35.368860Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:347: Received TEvWriteFinished, success = 1 2025-06-03T10:25:35.384420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:35.384452Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-03T10:25:35.384946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected >> KqpSinkLocks::TInvalidateOlap >> KqpTx::LocksAbortOnCommit >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpLocks::EmptyRangeAlreadyBroken [GOOD] Test command err: Trying to start YDB, gRPC: 10152, MsgBus: 65444 2025-06-03T10:25:29.981699Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667288041517816:2136];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f64/r3tmp/tmpIAxOOD/pdisk_1.dat 2025-06-03T10:25:29.982958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:30.061304Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667288041517719:2079] 1748946329979636 != 1748946329979639 2025-06-03T10:25:30.063671Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10152, node 1 2025-06-03T10:25:30.077026Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:30.077044Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:30.077049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:30.077113Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65444 TClient is connected to server localhost:65444 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:25:30.137636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.137676Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.141789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:30.194779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.207954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:30.223486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.398531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:30.533431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:30.562468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:31.523813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667296631453965:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.523858Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.768424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.811575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.891980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.928597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.992396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:32.109270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:32.156051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:32.210989Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667300926421925:2468], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:32.211018Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:32.211120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667300926421930:2471], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:32.212308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:32.215780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:32.215844Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667300926421932:2472], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:32.282173Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667300926421983:3406] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 2760, MsgBus: 11691 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f64/r3tmp/tmpCwwC4Q/pdisk_1.dat 2025-06-03T10:25:32.988179Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:33.012641Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2760, node 2 2025-06-03T10:25:33.034151Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:33.034167Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:33.034169Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:33.034237Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11691 2025-06-03T10:25:33.084657Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:33.084690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:33.089735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. ... 
ting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:25:34.079199Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037914 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:25:34.079302Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [2:7511667307886767352:2507], Table: `/Root/Test` ([72057594046644480:9:1]), SessionActorId: [2:7511667303591799974:2507]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037914, Sink=[2:7511667307886767352:2507].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:25:34.079393Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [2:7511667307886767345:2507], SessionActorId: [2:7511667303591799974:2507], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[2:7511667303591799974:2507]. isRollback=0 2025-06-03T10:25:34.079447Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=2&id=NGU5YTQ3ZGUtYmFkNWQ5NDUtYzYxZTljZGYtMTQwNTQ0ZGQ=, ActorId: [2:7511667303591799974:2507], ActorState: ExecuteState, TraceId: 01jwtn6cbf1y90jpmtt9v1wgy7, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7511667307886767346:2507] from: [2:7511667307886767345:2507] 2025-06-03T10:25:34.079464Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [2:7511667307886767346:2507] TxId: 281474976715675. Ctx: { TraceId: 01jwtn6cbf1y90jpmtt9v1wgy7, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NGU5YTQ3ZGUtYmFkNWQ5NDUtYzYxZTljZGYtMTQwNTQ0ZGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:25:34.079510Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=NGU5YTQ3ZGUtYmFkNWQ5NDUtYzYxZTljZGYtMTQwNTQ0ZGQ=, ActorId: [2:7511667303591799974:2507], ActorState: ExecuteState, TraceId: 01jwtn6cbf1y90jpmtt9v1wgy7, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 Trying to start YDB, gRPC: 22167, MsgBus: 26620 2025-06-03T10:25:34.623559Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667308793598618:2277];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:34.625127Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f64/r3tmp/tmpQV1AnZ/pdisk_1.dat 2025-06-03T10:25:34.686304Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22167, node 3 2025-06-03T10:25:34.715859Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:34.715874Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:34.715876Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:34.715921Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26620 2025-06-03T10:25:34.757726Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:34.757749Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:34.759465Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26620 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:34.789544Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:34.802093Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:34.810211Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:25:34.835803Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:34.862162Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.888209Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.074549Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667313088567294:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.074609Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.081853Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.094255Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.113926Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.176681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.198499Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.264132Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.285657Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.306636Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667313088567954:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.306666Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.306771Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667313088567959:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.307843Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:35.316054Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667313088567961:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:35.374777Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667313088568012:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:35.610865Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=ZmMxNmZjODktZjgxZTJlZDMtYjg5ZWIwM2YtMzU3YTM4MmM=, ActorId: [3:7511667313088568275:2507], ActorState: ExecuteState, TraceId: 01jwtn6dtk81vz0ws8055njaed, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001
: Error: tx has deferred effects, but locks are broken ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ClientNeverWrites [GOOD] Test command err: 2025-06-03T10:25:35.183599Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667312498373790:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:35.183754Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e5d/r3tmp/tmpjDXrVK/pdisk_1.dat 2025-06-03T10:25:35.358655Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:35.386203Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x3357bedcc380] stream accepted Name# Session ok# true peer# ipv6:[::1]:54038 2025-06-03T10:25:35.389365Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x3357bedcc380] facade attach Name# Session actor# [1:7511667312498374161:2252] peer# ipv6:[::1]:54038 2025-06-03T10:25:35.389384Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x3357bedcc380] facade read Name# Session peer# ipv6:[::1]:54038 2025-06-03T10:25:35.389431Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:401: [0x3357bedcc380] facade write Name# Session data# peer# ipv6:[::1]:54038 2025-06-03T10:25:35.389587Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x3357bedcc380] facade finish Name# Session peer# ipv6:[::1]:54038 grpc status# (0) message# 2025-06-03T10:25:35.392427Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:456: [0x3357bedcc380] write finished Name# Session ok# true peer# ipv6:[::1]:54038 2025-06-03T10:25:35.392621Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x3357bedcc380] read finished Name# Session ok# false data# peer# ipv6:[::1]:54038 2025-06-03T10:25:35.392630Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x3357bedcc380] stream done notification Name# Session ok# true peer# ipv6:[::1]:54038 2025-06-03T10:25:35.392636Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x3357bedcc380] stream finished Name# Session ok# true peer# ipv6:[::1]:54038 grpc status# (0) message# 2025-06-03T10:25:35.392657Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x3357bedcc380] deregistering request Name# Session peer# ipv6:[::1]:54038 (finish done) 2025-06-03T10:25:35.392708Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:187: Received TEvWriteFinished, success = 1 2025-06-03T10:25:35.392712Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:181: Received TEvReadFinished, success = 0 2025-06-03T10:25:35.392715Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:194: Received TEvNotifiedWhenDone 2025-06-03T10:25:35.422961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:35.422997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:35.424302Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected >> BlobStorageBlockRace::Test >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests2Inflight2BlobSize1000 >> TGRpcStreamingTest::ReadFinish [GOOD] >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpTx::CommitRoTx_TLI [GOOD] Test command err: Trying to start YDB, gRPC: 10023, MsgBus: 10480 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f60/r3tmp/tmpI11gaL/pdisk_1.dat 2025-06-03T10:25:32.241721Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667298334082224:2270];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:32.241814Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:32.401404Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667298334081991:2079] 1748946331710222 != 1748946331710225 2025-06-03T10:25:32.403474Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:32.404667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:32.404684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:32.408331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10023, node 1 2025-06-03T10:25:32.546846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:32.546858Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:32.546861Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:32.546903Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10480 TClient is connected to server localhost:10480 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:32.860329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:32.882323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.995235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:33.029035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:33.051074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:33.499422Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667306924018251:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.499456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.765657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.777618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.804597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.820367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.848806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.913935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.984014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.059319Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667311218986204:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:34.059343Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:34.059419Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667311218986209:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:34.060198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:34.069057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:25:34.069139Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667311218986211:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:25:34.153974Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667311218986262:3402] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 28353, MsgBus: 18762 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f60/r3tmp/tmpUG0SVw/pdisk_1.dat 2025-06-03T10:25:35.048823Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:35.057381Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28353, node 2 2025-06-03T10:25:35.082362Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:35.082376Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:35.082380Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:35.082426Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18762 2025-06-03T10:25:35.109678Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:35.109701Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:35.113690Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18762 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:35.181920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:35.193689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:35.198051Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.219846Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.251918Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.270917Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:35.461609Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667315510570737:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.461638Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.469919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.483268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.497190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.510498Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.526713Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.538127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.551500Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.567863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667315510571389:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.567894Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.567964Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667315510571394:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.568740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:35.577905Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667315510571396:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:35.640290Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667315510571447:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> IncorrectQueries::VeryBigBlob >> BlobStorageBlockRace::Test [GOOD] >> BlobStorageSync::TestSyncLogCuttingMirror3dc |60.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |60.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots |60.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap_reboots/ydb-core-tx-schemeshard-ut_olap_reboots >> test.py::test[blocks-pg_to_numbers--Results] [GOOD] >> IncorrectQueries::VeryBigBlob [GOOD] >> IncorrectQueries::WrongDataSize >> test.py::test[case-case_val_when_then-default.txt-ForceBlocks] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::ConsistentCopyTable [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001169/r3tmp/tmp4hGmQw/pdisk_1.dat 2025-06-03T10:25:27.869411Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:27.901385Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667277138521297:2079] 1748946327760829 != 1748946327760832 2025-06-03T10:25:27.906794Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:27.938165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:27.938197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:27.942424Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16393 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:25:28.061980Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667277138521324:2086] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:28.082199Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667281433489147:2260] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:28.082263Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667277138521574:2111], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:28.082291Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667277138521668:2149][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667277138521574:2111], cookie# 1 2025-06-03T10:25:28.083685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667277138521683:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521680:2149], cookie# 1 2025-06-03T10:25:28.083700Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667277138521684:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521681:2149], cookie# 1 2025-06-03T10:25:28.083705Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667277138521685:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521682:2149], cookie# 1 2025-06-03T10:25:28.083714Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667277138521267:2049] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521683:2149], cookie# 1 2025-06-03T10:25:28.083722Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667277138521270:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521684:2149], cookie# 1 2025-06-03T10:25:28.083728Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667277138521273:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667277138521685:2149], cookie# 1 2025-06-03T10:25:28.083735Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667277138521683:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521267:2049], cookie# 1 2025-06-03T10:25:28.083738Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667277138521684:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521270:2052], cookie# 1 2025-06-03T10:25:28.083741Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667277138521685:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521273:2055], cookie# 1 2025-06-03T10:25:28.083747Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667277138521668:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521680:2149], cookie# 1 2025-06-03T10:25:28.083755Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: 
[main][1:7511667277138521668:2149][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:28.083759Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667277138521668:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521681:2149], cookie# 1 2025-06-03T10:25:28.083763Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667277138521668:2149][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:28.083767Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667277138521668:2149][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667277138521682:2149], cookie# 1 2025-06-03T10:25:28.083770Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667277138521668:2149][/dc-1] Unexpected sync response: sender# [1:7511667277138521682:2149], cookie# 1 2025-06-03T10:25:28.083779Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667277138521574:2111], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:28.103292Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667277138521574:2111], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667277138521668:2149] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:28.103340Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667277138521574:2111], cacheItem# { Subscriber: { Subscriber: [1:7511667277138521668:2149] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:28.105236Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667281433489148:2261], recipient# [1:7511667281433489147:2260], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:28.105260Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667281433489147:2260] HANDLE EvNavigateKeySetResult 
TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:28.177577Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667281433489147:2260] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:28.179771Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667281433489147:2260] Handle TEvDescribeSchemeResult Forward to# [1:7511667281433489146:2259] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
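The subscriber trace above shows the read-sync quorum: the sync request fans out to size# 3 replicas, progress is tracked against half# 1, the sync completes once successes# exceeds half# (here at 2 of 3), and the third, late reply is logged as an "Unexpected sync response" ("faulires" is the log's own spelling of failures). A self-contained sketch of that majority rule as read off the trace (TSyncQuorum is an illustrative name; the real logic lives in subscriber.cpp):

```cpp
// Self-contained sketch of the replica sync quorum; TSyncQuorum is an
// illustrative name. The majority rule (done once successes# > half#,
// half# == size/2) is read directly off the trace above.
#include <iostream>

class TSyncQuorum {
public:
    explicit TSyncQuorum(int size) : Size_(size), Half_(size / 2) {}

    // Feed one replica response; returns true the moment the sync completes.
    bool OnResponse(bool success) {
        if (Done_) {
            std::cout << "Unexpected sync response\n";   // late third reply
            return false;
        }
        (success ? Successes_ : Failures_) += 1;
        std::cout << "Sync is in progress: size# " << Size_
                  << ", half# " << Half_ << ", successes# " << Successes_
                  << ", failures# " << Failures_ << '\n';
        if (Successes_ > Half_) {            // majority reached: 2 of 3 here
            Done_ = true;
            std::cout << "Sync is done\n";
            return true;
        }
        return false;
    }

private:
    int Size_;
    int Half_;
    int Successes_ = 0;
    int Failures_ = 0;
    bool Done_ = false;
};

int main() {
    TSyncQuorum quorum(3);       // three replicas, as in the trace
    quorum.OnResponse(true);     // successes# 1 -> in progress
    quorum.OnResponse(true);     // successes# 2 -> Sync is done
    quorum.OnResponse(true);     // arrives after done -> unexpected
}
```

Answering at the majority rather than waiting for all three replicas keeps path resolution available while one replica is slow or down.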
2025-06-03T10:25:28.200760Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667277138521324:2086] Handle TEvProposeTransaction 2025-06-03T10:25:28.200777Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667277138521324:2086] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:25:28.200824Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667277138521324:2086] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7511667281433489153:2265] 2025-06-03T10:25:28.278662Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667281433489153:2265] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "" PeerName: "" 2025-06-03T10:25:28.278692Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:751166728 ... 304041806988:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [7:7511667313596880555:3050] 2025-06-03T10:25:35.661492Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7511667304041806988:2051] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-03T10:25:35.661501Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7511667304041806988:2051] Subscribe: subscriber# [7:7511667313596880555:3050], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:35.661522Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [5:7511667304041806991:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [7:7511667313596880556:3050] 2025-06-03T10:25:35.661525Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [5:7511667304041806991:2054] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-03T10:25:35.661532Z node 5 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [5:7511667304041806991:2054] Subscribe: subscriber# [7:7511667313596880556:3050], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:35.661659Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][7:7511667313596880557:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [5:7511667304041806994:2057] 2025-06-03T10:25:35.661672Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][7:7511667313596880555:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [5:7511667304041806988:2051] 2025-06-03T10:25:35.661679Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][7:7511667313596880556:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [5:7511667304041806991:2054] 2025-06-03T10:25:35.661687Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][7:7511667313596880551:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: 
/dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [7:7511667313596880554:3050] 2025-06-03T10:25:35.661700Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][7:7511667313596880551:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [7:7511667313596880552:3050] 2025-06-03T10:25:35.661709Z node 7 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][7:7511667313596880551:3050][/dc-1/USER_0/.metadata/initialization/migrations] Set up state: owner# [7:7511667309301911693:2103], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:35.661717Z node 7 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][7:7511667313596880551:3050][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [7:7511667313596880553:3050] 2025-06-03T10:25:35.661723Z node 7 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][7:7511667313596880551:3050][/dc-1/USER_0/.metadata/initialization/migrations] Ignore empty state: owner# [7:7511667309301911693:2103], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:35.661742Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [7:7511667309301911693:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-03T10:25:35.661760Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [7:7511667309301911693:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [7:7511667313596880551:3050] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:35.661785Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7511667309301911693:2103], cacheItem# { Subscriber: { Subscriber: [7:7511667313596880551:3050] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:35.661803Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7511667313596880558:3051], recipient# [7:7511667313596880550:2401], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] 
} 2025-06-03T10:25:35.661980Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667304041806991:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [7:7511667313596880556:3050] 2025-06-03T10:25:35.661996Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667304041806994:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [7:7511667313596880557:3050] 2025-06-03T10:25:35.662004Z node 5 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [5:7511667304041806988:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [7:7511667313596880555:3050] 2025-06-03T10:25:35.793034Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [6:7511667309791301079:2103], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:35.793070Z node 6 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [6:7511667309791301079:2103], path# /dc-1/USER_1/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-03T10:25:35.793164Z node 6 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:35.793312Z node 6 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [6:7511667314086268631:2224] 2025-06-03T10:25:35.793329Z node 6 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [6:7511667314086268632:2224] 2025-06-03T10:25:35.793340Z node 6 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Set up state: owner# [6:7511667309791301079:2103], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:35.793351Z node 6 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_1/.metadata/initialization/migrations Version: 0 }: sender# [6:7511667314086268633:2224] 2025-06-03T10:25:35.793357Z node 6 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][6:7511667314086268630:2224][/dc-1/USER_1/.metadata/initialization/migrations] Ignore empty state: owner# [6:7511667309791301079:2103], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:35.793373Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [6:7511667309791301079:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/initialization/migrations PathId: Strong: 0 } 
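Two deletes of different strength appear in this block: node 7 receives TEvNotifyDelete with Strong: 1 and the cache resolves the path as PathErrorUnknown (authoritatively absent), while node 6, continuing just below, receives Strong: 0 and the resolution surfaces as LookupError, which the metadata provider then retries. A small sketch of that distinction as read from the trace (the enum and struct are illustrative, not the scheme cache's real types):

```cpp
// Small sketch of strong vs. weak delete notifications; the enum and struct
// are illustrative, not the scheme cache's real types.
#include <iostream>

enum class EResolveStatus { Ok, PathErrorUnknown, LookupError };

struct TNotifyDelete {
    bool Strong;   // true when a replica quorum confirmed the deletion
};

EResolveStatus ResolveAfterDelete(const TNotifyDelete& notify) {
    // Strong: 1 -> the path is authoritatively absent; Strong: 0 -> the
    // state is unknown, so the caller gets a retryable LookupError.
    return notify.Strong ? EResolveStatus::PathErrorUnknown
                         : EResolveStatus::LookupError;
}

int main() {
    // Node 7 above: Strong: 1 -> PathErrorUnknown (prints 1).
    std::cout << static_cast<int>(ResolveAfterDelete({true})) << '\n';
    // Node 6 below: Strong: 0 -> LookupError, later retried (prints 2).
    std::cout << static_cast<int>(ResolveAfterDelete({false})) << '\n';
}
```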
2025-06-03T10:25:35.793391Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [6:7511667309791301079:2103], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/initialization/migrations PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [6:7511667314086268630:2224] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:35.793413Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [6:7511667309791301079:2103], cacheItem# { Subscriber: { Subscriber: [6:7511667314086268630:2224] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:35.793434Z node 6 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [6:7511667314086268637:2225], recipient# [6:7511667314086268629:2320], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:35.795529Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_1/.metadata/initialization/migrations;error=incorrect path status: LookupError; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_streaming/ut/unittest >> TGRpcStreamingTest::ReadFinish [GOOD] Test command err: 2025-06-03T10:25:35.991309Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667315528392288:2220];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e4f/r3tmp/tmpJAPUH8/pdisk_1.dat 2025-06-03T10:25:36.058476Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:36.091251Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:36.091545Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667315528392077:2079] 1748946335982490 != 1748946335982493 2025-06-03T10:25:36.136404Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:227: [0x70befefcc380] stream accepted Name# Session ok# true peer# ipv6:[::1]:57646 2025-06-03T10:25:36.136513Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:301: [0x70befefcc380] facade attach Name# Session actor# [1:7511667319823359908:2250] peer# ipv6:[::1]:57646 2025-06-03T10:25:36.136518Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:325: [0x70befefcc380] facade read Name# Session peer# ipv6:[::1]:57646 
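At this point in the ReadFinish test the server has a read outstanding; the lines that follow show the stream being finished first, after which the parked read completes with ok# false ("Received TEvReadFinished, success = 0") and the request is deregistered only once both events have fired. A minimal model of that ordering (TStreamModel is an illustrative stand-in, not the grpc_streaming facade's API):

```cpp
// Minimal model of finishing a stream with a read outstanding; TStreamModel
// is an illustrative stand-in for the grpc_streaming facade, not its API.
#include <iostream>

class TStreamModel {
public:
    void Read() { PendingRead_ = true; }       // "facade read"

    void Finish() {                            // "facade finish"
        if (Finished_) return;
        Finished_ = true;
        if (PendingRead_) {
            // The parked read must still complete, but unsuccessfully:
            PendingRead_ = false;
            OnReadFinished(false);             // "read finished ... ok# false"
        }
        std::cout << "stream finished; deregistering request\n";
    }

private:
    void OnReadFinished(bool ok) {
        std::cout << "Received TEvReadFinished, success = " << ok << '\n';
    }
    bool PendingRead_ = false;
    bool Finished_ = false;
};

int main() {
    TStreamModel stream;
    stream.Read();     // a read is now outstanding
    stream.Finish();   // prints success = 0, then deregisters
}
```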
2025-06-03T10:25:36.136536Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:511: [0x70befefcc380] facade finish Name# Session peer# ipv6:[::1]:57646 grpc status# (0) message# 2025-06-03T10:25:36.136900Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:36.136931Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:36.137013Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:353: [0x70befefcc380] read finished Name# Session ok# false data# peer# ipv6:[::1]:57646 2025-06-03T10:25:36.137035Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:268: [0x70befefcc380] stream done notification Name# Session ok# true peer# ipv6:[::1]:57646 2025-06-03T10:25:36.137046Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:547: [0x70befefcc380] stream finished Name# Session ok# true peer# ipv6:[::1]:57646 grpc status# (0) message# 2025-06-03T10:25:36.137070Z node 1 :GRPC_SERVER DEBUG: grpc_streaming.h:580: [0x70befefcc380] deregistering request Name# Session peer# ipv6:[::1]:57646 (finish done) 2025-06-03T10:25:36.137088Z node 1 :GRPC_SERVER DEBUG: grpc_streaming_ut.cpp:265: Received TEvReadFinished, success = 0 2025-06-03T10:25:36.138017Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base/unittest >> TSchemeShardPgTypesInTables::AlterTableAddPgTypeColumn-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:25:07.652496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:25:07.652526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:07.652532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:07.652538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:07.652545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:25:07.652550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:07.652560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:07.652574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:07.652687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:07.652753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:07.668761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:07.668794Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:07.672997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:07.673121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:07.673164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:07.675071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:07.675144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:07.675250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:07.675310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:07.675964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:07.676007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:07.676321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:07.676333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:07.676346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:07.676354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:07.676361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:07.676390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.677721Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:25:07.702294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:07.702397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.702460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:07.702510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:07.702522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.703392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:07.703418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:07.703466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.703477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:07.703484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:25:07.703491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:07.703953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.703963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:07.703969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:07.704348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.704357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:07.704363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:07.704371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:07.705138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 
72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:07.705637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:07.705681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:07.705898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:07.705929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:07.705938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:07.706006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:25:07.706015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:07.706050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:25:07.706063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:25:07.706547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:07.706557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:07.706604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
at schemeshard: 72057594046678944 2025-06-03T10:25:36.386280Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-03T10:25:36.386331Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 5000003 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:36.386949Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-03T10:25:36.387006Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72075186233409546 for txId: 102 at step: 5000003 2025-06-03T10:25:36.387232Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:36.387265Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 51539609708 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:36.387277Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 102:0 HandleReply TEvOperationPlan, operationId: 102:0, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:25:36.387392Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:25:36.387440Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:25:36.389865Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:36.389888Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:25:36.389998Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:36.390019Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [12:207:2208], at schemeshard: 72057594046678944, txId: 102, path id: 2 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:25:36.390208Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 
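The plan-step mechanics are visible here: the proposal carries a [MinStep, MaxStep] window (5000003 to 2^64-1 for txId 102), the coordinator plans the transaction at a step no earlier than MinStep, and the schemeshard's FrontStep advances monotonically (5000002 -> 5000003). Below is a toy coordinator capturing only those constraints; the real coordinator additionally quantizes steps by the domain's plan resolution, which this sketch ignores:

```cpp
// Toy coordinator illustrating the planning constraints; TToyCoordinator is
// an illustrative name, and the real coordinator additionally quantizes
// steps by the domain's plan resolution, which this sketch ignores.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>

struct TProposal {
    uint64_t TxId;
    uint64_t MinStep;
    uint64_t MaxStep;
};

class TToyCoordinator {
public:
    // Returns the planned step, or 0 when the [MinStep, MaxStep] window
    // can no longer be satisfied and the transaction must be rejected.
    uint64_t Plan(const TProposal& p) {
        uint64_t step = std::max(FrontStep_ + 1, p.MinStep);
        if (step > p.MaxStep)
            return 0;
        FrontStep_ = step;   // the front step only ever moves forward
        std::cout << "Add transaction: " << p.TxId
                  << " at step: " << step << '\n';
        return step;
    }

private:
    uint64_t FrontStep_ = 0;
};

int main() {
    TToyCoordinator coordinator;
    const uint64_t kNoLimit = std::numeric_limits<uint64_t>::max();
    coordinator.Plan({1, 0, kNoLimit});           // planned at step 1 here
    coordinator.Plan({102, 5000003, kNoLimit});   // MinStep forces 5000003
}
```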
2025-06-03T10:25:36.390220Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:25:36.390381Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:25:36.390401Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:25:36.390407Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:25:36.390414Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:25:36.390423Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:25:36.390448Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:25:36.390737Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 354 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:25:36.390748Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:25:36.390771Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 354 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:25:36.390789Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 354 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:25:36.391087Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 51539609848 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:25:36.391116Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:25:36.391136Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 51539609848 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:25:36.391144Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:25:36.391153Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 310 RawX2: 51539609848 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:25:36.391170Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:36.391176Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:25:36.391181Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:25:36.391190Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:25:36.392352Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:25:36.394195Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:25:36.394256Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:25:36.394372Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:25:36.394385Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:25:36.394410Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:25:36.394415Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:25:36.394422Z node 12 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:25:36.394426Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:25:36.394433Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 
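Completion here is two independent conditions joined: every sub-operation part must report done ("progress is 1/1") and the scheme board publication must be acknowledged ("is published: true") before DoNotify releases TEvNotifyTxCompletionResult to the waiter, as the next lines show. A compact model of that bookkeeping (TOperationModel is an illustrative name, not schemeshard's TOperation):

```cpp
// Compact model of the readiness bookkeeping; TOperationModel is an
// illustrative name, not schemeshard's TOperation.
#include <iostream>

class TOperationModel {
public:
    explicit TOperationModel(int totalParts) : Total_(totalParts) {}

    void PartDone() {
        ++Done_;
        std::cout << "progress is " << Done_ << "/" << Total_ << '\n';
        MaybeNotify();
    }

    void PublishAcked() {         // scheme board ack: "is published: true"
        Published_ = true;
        MaybeNotify();
    }

private:
    void MaybeNotify() {
        // Both conditions must hold, in either arrival order, exactly once.
        if (Done_ == Total_ && Published_ && !Notified_) {
            Notified_ = true;
            std::cout << "DoNotify: send TEvNotifyTxCompletionResult\n";
        }
    }

    int Total_;
    int Done_ = 0;
    bool Published_ = false;
    bool Notified_ = false;
};

int main() {
    TOperationModel op(1);   // txId 102 has a single part, 102:0
    op.PublishAcked();       // the publish ack may land before the part
    op.PartDone();           // 1/1 and published -> notify the waiter
}
```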
2025-06-03T10:25:36.394482Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [12:338:2316] message: TxId: 102 2025-06-03T10:25:36.394493Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:25:36.394501Z node 12 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:25:36.394508Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:25:36.394554Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:25:36.395577Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:25:36.395596Z node 12 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [12:398:2369] TestWaitNotification: OK eventTxId 102 >> IncorrectQueries::WrongDataSize [GOOD] >> IncorrectQueries::WrongVDiskID ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_write/unittest >> DataShardWrite::UpsertLostPrepareArbiterRestart [GOOD] Test command err: 2025-06-03T10:25:16.530219Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:16.530364Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:16.530404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001884/r3tmp/tmpBSfQdC/pdisk_1.dat 2025-06-03T10:25:16.654606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.675816Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:16.677445Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946315816005 != 1748946315816009 2025-06-03T10:25:16.720571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:16.720613Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:16.732123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:16.810472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:16.837096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:16.837511Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:16.837702Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:25:16.837847Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:16.866922Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:16.867206Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:16.867241Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:16.867454Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:25:16.867467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:25:16.867476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:25:16.867551Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:16.867589Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:16.867604Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:25:16.879902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:16.888487Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:25:16.888602Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:16.888636Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:25:16.888643Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:25:16.888650Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:25:16.888657Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:25:16.888748Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.888757Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:16.888881Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:25:16.888908Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:25:16.889045Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:25:16.889057Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:16.889066Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:25:16.889072Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:25:16.889078Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:25:16.889084Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:25:16.889091Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:25:16.889107Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.889114Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:16.889122Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:25:16.889147Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:25:16.889153Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:16.889178Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:25:16.889239Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:25:16.889252Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:25:16.889276Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:25:16.889288Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:25:16.889315Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:25:16.889322Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:25:16.889328Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.889425Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:16.889431Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:25:16.889436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:25:16.889441Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.889455Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:25:16.889460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:25:16.889465Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:25:16.889470Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:25:16.889477Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:16.889803Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:25:16.889814Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:25:16.901617Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:25:16.901655Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:25:16.901666Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:25:16.901683Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:25:16.901704Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:17.057855Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:17.057886Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... : NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.532472Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.532482Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:962:2778], serverId# [7:963:2779], sessionId# [0:0:0] 2025-06-03T10:25:36.532517Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553169, Sender [7:961:2777], Recipient [7:717:2595]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-03T10:25:36.532706Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [7:966:2782], Recipient [7:717:2595]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.532714Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.532719Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037890, clientId# [7:965:2781], serverId# [7:966:2782], sessionId# [0:0:0] 2025-06-03T10:25:36.532750Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [7:964:2780], Recipient [7:717:2595]: NKikimrTxDataShard.TEvRead ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-03T10:25:36.532790Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037890, FollowerId 0 2025-06-03T10:25:36.532800Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037890 CompleteEdge# v1001/1000001 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:25:36.532806Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037890 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-03T10:25:36.532814Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CheckRead 2025-06-03T10:25:36.532829Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 
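The trace above is the datashard execution-unit pipeline at work: an operation is attached to a chain of units (CheckRead -> BuildAndWaitDependencies -> ExecuteRead -> CompletedOperations for the read; CheckSchemeTx -> StoreSchemeTx -> FinishPropose -> WaitForPlan for the earlier scheme tx), each unit returns an execution status, and units that report DelayComplete are finished only in the transaction's Complete phase, which is why "Complete execution ... on unit StoreSchemeTx" and "... on unit FinishPropose" appear only after TTxProposeTransactionBase::Complete. Below is a minimal sketch of that control flow; the types, statuses and unit bodies are simplified stand-ins for illustration, not the real NKikimr::NDataShard interfaces.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Status values modeled on the trace: Executed advances to the next unit,
// DelayComplete defers completion to the Complete phase, NotReady parks the
// operation (like WaitForPlan waiting for the plan step to arrive).
enum class EStatus { Executed, DelayComplete, NotReady };

struct TUnit {
    std::string Name;
    std::function<EStatus()> Body; // hypothetical per-unit work
};

int main() {
    std::vector<TUnit> plan = {
        {"CheckRead",                []{ return EStatus::Executed; }},
        {"BuildAndWaitDependencies", []{ return EStatus::Executed; }},
        {"ExecuteRead",              []{ return EStatus::Executed; }},
        {"CompletedOperations",      []{ return EStatus::Executed; }},
    };
    std::vector<std::string> deferred; // completed later, in the Complete phase
    for (const auto& unit : plan) {
        std::cout << "Trying to execute on unit " << unit.Name << "\n";
        switch (unit.Body()) {
        case EStatus::NotReady:
            std::cout << "is not ready to execute on unit " << unit.Name << "\n";
            return 0; // operation stays parked until an event wakes it up
        case EStatus::DelayComplete:
            deferred.push_back(unit.Name);
            [[fallthrough]];
        case EStatus::Executed:
            std::cout << "Advance execution plan, executing on unit " << unit.Name << "\n";
        }
    }
    // The Complete phase, mirroring "Complete execution for ... on unit ...".
    for (const auto& name : deferred)
        std::cout << "Complete execution on unit " << name << "\n";
}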
2025-06-03T10:25:36.532833Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CheckRead 2025-06-03T10:25:36.532838Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:36.532842Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit BuildAndWaitDependencies 2025-06-03T10:25:36.532852Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 72075186224037890 2025-06-03T10:25:36.532859Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-03T10:25:36.532863Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:36.532867Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit ExecuteRead 2025-06-03T10:25:36.532872Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit ExecuteRead 2025-06-03T10:25:36.532885Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037890 Execute read# 1, request: { ReadId: 1002 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-03T10:25:36.542499Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037890 Complete read# {[7:964:2780], 1002} after executionsCount# 1 2025-06-03T10:25:36.542563Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037890 read iterator# {[7:964:2780], 1002} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:25:36.542599Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037890 read iterator# {[7:964:2780], 1002} finished in read 2025-06-03T10:25:36.542626Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-03T10:25:36.542635Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit ExecuteRead 2025-06-03T10:25:36.542640Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 72075186224037890 to execution unit CompletedOperations 2025-06-03T10:25:36.542646Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 72075186224037890 on unit CompletedOperations 2025-06-03T10:25:36.542666Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 72075186224037890 is Executed 2025-06-03T10:25:36.542669Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 72075186224037890 executing on unit CompletedOperations 2025-06-03T10:25:36.542673Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 72075186224037890 has finished 2025-06-03T10:25:36.542679Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037890 2025-06-03T10:25:36.542733Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at 
tablet# 72075186224037890 2025-06-03T10:25:36.543017Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [7:969:2785], Recipient [7:714:2593]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.543026Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.543033Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:968:2784], serverId# [7:969:2785], sessionId# [0:0:0] 2025-06-03T10:25:36.543071Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553169, Sender [7:967:2783], Recipient [7:714:2593]: NKikimrTxDataShard.TEvGetInfoRequest 2025-06-03T10:25:36.543261Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [7:972:2788], Recipient [7:714:2593]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.543269Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:36.543273Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037891, clientId# [7:971:2787], serverId# [7:972:2788], sessionId# [0:0:0] 2025-06-03T10:25:36.543327Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [7:970:2786], Recipient [7:714:2593]: NKikimrTxDataShard.TEvRead ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC RangesSize: 1 2025-06-03T10:25:36.543372Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-03T10:25:36.543384Z node 7 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v1000/281474976715657 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:25:36.543390Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v4000/18446744073709551615 2025-06-03T10:25:36.543401Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CheckRead 2025-06-03T10:25:36.543420Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-03T10:25:36.543424Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CheckRead 2025-06-03T10:25:36.543427Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:36.543430Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-03T10:25:36.543450Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 72075186224037891 2025-06-03T10:25:36.543456Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-03T10:25:36.543460Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:36.543464Z node 7 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit ExecuteRead 2025-06-03T10:25:36.543468Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit ExecuteRead 2025-06-03T10:25:36.543486Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 1003 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC } 2025-06-03T10:25:36.543513Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[7:970:2786], 1003} after executionsCount# 1 2025-06-03T10:25:36.543519Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[7:970:2786], 1003} sends rowCount# 0, bytes# 0, quota rows left# 18446744073709551615, quota bytes left# 18446744073709551615, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:25:36.543527Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[7:970:2786], 1003} finished in read 2025-06-03T10:25:36.543533Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-03T10:25:36.543536Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit ExecuteRead 2025-06-03T10:25:36.543543Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 72075186224037891 to execution unit CompletedOperations 2025-06-03T10:25:36.543546Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 72075186224037891 on unit CompletedOperations 2025-06-03T10:25:36.543551Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 72075186224037891 is Executed 2025-06-03T10:25:36.543554Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 72075186224037891 executing on unit CompletedOperations 2025-06-03T10:25:36.543556Z node 7 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 72075186224037891 has finished 2025-06-03T10:25:36.543560Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-03T10:25:36.543571Z node 7 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize1000 >> IncorrectQueries::WrongVDiskID [GOOD] >> IncorrectQueries::ProtoBlobGet >> CostMetricsPatchBlock4Plus2::TestPatch4Plus2BlockRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests1Inflight1BlobSize1000 >> KqpTx::LocksAbortOnCommit [GOOD] >> KqpTx::MixEnginesOldNew >> test.py::test[aggregate-group_by_gs_flatten_expr-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_by_hop_expr_key--Results] [SKIPPED] >> test.py::test[aggregate-group_by_hop_static-default.txt-Results] |60.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |60.2%| [LD] {RESULT} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut |60.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/secret/ut/ydb-services-metadata-secret-ut >> 
test.py::test[aggregate-group_by_hop_static-default.txt-Results] [SKIPPED] |60.2%| [TA] $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> IncorrectQueries::ProtoBlobGet [GOOD] >> IncorrectQueries::ProtoQueryGet [GOOD] >> IncorrectQueries::WrongPartId >> IncorrectQueries::WrongPartId [GOOD] >> IncorrectQueries::ProtobufBlob >> test.py::test[aggregate-percentiles_containers--Results] [GOOD] |60.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql |60.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql >> BsControllerConfig::MergeBoxes [GOOD] |60.2%| [TA] {RESULT} $(B)/ydb/core/grpc_streaming/ut/test-results/unittest/{meta.json ... results_accumulator.log} |60.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/ydb-core-tx-datashard-ut_minikql >> KqpSinkLocks::EmptyRange [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBroken >> TStorageTenantTest::CreateTableInsideSubDomain2 >> TStorageTenantTest::CreateSolomonInsideSubDomain >> TStorageTenantTest::LsLs >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains >> IncorrectQueries::ProtobufBlob [GOOD] >> IncorrectQueries::SameBlob >> test.py::test[aggregate-group_by_rollup_grouping_hum_bind--Results] [GOOD] >> test.py::test[aggregate-group_by_rollup_key_check--Results] >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet >> IncorrectQueries::SameBlob [GOOD] >> IncorrectQueries::WrongCrc >> KqpTx::MixEnginesOldNew [GOOD] |60.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part10/pytest >> test.py::test[aggregate-group_by_hop_static-default.txt-Results] [SKIPPED] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_bscontroller/unittest >> BsControllerConfig::MergeBoxes [GOOD] Test command err: Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11015:2156] recipient: [1:10814:2166] IGNORE Leader for TabletID 72057594037932033 is [0:0:0] sender: [1:11015:2156] recipient: [1:10814:2166] Leader for TabletID 72057594037932033 is [1:11113:2168] sender: [1:11116:2156] recipient: [1:10814:2166] 2025-06-03T10:24:27.257468Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot 2025-06-03T10:24:27.258296Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored 2025-06-03T10:24:27.258367Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started 2025-06-03T10:24:27.258675Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:24:27.258865Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig 2025-06-03T10:24:27.258929Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.258933Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo 2025-06-03T10:24:27.259025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute 2025-06-03T10:24:27.259798Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:24:27.259823Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:24:27.259857Z 
node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:24:27.259878Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.259887Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:24:27.259896Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion Leader for TabletID 72057594037932033 is [1:11113:2168] sender: [1:11138:2156] recipient: [1:110:2157] 2025-06-03T10:24:27.270875Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:24:27.270937Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.281402Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:24:27.281459Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.281477Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:24:27.281496Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.281543Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:24:27.281556Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.281564Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:24:27.281581Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.292069Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:24:27.292129Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.302572Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:24:27.302646Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:24:27.302840Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything 
Complete 2025-06-03T10:24:27.302846Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:24:27.302892Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:24:27.302904Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:24:27.306004Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/dev/disk0" } Drive { Path: "/dev/disk1" } Drive { Path: "/dev/disk2" } Drive { Path: "/dev/disk3" } Drive { Path: "/dev/disk4" } Drive { Path: "/dev/disk5" } Drive { Path: "/dev/disk6" } Drive { Path: "/dev/disk7" } Drive { Path: "/dev/disk8" Type: SSD } Drive { Path: "/dev/disk9" Type: SSD } Drive { Path: "/dev/disk10" Type: SSD } Drive { Path: "/dev/disk11" Type: SSD } Drive { Path: "/dev/disk12" Type: SSD } Drive { Path: "/dev/disk13" Type: SSD } Drive { Path: "/dev/disk14" Type: SSD } Drive { Path: "/dev/disk15" Type: SSD } } } Command { DefineBox { BoxId: 1 Name: "test box" Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12003 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12004 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12005 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12006 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12007 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12008 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12009 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12010 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12011 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12012 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12013 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12014 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12015 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12016 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12017 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12018 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12019 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12020 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12021 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12022 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12023 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12024 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12025 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12026 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12027 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12028 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12029 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12030 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12031 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12032 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12033 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12034 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12035 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12036 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12037 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12038 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12039 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12040 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12041 } HostConfigId: 1 
} Host { Key { Fqdn: "::1" IcPort: 12042 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12043 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12044 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12045 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12046 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12047 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12048 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12049 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12050 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12051 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12052 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12053 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12054 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12055 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12056 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12057 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12058 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12059 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12060 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12061 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12062 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12063 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12064 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12065 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12066 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12067 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12068 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12069 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12070 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12071 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12072 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12073 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12074 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12075 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12076 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12077 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12078 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12079 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12080 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12081 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12082 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12083 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12084 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12085 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12086 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12087 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12088 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12089 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12090 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12091 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12092 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12093 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12094 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12095 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12096 } HostConfigId: 1 } Host { Ke ... 
0} Create new pdisk PDiskId# 275:1002 Path# /dev/disk3 2025-06-03T10:25:30.676098Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1000 Path# /dev/disk1 2025-06-03T10:25:30.676102Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1001 Path# /dev/disk2 2025-06-03T10:25:30.676106Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 276:1002 Path# /dev/disk3 2025-06-03T10:25:30.676110Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1000 Path# /dev/disk1 2025-06-03T10:25:30.676114Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1001 Path# /dev/disk2 2025-06-03T10:25:30.676119Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 277:1002 Path# /dev/disk3 2025-06-03T10:25:30.676123Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1000 Path# /dev/disk1 2025-06-03T10:25:30.676127Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1001 Path# /dev/disk2 2025-06-03T10:25:30.676132Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 278:1002 Path# /dev/disk3 2025-06-03T10:25:30.676136Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1000 Path# /dev/disk1 2025-06-03T10:25:30.676140Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1001 Path# /dev/disk2 2025-06-03T10:25:30.676144Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 279:1002 Path# /dev/disk3 2025-06-03T10:25:30.676149Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1000 Path# /dev/disk1 2025-06-03T10:25:30.676153Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1001 Path# /dev/disk2 2025-06-03T10:25:30.676157Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 280:1002 Path# /dev/disk3 2025-06-03T10:25:30.676161Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1000 Path# /dev/disk1 2025-06-03T10:25:30.676171Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1001 Path# /dev/disk2 2025-06-03T10:25:30.676176Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 281:1002 Path# /dev/disk3 2025-06-03T10:25:30.676180Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1000 Path# /dev/disk1 2025-06-03T10:25:30.676184Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1001 Path# /dev/disk2 2025-06-03T10:25:30.676189Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 282:1002 Path# /dev/disk3 2025-06-03T10:25:30.676193Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1000 Path# /dev/disk1 2025-06-03T10:25:30.676197Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 283:1001 Path# /dev/disk2 2025-06-03T10:25:30.676201Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 283:1002 Path# /dev/disk3 2025-06-03T10:25:30.676205Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1000 Path# /dev/disk1 2025-06-03T10:25:30.676211Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1001 Path# /dev/disk2 2025-06-03T10:25:30.676215Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 284:1002 Path# /dev/disk3 2025-06-03T10:25:30.676219Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1000 Path# /dev/disk1 2025-06-03T10:25:30.676223Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1001 Path# /dev/disk2 2025-06-03T10:25:30.676227Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 285:1002 Path# /dev/disk3 2025-06-03T10:25:30.676231Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1000 Path# /dev/disk1 2025-06-03T10:25:30.676235Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1001 Path# /dev/disk2 2025-06-03T10:25:30.676240Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 286:1002 Path# /dev/disk3 2025-06-03T10:25:30.676244Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1000 Path# /dev/disk1 2025-06-03T10:25:30.676249Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1001 Path# /dev/disk2 2025-06-03T10:25:30.676254Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 287:1002 Path# /dev/disk3 2025-06-03T10:25:30.676259Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1000 Path# /dev/disk1 2025-06-03T10:25:30.676264Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1001 Path# /dev/disk2 2025-06-03T10:25:30.676269Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 288:1002 Path# /dev/disk3 2025-06-03T10:25:30.676273Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1000 Path# /dev/disk1 2025-06-03T10:25:30.676278Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1001 Path# /dev/disk2 2025-06-03T10:25:30.676283Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 289:1002 Path# /dev/disk3 2025-06-03T10:25:30.676287Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1000 Path# /dev/disk1 2025-06-03T10:25:30.676292Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1001 Path# /dev/disk2 2025-06-03T10:25:30.676297Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 290:1002 Path# /dev/disk3 2025-06-03T10:25:30.676302Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1000 Path# /dev/disk1 2025-06-03T10:25:30.676306Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 291:1001 Path# /dev/disk2 2025-06-03T10:25:30.676311Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 291:1002 Path# /dev/disk3 2025-06-03T10:25:30.676319Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1000 Path# /dev/disk1 2025-06-03T10:25:30.676324Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1001 Path# /dev/disk2 2025-06-03T10:25:30.676328Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 292:1002 Path# /dev/disk3 2025-06-03T10:25:30.676333Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1000 Path# /dev/disk1 2025-06-03T10:25:30.676338Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1001 Path# /dev/disk2 2025-06-03T10:25:30.676343Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 293:1002 Path# /dev/disk3 2025-06-03T10:25:30.676347Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1000 Path# /dev/disk1 2025-06-03T10:25:30.676351Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1001 Path# /dev/disk2 2025-06-03T10:25:30.676355Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 294:1002 Path# /dev/disk3 2025-06-03T10:25:30.676359Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1000 Path# /dev/disk1 2025-06-03T10:25:30.676364Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1001 Path# /dev/disk2 2025-06-03T10:25:30.676368Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 295:1002 Path# /dev/disk3 2025-06-03T10:25:30.676372Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1000 Path# /dev/disk1 2025-06-03T10:25:30.676376Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1001 Path# /dev/disk2 2025-06-03T10:25:30.676381Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 296:1002 Path# /dev/disk3 2025-06-03T10:25:30.676385Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1000 Path# /dev/disk1 2025-06-03T10:25:30.676389Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1001 Path# /dev/disk2 2025-06-03T10:25:30.676393Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 297:1002 Path# /dev/disk3 2025-06-03T10:25:30.676397Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1000 Path# /dev/disk1 2025-06-03T10:25:30.676401Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1001 Path# /dev/disk2 2025-06-03T10:25:30.676405Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 298:1002 Path# /dev/disk3 2025-06-03T10:25:30.676409Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1000 Path# /dev/disk1 2025-06-03T10:25:30.676414Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 299:1001 Path# /dev/disk2 2025-06-03T10:25:30.676418Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} 
Create new pdisk PDiskId# 299:1002 Path# /dev/disk3 2025-06-03T10:25:30.676422Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1000 Path# /dev/disk1 2025-06-03T10:25:30.676426Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1001 Path# /dev/disk2 2025-06-03T10:25:30.676431Z node 251 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 300:1002 Path# /dev/disk3 2025-06-03T10:25:30.857436Z node 251 :BS_CONTROLLER ERROR: {BSC07@impl.h:2181} ProcessControllerEvent event processing took too much time Type# 268637706 Duration# 0.182158s 2025-06-03T10:25:30.857514Z node 251 :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 2146435078 Duration# 0.182256s 2025-06-03T10:25:30.872899Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { MergeBoxes { OriginBoxId: 2 OriginBoxGeneration: 1 TargetBoxId: 1 TargetBoxGeneration: 1 StoragePoolIdMap { OriginStoragePoolId: 1 TargetStoragePoolId: 2 } } } } 2025-06-03T10:25:30.902023Z node 251 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { ReadBox { BoxId: 1 } } Command { QueryBaseConfig { } } } >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize1000 >> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD] |60.2%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part2/pytest >> test.py::test[aggregate-percentiles_containers--Results] [GOOD] >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpTx::MixEnginesOldNew [GOOD] Test command err: Trying to start YDB, gRPC: 25172, MsgBus: 17491 2025-06-03T10:25:36.483833Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667319013813132:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:36.484013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f58/r3tmp/tmp68HsDJ/pdisk_1.dat 2025-06-03T10:25:36.549813Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:36.550047Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667319013812969:2079] 1748946336482822 != 1748946336482825 TServer::EnableGrpc on GrpcPort 25172, node 1 2025-06-03T10:25:36.572201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:36.572238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:36.572243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:36.572315Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17491 2025-06-03T10:25:36.621619Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:36.621655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:36.625766Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17491 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:36.649255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.658197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.731618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.766510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.781806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:37.069925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667323308781899:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.069965Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.127406Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.137046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.151996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.166947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.180804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.191163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.203951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.229314Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667323308782551:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.229347Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.229477Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667323308782556:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.230668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:37.234256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:37.234391Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667323308782558:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:37.322765Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667323308782609:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:37.824684Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZDY0YmY2M2MtOGJmNzRmNGQtNmE2OGFhMzMtOGFiZWMzZTg=, ActorId: [1:7511667323308782842:2499], ActorState: ExecuteState, TraceId: 01jwtn6fyr55bm28akn1hce6bt, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken 2025-06-03T10:25:37.829188Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZDY0YmY2M2MtOGJmNzRmNGQtNmE2OGFhMzMtOGFiZWMzZTg=, ActorId: [1:7511667323308782842:2499], ActorState: ReadyState, TraceId: 01jwtn6g1588gafvrh3yce0zge, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 21729, MsgBus: 9772 2025-06-03T10:25:38.325356Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667327232508415:2213];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:38.332713Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f58/r3tmp/tmppnBGqQ/pdisk_1.dat 2025-06-03T10:25:38.365545Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21729, node 2 2025-06-03T10:25:38.392725Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:38.392948Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:38.392952Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:38.392997Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:38.436983Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:38.437009Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:38.437691Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9772 TClient is connected to server localhost:9772 WaitRootIsUp 'Root'... 
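The "tx has deferred effects, but locks are broken" message in the KqpTx trace above is the optimistic-locking failure path these tests exercise: reads take version locks, writes are buffered as deferred effects, and the locks are validated at commit time; if a concurrent transaction has modified a locked key in the meantime, the commit aborts rather than applying stale effects. The sketch below illustrates that validate-at-commit rule under simplified assumptions; TLock, TTable and Commit are illustrative stand-ins, not the real KQP or datashard types.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Each key carries a version; a lock remembers the version seen at read time.
struct TLock { std::string Key; uint64_t SeenVersion; };

struct TTable {
    std::map<std::string, uint64_t> Versions; // key -> current version

    uint64_t Read(const std::string& key) { return Versions[key]; }
    void Write(const std::string& key) { ++Versions[key]; }
};

// Commit applies the deferred writes only if every lock still matches.
bool Commit(TTable& table, const std::vector<TLock>& locks,
            const std::vector<std::string>& deferredWrites) {
    for (const auto& lock : locks) {
        if (table.Versions[lock.Key] != lock.SeenVersion) {
            std::cout << "tx has deferred effects, but locks are broken\n";
            return false; // abort instead of applying stale effects
        }
    }
    for (const auto& key : deferredWrites)
        table.Write(key);
    return true;
}

int main() {
    TTable table;
    table.Versions["k1"] = 1;

    // Tx A reads k1 and buffers a write to k1 as a deferred effect.
    std::vector<TLock> locksA = {{"k1", table.Read("k1")}};
    std::vector<std::string> writesA = {"k1"};

    // Tx B commits a write to k1 first, invalidating A's lock.
    table.Write("k1");

    Commit(table, locksA, writesA); // prints the lock-broken message
}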
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:25:38.558237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:38.563742Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:38.586134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:38.630820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:38.674606Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:38.701674Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:38.945194Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667327232509844:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:38.945220Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:38.960578Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:38.983077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.045926Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.112492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.180837Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.244537Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.299342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.373796Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667331527477810:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.373870Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.374052Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667331527477818:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.374861Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:39.381566Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:25:39.385747Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667331527477820:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:39.474830Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667331527477871:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight1BlobSize1000 |60.2%| [TA] $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpLocksTricky::TestNoLocksIssue+withSink [GOOD] Test command err: Trying to start YDB, gRPC: 24501, MsgBus: 64088 2025-06-03T10:25:30.054180Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667286466586554:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:30.056762Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f65/r3tmp/tmpTOTz98/pdisk_1.dat 2025-06-03T10:25:30.089878Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667286466586372:2079] 1748946329919381 != 1748946329919384 2025-06-03T10:25:30.093056Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24501, node 1 2025-06-03T10:25:30.108611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:30.108628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:30.108631Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:30.108679Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64088 TClient is connected to server localhost:64088 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:25:30.165742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.165772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.166621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:30.192791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.196374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.207670Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:25:30.245512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.349077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.441218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:31.071514Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667290761555308:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.071578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.262453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.284752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.336136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.387994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.417938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.455887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.497625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.541649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667295056523268:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.541668Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.541773Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667295056523273:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:31.542675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:31.555643Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667295056523275:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:31.622704Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667295056523326:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:32.354158Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715676; 2025-06-03T10:25:32.357596Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [1:7511667299351490919:2510], Table: `/Root/Test` ([72057594046644480:9:1]), SessionActorId: [1:7511667299351490891:2510]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037914, Sink=[1:7511667299351490919:2510].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:25:32.357694Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:7511667299351490912:2510], SessionActorId: [1:7511667299351490891:2510], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7511667299351490891:2510]. isRollback=0 2025-06-03T10:25:32.357732Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=ZmZhYzUxYTYtZjEzNjcxMTgtNGVhMzUxYjUtNWI2YThjMzA=, ActorId: [1:7511667299351490891:2510], ActorState: ExecuteState, TraceId: 01jwtn6ap1atbnm9qwzd4v0rrw, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7511667299351490969:2510] from: [1:7511667299351490912:2510] 2025-06-03T10:25:32.357748Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:7511667299351490969:2510] TxId: 281474976715676. Ctx: { TraceId: 01jwtn6ap1atbnm9qwzd4v0rrw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmZhYzUxYTYtZjEzNjcxMTgtNGVhMzUxYjUtNWI2YThjMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:25:32.357781Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZmZhYzUxYTYtZjEzNjcxMTgtNGVhMzUxYjUtNWI2YThjMzA=, ActorId: [1:7511667299351490891:2510], ActorState: ExecuteState, TraceId: 01jwtn6ap1atbnm9qwzd4v0rrw, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 Trying to start YDB, gRPC: 10912, MsgBus: 24885 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f65/r3tmp/tmpLgFdLp/pdisk_1.dat 2025-06-03T10:25:32.764367Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:32.766068Z node 2 :IMPORT WARN: schemeshard_impo ... self_id=[2:7511667306449667054:2370];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=16;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907;receive=72075186224037947; 2025-06-03T10:25:34.434923Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;self_id=[2:7511667306449667054:2370];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037903;local_tx_no=17;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037907;receive=72075186224037947; 2025-06-03T10:25:34.434989Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:25:34.582263Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=YTFiYmFkNjQtZWNiMTljYWQtNDg1ZDliNjYtMWZhYmI0YzU=, ActorId: [2:7511667310744636744:2827], ActorState: ExecuteState, TraceId: 01jwtn6cts7rnztcaef8pz7cy6, Create QueryResponse for error on request, msg: 2025-06-03T10:25:34.583702Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Execute;tx_current=281474976715670;tx_id=281474976715670;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715670; 2025-06-03T10:25:34.584436Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;self_id=[2:7511667306449667513:2398];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037936;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715665;problem=finished; 2025-06-03T10:25:34.584768Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037936;tx_state=TTxProgressTx::Complete;fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=lock invalidated;tx_id=281474976715670; 2025-06-03T10:25:34.584787Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: fline=manager.cpp:134;event=abort;tx_id=281474976715665;problem=finished; Trying to start YDB, gRPC: 12799, MsgBus: 12432 2025-06-03T10:25:35.522626Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:302:2346], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:35.522673Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:35.522685Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f65/r3tmp/tmp2IZJ9S/pdisk_1.dat TServer::EnableGrpc on GrpcPort 12799, node 3 TClient is connected to server localhost:12432 2025-06-03T10:25:35.653219Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:35.653886Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:35.653909Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:35.653914Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:35.654097Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:35.654601Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:32:2079] 1748946335068930 != 1748946335068934 TClient is connected to server localhost:12432 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:25:35.725336Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:35.725386Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:35.728240Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.729316Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:25:35.816781Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.027451Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.327027Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.590952Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.894033Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:1715:3312], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:36.894088Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:36.898350Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.074090Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.328572Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.536831Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.816003Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:38.083077Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:38.393251Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:38.674542Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2385:3807], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:38.674577Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:38.674633Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:2390:3812], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:38.675644Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:38.862788Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:2392:3814], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:38.914632Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:2450:3853] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:39.153495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.351889Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.685623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 >> TStorageTenantTest::DeclareAndDefine >> IncorrectQueries::WrongCrc [GOOD] >> IncorrectQueries::ProtoHasOnlyVDiskId [GOOD] >> IncorrectQueries::ProtoHasVDiskAndExtQueue [GOOD] >> IndexRestoreGet::BlobRecovery >> TStorageTenantTest::GenericCases >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 |60.2%| [TA] $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... results_accumulator.log} >> TStorageTenantTest::CreateTableInsideSubDomain >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1BlobSize1000 >> BlobStorageSync::TestSyncLogCuttingMirror3dc [GOOD] >> BlobStorageSync::TestSyncLogCuttingMirror3of4 >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] >> KqpSinkLocks::EmptyRangeAlreadyBroken [GOOD] >> IndexRestoreGet::BlobRecovery [GOOD] >> Mirror3dc::GcQuorum >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight1BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateSolomonInsideSubDomain [GOOD] Test command err: 2025-06-03T10:25:41.065742Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667338425831241:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:41.065983Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028e9/r3tmp/tmpiKugtO/pdisk_1.dat 2025-06-03T10:25:42.101976Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:42.120906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: 
LookupError; 2025-06-03T10:25:42.126664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:42.126683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:42.142260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10145 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:42.477650Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667338425831456:2141] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:42.500524Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667342720799192:2439] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:42.500564Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667338425831479:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:42.500583Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667342720799087:2355][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667338425831479:2154], cookie# 1 2025-06-03T10:25:42.500907Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667342720799094:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799091:2355], cookie# 1 2025-06-03T10:25:42.500911Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667342720799095:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799092:2355], cookie# 1 2025-06-03T10:25:42.500915Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667342720799096:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799093:2355], cookie# 1 2025-06-03T10:25:42.500923Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667329835896497:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799094:2355], cookie# 1 2025-06-03T10:25:42.500930Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667329835896500:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799095:2355], cookie# 1 2025-06-03T10:25:42.500935Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667329835896503:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667342720799096:2355], cookie# 1 2025-06-03T10:25:42.500942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667342720799094:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667329835896497:2051], cookie# 1 2025-06-03T10:25:42.500944Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667342720799095:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667329835896500:2054], cookie# 1 2025-06-03T10:25:42.500946Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: 
[replica][1:7511667342720799096:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667329835896503:2057], cookie# 1 2025-06-03T10:25:42.500950Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667342720799087:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667342720799091:2355], cookie# 1 2025-06-03T10:25:42.500956Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667342720799087:2355][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:42.500959Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667342720799087:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667342720799092:2355], cookie# 1 2025-06-03T10:25:42.500961Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667342720799087:2355][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:42.500964Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667342720799087:2355][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667342720799093:2355], cookie# 1 2025-06-03T10:25:42.500966Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667342720799087:2355][/dc-1] Unexpected sync response: sender# [1:7511667342720799093:2355], cookie# 1 2025-06-03T10:25:42.500973Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667338425831479:2154], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:42.505060Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667338425831479:2154], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667342720799087:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:42.505093Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667338425831479:2154], cacheItem# { Subscriber: { Subscriber: [1:7511667342720799087:2355] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:42.506328Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667342720799193:2440], recipient# [1:7511667342720799192:2439], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true 
Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:42.506349Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667342720799192:2439] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:42.596833Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667342720799192:2439] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:42.604822Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667342720799192:2439] Handle TEvDescribeSchemeResult Forward to# [1:7511667342720799191:2438] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:25:42.641547Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667338425831456:2141] Handle TEvProposeTransaction 2025-06-03T10:25:42.641560Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667338425831456:2141] TxId# 281474976710657 ProcessProposeTransaction 2025-06-03T10:25:42.641590Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667338425831456:2141] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7511667342720799204:2447] 2025-06-03T10:25:42.734805Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667342720799204:2447] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { Wor ... shard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-03T10:25:43.985260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-03T10:25:43.985278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-03T10:25:43.985311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:7 2025-06-03T10:25:43.985314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:7 tabletId 72075186224037894 2025-06-03T10:25:43.985616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-03T10:25:43.985620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-03T10:25:43.985628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-03T10:25:43.985629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-03T10:25:43.985634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:6 2025-06-03T10:25:43.985636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:6 tabletId 72075186224037893 2025-06-03T10:25:43.985641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-03T10:25:43.985643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-03T10:25:43.985647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:8 2025-06-03T10:25:43.985649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:8 tabletId 72075186224037895 2025-06-03T10:25:43.985654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-03T10:25:43.985657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-03T10:25:43.985666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath 
for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046644480 2025-06-03T10:25:43.985675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-03T10:25:43.985680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-03T10:25:43.985684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:25:43.985703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:25:44.002585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-03T10:25:44.125602Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667338425831479:2154], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.125648Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667338425831479:2154], cacheItem# { Subscriber: { Subscriber: [1:7511667347015766791:2663] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:44.125670Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667351310734391:2914], recipient# [1:7511667351310734390:2313], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.421403Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667345864293994:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.421471Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle 
TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667345864293994:2105], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.421481Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [3:7511667345864293994:2105], path# /dc-1/USER_0, domainOwnerId# 72057594046644480 2025-06-03T10:25:44.421544Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][3:7511667350159261594:2282][/dc-1/USER_0] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:44.421650Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667350159261594:2282][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7511667350159261595:2282] 2025-06-03T10:25:44.421665Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667350159261594:2282][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7511667350159261596:2282] 2025-06-03T10:25:44.421673Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][3:7511667350159261594:2282][/dc-1/USER_0] Set up state: owner# [3:7511667345864293994:2105], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:44.421677Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667350159261594:2282][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 Version: 0 }: sender# [3:7511667350159261597:2282] 2025-06-03T10:25:44.421682Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7511667350159261594:2282][/dc-1/USER_0] Ignore empty state: owner# [3:7511667345864293994:2105], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:44.421691Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667345864293994:2105], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 } 2025-06-03T10:25:44.421704Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667345864293994:2105], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [3:7511667350159261594:2282] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:44.421720Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667345864293994:2105], cacheItem# { Subscriber: { Subscriber: [3:7511667350159261594:2282] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown 
DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:44.421738Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667350159261601:2283], recipient# [3:7511667350159261593:2281], result# { ErrorCount: 1 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.421755Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667345864293994:2105], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.421765Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667350159261602:2284], recipient# [3:7511667350159261592:2318], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.425404Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/USER_0/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> VDiskRestart::Simple [GOOD] |60.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::EmptyRangeAlreadyBroken [GOOD] Test command err: Trying to start YDB, gRPC: 22952, MsgBus: 1629 2025-06-03T10:25:30.007102Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667292222452728:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:30.007118Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f69/r3tmp/tmp1U2jM7/pdisk_1.dat 2025-06-03T10:25:30.089853Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667292222452705:2079] 1748946330005260 != 1748946330005263 2025-06-03T10:25:30.091357Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22952, node 1 2025-06-03T10:25:30.106962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:30.106981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:30.106983Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:30.107029Z node 1 
:NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:30.110142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.110178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.111188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1629 TClient is connected to server localhost:1629 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:30.189269Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:30.193139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:30.913928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667292222453355:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:30.913974Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:30.917418Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667292222453375:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:30.920746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:30.932664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-03T10:25:30.932777Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667292222453377:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-03T10:25:31.010632Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667296517420724:2325] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:31.423466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:31.505871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:32.655748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.201510Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-03T10:25:33.207383Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:25:33.207443Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:25:33.207534Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [1:7511667305107363309:2965], Table: `/Root/Test` ([72057594046644480:6:1]), SessionActorId: [1:7511667305107363238:2965]Got LOCKS BROKEN for table `/Root/Test`. ShardID=72075186224037888, Sink=[1:7511667305107363309:2965].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:25:33.207655Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:7511667305107363302:2965], SessionActorId: [1:7511667305107363238:2965], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:7511667305107363238:2965]. isRollback=0 2025-06-03T10:25:33.207853Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=MzRlMjM5MTEtZmIxNzIzNjYtZjcwNGYwMjQtMzFmOWQ4ZmU=, ActorId: [1:7511667305107363238:2965], ActorState: ExecuteState, TraceId: 01jwtn6bg13x558xm41j445y73, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7511667305107363303:2965] from: [1:7511667305107363302:2965] 2025-06-03T10:25:33.207877Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:7511667305107363303:2965] TxId: 281474976710667. Ctx: { TraceId: 01jwtn6bg13x558xm41j445y73, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzRlMjM5MTEtZmIxNzIzNjYtZjcwNGYwMjQtMzFmOWQ4ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:25:33.207933Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MzRlMjM5MTEtZmIxNzIzNjYtZjcwNGYwMjQtMzFmOWQ4ZmU=, ActorId: [1:7511667305107363238:2965], ActorState: ExecuteState, TraceId: 01jwtn6bg13x558xm41j445y73, Create QueryResponse for error on request, msg:
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:25:35.010805Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667292222452728:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:35.011387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 Trying to start YDB, gRPC: 65482, MsgBus: 2451 2025-06-03T10:25:39.396435Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667329378188851:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:39.407760Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f69/r3tmp/tmp9ANu6Z/pdisk_1.dat 2025-06-03T10:25:39.450249Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65482, node 2 2025-06-03T10:25:39.481539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:39.481555Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:39.481558Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:39.481620Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2451 2025-06-03T10:25:39.513134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:39.513160Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:39.521785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2451 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
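The ABORTED outcome above, with issue code 2001 ("Transaction locks invalidated" / "Operation is aborting because locks are not valid"), is the standard optimistic-locking failure: a concurrent commit invalidated this transaction's read locks, so the whole transaction must be re-run from the start. A minimal client-side retry sketch follows; ExecuteTxn, TResult and the backoff constants are illustrative assumptions, not the test's or the SDK's actual API:

    #include <chrono>
    #include <functional>
    #include <thread>

    enum class EStatus { Success, Aborted, Error };
    struct TResult {
        EStatus Status = EStatus::Error;
        int IssueCode = 0;
    };

    constexpr int LOCKS_INVALIDATED = 2001;  // issue code seen in the log above

    // Re-runs an optimistic transaction while it aborts with code 2001; any
    // other outcome (success or a non-retryable error) is returned as-is.
    TResult RunWithRetries(const std::function<TResult()>& executeTxn,
                           int maxAttempts = 5) {
        TResult res;
        for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
            res = executeTxn();  // must re-read inputs, not just re-send the commit
            if (res.Status != EStatus::Aborted ||
                res.IssueCode != LOCKS_INVALIDATED) {
                return res;
            }
            // Linear backoff gives the competing writer time to finish.
            std::this_thread::sleep_for(std::chrono::milliseconds(20 * attempt));
        }
        return res;  // still aborted after maxAttempts
    }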
2025-06-03T10:25:39.585237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:39.594048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:39.816887Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667329378189375:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.816912Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.817101Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667329378189402:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:39.818180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:39.820827Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:25:39.820901Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667329378189404:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:39.886477Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667329378189455:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:39.903911Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:39.929895Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:40.431354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:40.720142Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=MjNlZGM3MmMtZGE5OTY5YzctMzY1Y2NjYTctMzM5YjQ0NmI=, ActorId: [2:7511667333673164534:2963], ActorState: ExecuteState, TraceId: 01jwtn6jtr52b7ys7q49sqh2zn, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken
: Error: Transaction locks invalidated. Table: `/Root/Test`, code: 2001
: Error: tx has deferred effects, but locks are broken WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:25:44.401559Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667329378188851:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:44.401605Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] >> TStorageTenantTest::LsLs [GOOD] |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain2 [GOOD] Test command err: 2025-06-03T10:25:41.056467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667340081089564:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:41.056493Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028f2/r3tmp/tmpbTtfyR/pdisk_1.dat 2025-06-03T10:25:41.830182Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:41.840422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:41.840451Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:41.850783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17114 WaitRootIsUp 'dc-1'... 
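Both nodes go through the same benign startup race around /Root/.metadata/workload_manager/pools/default: the pool fetcher reports NOT_FOUND, TPoolCreatorActor proposes ESchemeOpCreateResourcePool, schedules a retry to double-check, and the retried create is answered with "path exist, request accepts it" (severity 1, i.e. accepted rather than failed). Below is a self-contained sketch of that create-then-tolerate-exists pattern; the in-memory path set and helper names are stand-ins for the real schemeshard calls:

    #include <string>
    #include <unordered_set>

    enum class ECreateStatus { Ok, AlreadyExists, Error };

    // Placeholder scheme state so the sketch compiles on its own; the real
    // actor talks to the schemeshard instead of a local set.
    static std::unordered_set<std::string> schemePaths;

    ECreateStatus CreatePool(const std::string& path) {
        return schemePaths.insert(path).second ? ECreateStatus::Ok
                                               : ECreateStatus::AlreadyExists;
    }

    bool FetchPool(const std::string& path) {
        return schemePaths.count(path) > 0;  // true iff the pool resolves
    }

    // AlreadyExists counts as success: losing the creation race is fine,
    // which is why the "path exist" message above is a warning, not an error.
    bool EnsureDefaultPool(const std::string& path) {
        if (FetchPool(path)) {
            return true;                        // someone created it already
        }
        switch (CreatePool(path)) {
            case ECreateStatus::Ok:
            case ECreateStatus::AlreadyExists:
                return FetchPool(path);         // double-check, as the actor does
            case ECreateStatus::Error:
                return false;
        }
        return false;
    }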
TClient::Ls request: dc-1 2025-06-03T10:25:42.330110Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667340081089702:2134] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:42.331849Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667344376057537:2445] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:42.331879Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667340081089809:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:42.331901Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667344376057519:2439][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667340081089809:2153], cookie# 1 2025-06-03T10:25:42.332232Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667344376057523:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057520:2439], cookie# 1 2025-06-03T10:25:42.332238Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667344376057524:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057521:2439], cookie# 1 2025-06-03T10:25:42.332242Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667344376057525:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057522:2439], cookie# 1 2025-06-03T10:25:42.332250Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667331491154832:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057523:2439], cookie# 1 2025-06-03T10:25:42.332257Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667331491154835:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057524:2439], cookie# 1 2025-06-03T10:25:42.332262Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667331491154838:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667344376057525:2439], cookie# 1 2025-06-03T10:25:42.332268Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667344376057523:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667331491154832:2051], cookie# 1 2025-06-03T10:25:42.332270Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667344376057524:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667331491154835:2054], cookie# 1 2025-06-03T10:25:42.332273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667344376057525:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667331491154838:2057], cookie# 1 2025-06-03T10:25:42.332277Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667344376057519:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344376057520:2439], cookie# 1 2025-06-03T10:25:42.332283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: 
[main][1:7511667344376057519:2439][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:42.332288Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667344376057519:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344376057521:2439], cookie# 1 2025-06-03T10:25:42.332290Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667344376057519:2439][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:42.332294Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667344376057519:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344376057522:2439], cookie# 1 2025-06-03T10:25:42.332295Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667344376057519:2439][/dc-1] Unexpected sync response: sender# [1:7511667344376057522:2439], cookie# 1 2025-06-03T10:25:42.332303Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667340081089809:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:42.341551Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667340081089809:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667344376057519:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:42.341601Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667340081089809:2153], cacheItem# { Subscriber: { Subscriber: [1:7511667344376057519:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:42.342656Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667344376057538:2446], recipient# [1:7511667344376057537:2445], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:42.342683Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667344376057537:2445] HANDLE EvNavigateKeySetResult 
TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:42.412485Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667344376057537:2445] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:42.413231Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667344376057537:2445] Handle TEvDescribeSchemeResult Forward to# [1:7511667344376057536:2444] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:25:42.459263Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667340081089702:2134] Handle TEvProposeTransaction 2025-06-03T10:25:42.459276Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667340081089702:2134] TxId# 281474976710657 ProcessProposeTransaction 2025-06-03T10:25:42.459303Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667340081089702:2134] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7511667344376057544:2451] 2025-06-03T10:25:42.567490Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667344376057544:2451] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "/dc-1:test" Kind: "test" ... 
rivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:45.389012Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667357837109807:2343], recipient# [3:7511667357837109797:2341], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 72057594046644480 Instant: 0 ResultSet [{ Path: dc-1/USER_0 TableId: [72057594046644480:2:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindSubdomain DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:45.389037Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667353542142142:2111], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:45.389043Z node 3 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [3:7511667353542142142:2111], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480 2025-06-03T10:25:45.389068Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:45.389452Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7511667357837109814:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7511667331491154832:2051] 2025-06-03T10:25:45.389456Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7511667357837109815:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7511667331491154835:2054] 2025-06-03T10:25:45.389460Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][3:7511667357837109816:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [1:7511667331491154838:2057] 2025-06-03T10:25:45.389464Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667357837109811:2344] 2025-06-03T10:25:45.389470Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: 
[main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667357837109812:2344] 2025-06-03T10:25:45.389475Z node 3 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Set up state: owner# [3:7511667353542142142:2111], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:45.389479Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0/.metadata/initialization/migrations Version: 0 }: sender# [3:7511667357837109813:2344] 2025-06-03T10:25:45.389483Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7511667357837109808:2344][/dc-1/USER_0/.metadata/initialization/migrations] Ignore empty state: owner# [3:7511667353542142142:2111], state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:45.388532Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154832:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0 DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109804:2342] 2025-06-03T10:25:45.388561Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667331491154832:2051] Subscribe: subscriber# [3:7511667357837109804:2342], path# /dc-1/USER_0, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.388583Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154835:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0 DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109805:2342] 2025-06-03T10:25:45.388586Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667331491154835:2054] Subscribe: subscriber# [3:7511667357837109805:2342], path# /dc-1/USER_0, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.388592Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154838:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0 DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109806:2342] 2025-06-03T10:25:45.388596Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667331491154838:2057] Subscribe: subscriber# [3:7511667357837109806:2342], path# /dc-1/USER_0, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.389328Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154832:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7511667357837109804:2342] 2025-06-03T10:25:45.389335Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154832:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109814:2344] 2025-06-03T10:25:45.389338Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7511667331491154832:2051] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-03T10:25:45.389346Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: 
[1:7511667331491154832:2051] Subscribe: subscriber# [3:7511667357837109814:2344], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.389350Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154835:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7511667357837109805:2342] 2025-06-03T10:25:45.389354Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154835:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109815:2344] 2025-06-03T10:25:45.389357Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7511667331491154835:2054] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-03T10:25:45.389360Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667331491154835:2054] Subscribe: subscriber# [3:7511667357837109815:2344], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.389364Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154838:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 6 }: sender# [3:7511667357837109806:2342] 2025-06-03T10:25:45.389370Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667331491154838:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1/USER_0/.metadata/initialization/migrations DomainOwnerId: 72057594046644480 }: sender# [3:7511667357837109816:2344] 2025-06-03T10:25:45.389372Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:520: [1:7511667331491154838:2057] Upsert description: path# /dc-1/USER_0/.metadata/initialization/migrations 2025-06-03T10:25:45.389375Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667331491154838:2057] Subscribe: subscriber# [3:7511667357837109816:2344], path# /dc-1/USER_0/.metadata/initialization/migrations, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:45.389673Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154832:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667357837109814:2344] 2025-06-03T10:25:45.389682Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154835:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667357837109815:2344] 2025-06-03T10:25:45.389685Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667331491154838:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 0 }: sender# [3:7511667357837109816:2344] 2025-06-03T10:25:45.389515Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667353542142142:2111], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 } 2025-06-03T10:25:45.389526Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667353542142142:2111], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0/.metadata/initialization/migrations PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7511667357837109808:2344] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 
2025-06-03T10:25:45.389536Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667353542142142:2111], cacheItem# { Subscriber: { Subscriber: [3:7511667357837109808:2344] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:45.389553Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667357837109817:2345], recipient# [3:7511667357837109796:2324], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> VDiskRestart::Simple [GOOD] >> test.py::test[window-win_func_in_lib--Results] [GOOD] >> test.py::test[window-win_func_lead_lag_worm--Results] >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolBeforeDroppingTablet [GOOD] Test command err: 2025-06-03T10:25:42.586658Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667342748921690:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:42.587293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028e5/r3tmp/tmpRZEJDI/pdisk_1.dat 2025-06-03T10:25:43.254728Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:43.258827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:43.258849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:43.274448Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62650 WaitRootIsUp 'dc-1'... 
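The subscriber sync traces above (and repeated below for this test) expose a majority vote over the scheme board replicas: with size# 3 and half# 1, the sync is reported done once successes# reaches 2, and the third reply is then logged as an unexpected sync response. A rough model of that accounting, reconstructed only from the size#/half#/successes# fields in the log rather than from subscriber.cpp itself:

    // Assumption: a sync is decided by a strict majority of replica replies.
    struct TSyncState {
        int Size = 3;         // replica subscribers, "size# 3" in the log
        int Half = Size / 2;  // majority threshold, "half# 1"
        int Successes = 0;
        int Failures = 0;
        bool Done = false;

        // Feed one replica reply; returns true once the outcome is decided.
        bool OnSyncResponse(bool success) {
            if (Done) {
                return true;      // late reply: "Unexpected sync response"
            }
            (success ? Successes : Failures)++;
            if (Successes > Half) {
                Done = true;      // "Sync is done: ... successes# 2, partial# 0"
            } else if (Failures > Half) {
                Done = true;      // majority unreachable; would surface as partial
            }
            return Done;
        }
    };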
TClient::Ls request: dc-1 2025-06-03T10:25:43.672065Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667342748921760:2138] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:43.706791Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667347043889517:2445] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:43.706832Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667342748921811:2159], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:43.706852Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667347043889499:2439][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667342748921811:2159], cookie# 1 2025-06-03T10:25:43.707798Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667347043889503:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889500:2439], cookie# 1 2025-06-03T10:25:43.707805Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667347043889504:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889501:2439], cookie# 1 2025-06-03T10:25:43.707810Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667347043889505:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889502:2439], cookie# 1 2025-06-03T10:25:43.707821Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667334158986808:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889503:2439], cookie# 1 2025-06-03T10:25:43.707829Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667334158986811:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889504:2439], cookie# 1 2025-06-03T10:25:43.707834Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667334158986814:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667347043889505:2439], cookie# 1 2025-06-03T10:25:43.707840Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667347043889503:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667334158986808:2051], cookie# 1 2025-06-03T10:25:43.707843Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667347043889504:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667334158986811:2054], cookie# 1 2025-06-03T10:25:43.707846Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667347043889505:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667334158986814:2057], cookie# 1 2025-06-03T10:25:43.707851Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667347043889499:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667347043889500:2439], cookie# 1 2025-06-03T10:25:43.707857Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: 
[main][1:7511667347043889499:2439][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:43.707860Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667347043889499:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667347043889501:2439], cookie# 1 2025-06-03T10:25:43.707863Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667347043889499:2439][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:43.707867Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667347043889499:2439][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667347043889502:2439], cookie# 1 2025-06-03T10:25:43.707870Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667347043889499:2439][/dc-1] Unexpected sync response: sender# [1:7511667347043889502:2439], cookie# 1 2025-06-03T10:25:43.707879Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667342748921811:2159], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:43.713872Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667342748921811:2159], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667347043889499:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:43.713910Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667342748921811:2159], cacheItem# { Subscriber: { Subscriber: [1:7511667347043889499:2439] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:43.715286Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667347043889518:2446], recipient# [1:7511667347043889517:2445], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:43.715308Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667347043889517:2445] HANDLE EvNavigateKeySetResult 
TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:43.758190Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667347043889517:2445] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:43.758849Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667347043889517:2445] Handle TEvDescribeSchemeResult Forward to# [1:7511667347043889516:2444] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:25:43.795204Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667342748921760:2138] Handle TEvProposeTransaction 2025-06-03T10:25:43.795218Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667342748921760:2138] TxId# 281474976710657 ProcessProposeTransaction 2025-06-03T10:25:43.795253Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667342748921760:2138] Cookie# 0 userReqId# "" txid# 281474976710657 SEND to# [1:7511667347043889524:2451] 2025-06-03T10:25:43.912192Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667347043889524:2451] txid# 281474976710657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "storage-pool-number-2" } StoragePools { Name: "" Kind: "storage-pool-number-1" } StoragePools { Name: "/dc-1:test" Kind: "test" ... 
][3:7511667351944206722:2130][/dc-1/USER_0] Path was updated to new version: owner# [3:7511667351944206676:2104], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 4) DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:44.765652Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667351944206722:2130][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [3:7511667351944206724:2130] 2025-06-03T10:25:44.765658Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7511667351944206722:2130][/dc-1/USER_0] Path was already updated: owner# [3:7511667351944206676:2104], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:44.765662Z node 3 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][3:7511667351944206722:2130][/dc-1/USER_0] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Version: 18446744073709551615 }: sender# [3:7511667351944206725:2130] 2025-06-03T10:25:44.765669Z node 3 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][3:7511667351944206722:2130][/dc-1/USER_0] Path was already updated: owner# [3:7511667351944206676:2104], state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 2], Version: 18446744073709551615) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:44.765687Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667351944206676:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 } 2025-06-03T10:25:44.765702Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667351944206676:2104], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Strong: 1 }, by path# { Subscriber: { Subscriber: [3:7511667351944206722:2130] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1748946344194 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# { Subscriber: { Subscriber: [3:7511667351944206722:2130] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 8 TableKind: 0 Created: 1 CreateStep: 1748946344194 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 
SchemaVersion: 0 } 2025-06-03T10:25:44.769369Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667334158986808:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7511667351944206726:2130] 2025-06-03T10:25:44.769386Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667334158986811:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7511667351944206727:2130] 2025-06-03T10:25:44.769391Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667334158986814:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 18446744073709551615 }: sender# [3:7511667351944206728:2130] 2025-06-03T10:25:44.769872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:2 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:25:44.769881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:25:44.769883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:1 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:25:44.769884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:25:44.769908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-03T10:25:44.769918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976710660 2025-06-03T10:25:44.774367Z node 1 :HIVE WARN: hive_impl.cpp:1945: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-03T10:25:44.778101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046644480 ShardLocalIdx: 2, at schemeshard: 72057594046644480 2025-06-03T10:25:44.778212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-03T10:25:44.778872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-03T10:25:44.778901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-03T10:25:44.778918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046644480 ShardLocalIdx: 1, at schemeshard: 72057594046644480 2025-06-03T10:25:44.778934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-03T10:25:44.778947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at 
schemeshard: 72057594046644480 2025-06-03T10:25:44.778961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-03T10:25:44.778978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-03T10:25:44.778981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:25:44.779008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-03T10:25:44.779031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-03T10:25:44.779034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:25:44.779044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:25:44.782255Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037889 2025-06-03T10:25:44.782287Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037891 2025-06-03T10:25:44.782292Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037888 2025-06-03T10:25:44.788404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:2 2025-06-03T10:25:44.788417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:2 tabletId 72075186224037889 2025-06-03T10:25:44.788437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-03T10:25:44.788439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-03T10:25:44.788443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:1 2025-06-03T10:25:44.788445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:1 tabletId 72075186224037888 2025-06-03T10:25:44.788449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-03T10:25:44.788452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-03T10:25:44.788460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 
72057594046644480 2025-06-03T10:25:44.788466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 TabletID: 72075186224037888 Status: OK Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2181038081 } StoragePool: "name_USER_0_kind_storage-pool-number-2" } TabletType: Coordinator Version: 1 TenantIdOwner: 72057594046644480 TenantIdLocalId: 2 } 2025-06-03T10:25:44.793924Z node 1 :HIVE WARN: hive_impl.cpp:1945: HIVE#72057594037968897 Can't find the tablet from RequestHiveInfo(TabletID=72075186224037888) 2025-06-03T10:25:44.794124Z node 1 :HIVE WARN: tx__block_storage_result.cpp:43: HIVE#72057594037968897 THive::TTxBlockStorageResult Complete status was NO_GROUP for TabletId 72075186224037890 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::LsLs [GOOD] Test command err: 2025-06-03T10:25:43.077343Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667346628158426:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:43.077368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:43.189870Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667347557193392:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:43.226321Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028da/r3tmp/tmpWh3fhu/pdisk_1.dat 2025-06-03T10:25:44.097054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:44.097080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:44.097924Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:44.102974Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:44.102992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:44.113960Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:44.116632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:44.116675Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle 
TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:25:44.122278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28960 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:44.657404Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667346628158657:2142] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:44.676153Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667350923126369:2422] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:44.676195Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667346628158709:2162], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:44.676218Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667350923126300:2365][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667346628158709:2162], cookie# 1 2025-06-03T10:25:44.677382Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667350923126305:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126302:2365], cookie# 1 2025-06-03T10:25:44.677394Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667350923126306:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126303:2365], cookie# 1 2025-06-03T10:25:44.677399Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667350923126307:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126304:2365], cookie# 1 2025-06-03T10:25:44.677412Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667333743256395:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126305:2365], cookie# 1 2025-06-03T10:25:44.677424Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667333743256398:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126306:2365], cookie# 1 2025-06-03T10:25:44.677433Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667333743256401:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667350923126307:2365], cookie# 1 2025-06-03T10:25:44.677441Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667350923126305:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667333743256395:2052], cookie# 1 2025-06-03T10:25:44.677444Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667350923126306:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667333743256398:2055], cookie# 1 2025-06-03T10:25:44.677447Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667350923126307:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667333743256401:2058], cookie# 1 2025-06-03T10:25:44.677455Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: 
[main][1:7511667350923126300:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667350923126302:2365], cookie# 1 2025-06-03T10:25:44.677462Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667350923126300:2365][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:44.677467Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667350923126300:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667350923126303:2365], cookie# 1 2025-06-03T10:25:44.677471Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667350923126300:2365][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:44.677476Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667350923126300:2365][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667350923126304:2365], cookie# 1 2025-06-03T10:25:44.677479Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667350923126300:2365][/dc-1] Unexpected sync response: sender# [1:7511667350923126304:2365], cookie# 1 2025-06-03T10:25:44.677491Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667346628158709:2162], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:44.683342Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667346628158709:2162], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667350923126300:2365] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:44.683378Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667346628158709:2162], cacheItem# { Subscriber: { Subscriber: [1:7511667350923126300:2365] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:44.684101Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667350923126370:2423], recipient# [1:7511667350923126369:2422], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 
TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:44.684118Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667350923126369:2422] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:44.769810Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667350923126369:2422] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:44.770868Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667350923126369:2422] Handle TEvDescribeSchemeResult Forward to# [1:7511667350923126367:2420] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057 ...
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:46.619469Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667347557193482:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.619515Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095426:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:46.619526Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095427:2119] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:46.619561Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667360442095443:2121], recipient# [2:7511667360442095425:2307], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.619719Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7511667360442095425:2307], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:46.813432Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667347557193482:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.813472Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095426:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:46.813481Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095427:2119] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:46.813515Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667360442095445:2122], recipient# [2:7511667360442095425:2307], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.813944Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7511667360442095425:2307], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:47.121485Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667347557193482:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:47.121530Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095426:2118] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:47.121541Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667360442095427:2119] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:47.121575Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667364737062743:2123], recipient# [2:7511667360442095425:2307], result# { ErrorCount: 2 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:47.121745Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:7511667360442095425:2307], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:47.240676Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667347557193482:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:47.240730Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667347557193482:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667351852160799:2112] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:47.240755Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667364737062745:2124], recipient# [2:7511667364737062744:2308], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } |60.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut |60.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut >> KqpSinkTx::OlapInvalidateOnError [GOOD] >> KqpSinkTx::OlapInteractive >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole2of2 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight1BlobSize1000 [GOOD] >> KqpScheme::CreateDropTableMultipleTime [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests2Inflight2BlobSize1000 >> KqpScheme::CreateDropTableViaApiMultipleTime |60.3%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_write/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpSinkLocks::TInvalidateOlap [GOOD] >> KqpSinkLocks::UncommittedRead >> TStorageTenantTest::DeclareAndDefine [GOOD] |60.3%| [TA] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_bscontroller/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.3%| [LD] {RESULT} $(B)/ydb/services/metadata/initializer/ut/ydb-services-metadata-initializer-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateTableInsideSubDomain [GOOD] Test command err: 2025-06-03T10:25:46.445604Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667361502418652:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:46.445729Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028b9/r3tmp/tmpish7Uq/pdisk_1.dat 2025-06-03T10:25:46.615073Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:13208 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:46.726156Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667361502418727:2138] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:46.728261Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667361502419106:2391] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:46.728298Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667361502418830:2193], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.728308Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667361502418830:2193], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:46.728380Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:46.728918Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667361502418378:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361502419111:2392] 2025-06-03T10:25:46.728940Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667361502418378:2051] Subscribe: subscriber# [1:7511667361502419111:2392], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.728965Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667361502418381:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361502419112:2392] 2025-06-03T10:25:46.728969Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667361502418381:2054] Subscribe: subscriber# [1:7511667361502419112:2392], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.728976Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667361502418384:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361502419113:2392] 2025-06-03T10:25:46.728979Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667361502418384:2057] Subscribe: subscriber# [1:7511667361502419113:2392], path# /dc-1, 
domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.728991Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361502419111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502418378:2051] 2025-06-03T10:25:46.728997Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361502419112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502418381:2054] 2025-06-03T10:25:46.729000Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361502419113:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502418384:2057] 2025-06-03T10:25:46.729007Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502419108:2392] 2025-06-03T10:25:46.729014Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502419109:2392] 2025-06-03T10:25:46.729026Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667361502419107:2392][/dc-1] Set up state: owner# [1:7511667361502418830:2193], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.729066Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361502419110:2392] 2025-06-03T10:25:46.729073Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667361502419107:2392][/dc-1] Path was already updated: owner# [1:7511667361502418830:2193], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.729081Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361502419111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419108:2392], cookie# 1 2025-06-03T10:25:46.729084Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361502419112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419109:2392], cookie# 1 2025-06-03T10:25:46.729088Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361502419113:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419110:2392], cookie# 1 2025-06-03T10:25:46.729094Z node 1 
:SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667361502418378:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361502419111:2392] 2025-06-03T10:25:46.729098Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667361502418378:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419111:2392], cookie# 1 2025-06-03T10:25:46.729103Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667361502418381:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361502419112:2392] 2025-06-03T10:25:46.729105Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667361502418381:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419112:2392], cookie# 1 2025-06-03T10:25:46.729120Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667361502418384:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361502419113:2392] 2025-06-03T10:25:46.729123Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667361502418384:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361502419113:2392], cookie# 1 2025-06-03T10:25:46.729427Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361502419111:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502418378:2051], cookie# 1 2025-06-03T10:25:46.729440Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361502419112:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502418381:2054], cookie# 1 2025-06-03T10:25:46.729446Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361502419113:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502418384:2057], cookie# 1 2025-06-03T10:25:46.729456Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502419108:2392], cookie# 1 2025-06-03T10:25:46.729466Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667361502419107:2392][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:46.729471Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502419109:2392], cookie# 1 2025-06-03T10:25:46.729475Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667361502419107:2392][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:46.729482Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361502419107:2392][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361502419110:2392], cookie# 1 2025-06-03T10:25:46.729485Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667361502419107:2392][/dc-1] Unexpected sync response: sender# [1:7511667361502419110:2392], cookie# 1 2025-06-03T10:25:46.751332Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667361502418830:2193], notify# 
NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-06-03T10:25:46.751409Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667361502418830:2193], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: ... esponse { Version: 3 Partial: 0 }: sender# [1:7511667361502418381:2054], cookie# 1 2025-06-03T10:25:48.056355Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667370092354295:2821][/dc-1/USER_0/SimpleTable] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 3 Partial: 0 }: sender# [1:7511667361502418384:2057], cookie# 1 2025-06-03T10:25:48.056359Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 3 Partial: 0 }: sender# [1:7511667370092354290:2821], cookie# 1 2025-06-03T10:25:48.056364Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:48.056371Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 3 Partial: 0 }: sender# [1:7511667370092354291:2821], cookie# 1 2025-06-03T10:25:48.056373Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:48.056377Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 3 Partial: 0 }: sender# [1:7511667370092354292:2821], cookie# 1 2025-06-03T10:25:48.056379Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667370092354289:2821][/dc-1/USER_0/SimpleTable] Unexpected sync response: sender# [1:7511667370092354292:2821], cookie# 1 2025-06-03T10:25:48.056384Z node 1 
:TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667361502418830:2193], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/SimpleTable PathId: Partial: 0 } 2025-06-03T10:25:48.056597Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667361502418830:2193], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1/USER_0/SimpleTable PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667370092354289:2821] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348050 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, by pathId# nullptr 2025-06-03T10:25:48.056610Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667361502418830:2193], cacheItem# { Subscriber: { Subscriber: [1:7511667370092354289:2821] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348050 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/SimpleTable TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:48.056646Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667370092354296:2822], recipient# [1:7511667370092354288:2820], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/SimpleTable TableId: [72057594046644480:3:1] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:48.056659Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667370092354288:2820] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:48.056673Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667370092354288:2820] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/dc-1/USER_0/SimpleTable" Options { ShowPrivateTable: true } 2025-06-03T10:25:48.056898Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667370092354288:2820] Handle TEvDescribeSchemeResult Forward to# [1:7511667370092354287:2819] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 52 Record# Status: StatusSuccess Path: "/dc-1/USER_0/SimpleTable" PathDescription { Self { Name: "SimpleTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1748946348050 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SimpleTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } FollowerCount: 2 PartitioningPolicy { MinPartitionsCount: 2 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 
72075186224037890 Mediators: 72075186224037891 } DomainKey { SchemeShard: 72057594046644480 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "SimpleTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710660 CreateStep: 1748946348050 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SimpleTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "k... (TRUNCATED) 2025-06-03T10:25:48.083799Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667361502418378:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667365163848247:2103] 2025-06-03T10:25:48.083824Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667361502418378:2051] Unsubscribe: subscriber# [3:7511667365163848247:2103], path# /dc-1/USER_0 2025-06-03T10:25:48.083831Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667361502418381:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667365163848248:2103] 2025-06-03T10:25:48.083834Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667361502418381:2054] Unsubscribe: subscriber# [3:7511667365163848248:2103], path# /dc-1/USER_0 2025-06-03T10:25:48.083838Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667361502418384:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667365163848249:2103] 2025-06-03T10:25:48.083841Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667361502418384:2057] Unsubscribe: subscriber# [3:7511667365163848249:2103], path# /dc-1/USER_0 2025-06-03T10:25:48.084021Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:25:48.084235Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] >> TStorageTenantTest::GenericCases [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::DeclareAndDefine [GOOD] Test command err: 2025-06-03T10:25:45.925143Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667357370503183:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:45.925164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/u93c/0028cf/r3tmp/tmpWFOque/pdisk_1.dat 2025-06-03T10:25:46.322566Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:46.409396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:46.409432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:46.414123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65150 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:46.420958Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667357370503405:2137] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:46.423549Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667361665471086:2394] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:46.423603Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667361665470730:2152], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.423615Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667361665470730:2152], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:46.423685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:46.424194Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667353075535751:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361665471091:2395] 2025-06-03T10:25:46.424219Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667353075535751:2050] Subscribe: subscriber# [1:7511667361665471091:2395], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.424240Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667353075535757:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361665471093:2395] 2025-06-03T10:25:46.424244Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667353075535757:2056] Subscribe: subscriber# [1:7511667361665471093:2395], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.424257Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361665471091:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667353075535751:2050] 2025-06-03T10:25:46.424264Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361665471093:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667353075535757:2056] 2025-06-03T10:25:46.424270Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361665471088:2395] 2025-06-03T10:25:46.424278Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361665471090:2395] 2025-06-03T10:25:46.424290Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667361665471087:2395][/dc-1] Set up state: owner# [1:7511667361665470730:2152], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.424328Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361665471091:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471088:2395], cookie# 1 2025-06-03T10:25:46.424332Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361665471092:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471089:2395], cookie# 1 2025-06-03T10:25:46.424335Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361665471093:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471090:2395], cookie# 1 2025-06-03T10:25:46.424342Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667353075535751:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361665471091:2395] 2025-06-03T10:25:46.424346Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667353075535751:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471091:2395], cookie# 1 2025-06-03T10:25:46.424356Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667353075535757:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361665471093:2395] 2025-06-03T10:25:46.424359Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667353075535757:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471093:2395], cookie# 1 2025-06-03T10:25:46.427084Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667353075535754:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361665471092:2395] 2025-06-03T10:25:46.427125Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667353075535754:2053] Subscribe: subscriber# [1:7511667361665471092:2395], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.427147Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667353075535754:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361665471092:2395], cookie# 1 2025-06-03T10:25:46.427163Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361665471091:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667353075535751:2050], cookie# 1 2025-06-03T10:25:46.427168Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361665471093:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# 
[1:7511667353075535757:2056], cookie# 1 2025-06-03T10:25:46.427177Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361665471092:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667353075535754:2053] 2025-06-03T10:25:46.427183Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361665471092:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667353075535754:2053], cookie# 1 2025-06-03T10:25:46.427192Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361665471088:2395], cookie# 1 2025-06-03T10:25:46.427202Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667361665471087:2395][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:46.427209Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361665471090:2395], cookie# 1 2025-06-03T10:25:46.427213Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667361665471087:2395][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:46.427221Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361665471089:2395] 2025-06-03T10:25:46.427237Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667361665471087:2395][/dc-1] Path was already updated: owner# [1:7511667361665470730:2152], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.427243Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361665471087:2395][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361665471089:2395], cookie# 1 2025-06-03T10:25:46.427246Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667361665471087:2395][/dc-1] Unexpected sync response: sender# [1:7511667361665471089:2395], cookie# 1 2025-06-03T10:25:46.427252Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667353075535754:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361665471092:2395] 2025-06-03T10:25:46.435729Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667361665470730:2152], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: 
EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsL ... o }] } 2025-06-03T10:25:49.033848Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667361665470726:2293], cacheItem# { Subscriber: { Subscriber: [1:7511667370255406443:2328] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348800 PathId: [OwnerId: 72057594046644480, LocalPathId: 4] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/dir/table1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.033872Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667361665470726:2293], cacheItem# { Subscriber: { Subscriber: [1:7511667374550373756:2333] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348950 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { Path: dc-1/USER_0/dir/table2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.033927Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667374550373771:2337], recipient# [1:7511667374550373770:2336], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/dir/table1 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } },{ Path: dc-1/USER_0/dir/table2 TableId: [72057594046644480:5:1] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: 
[OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:49.034750Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667357370503405:2137] Handle TEvProposeTransaction 2025-06-03T10:25:49.034757Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667357370503405:2137] TxId# 281474976715665 ProcessProposeTransaction 2025-06-03T10:25:49.034766Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [1:7511667357370503405:2137] Cookie# 0 userReqId# "" txid# 281474976715665 SEND to# [1:7511667374550373772:2955] DataReq marker# P0 2025-06-03T10:25:49.034778Z node 1 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [1:7511667374550373772:2955] Cookie# 0 txid# 281474976715665 HANDLE TDataReq marker# P1 2025-06-03T10:25:49.034845Z node 1 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [1:7511667374550373772:2955] txid 281474976715665 disallow followers cause of operation 2 read target mode 0 2025-06-03T10:25:49.034847Z node 1 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [1:7511667374550373772:2955] txid 281474976715665 disallow followers cause of operation 2 read target mode 0 2025-06-03T10:25:49.034850Z node 1 :TX_PROXY DEBUG: datareq.cpp:1453: Actor# [1:7511667374550373772:2955] txid# 281474976715665 SEND to# [1:7511667361665470730:2152] TSchemeCache with 2 scheme entries. DataReq marker# P2 2025-06-03T10:25:49.034868Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2760: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [1:7511667361665470730:2152], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 5] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 4] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) }] } 2025-06-03T10:25:49.034878Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [1:7511667361665470730:2152], cacheItem# { Subscriber: { Subscriber: [1:7511667370255406432:2941] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348950 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 5] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.034890Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [1:7511667361665470730:2152], cacheItem# { Subscriber: { Subscriber: [1:7511667370255406314:2839] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946348800 PathId: [OwnerId: 72057594046644480, LocalPathId: 4] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 4] Access: 0 SyncVersion: false Status: 
Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.034919Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667374550373774:2957], recipient# [1:7511667374550373772:2955], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 5] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 4] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) }] } 2025-06-03T10:25:49.034925Z node 1 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-03T10:25:49.035322Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7511667374550373772:2955] txid# 281474976715665 SEND TEvProposeTransaction to datashard 72075186224037892 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-03T10:25:49.035344Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7511667374550373772:2955] txid# 281474976715665 SEND TEvProposeTransaction to datashard 72075186224037894 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-03T10:25:49.039438Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037892 read size 0 out readset size 0 marker# P6 2025-06-03T10:25:49.040122Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037894 read size 0 out readset size 0 marker# P6 2025-06-03T10:25:49.040132Z node 1 :TX_PROXY DEBUG: datareq.cpp:2921: Actor# [1:7511667374550373772:2955] txid# 281474976715665 SEND EvProposeTransaction to# 72075186224037889 Coordinator marker# P7 2025-06-03T10:25:49.040543Z node 1 :TX_PROXY DEBUG: datareq.cpp:2111: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE TEvProposeTransactionStatus TDataReq marker# P11 Status# 16 2025-06-03T10:25:49.054044Z node 1 :TX_PROXY DEBUG: datareq.cpp:2135: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE TEvProposeTransactionStatus TDataReq marker# P10 Status# 17 2025-06-03T10:25:49.057799Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard 
id 72075186224037892 marker# P12 2025-06-03T10:25:49.057817Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7511667374550373772:2955] txid# 281474976715665 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037894 marker# P12 2025-06-03T10:25:49.057909Z node 1 :TX_PROXY DEBUG: datareq.cpp:2691: Actor# [1:7511667374550373772:2955] txid# 281474976715665 MergeResult ExecComplete TDataReq marker# P17 2025-06-03T10:25:49.057948Z node 1 :TX_PROXY INFO: datareq.cpp:834: Actor# [1:7511667374550373772:2955] txid# 281474976715665 RESPONSE Status# ExecComplete prepare time: 0.005352s execute time: 0.017816s total time: 0.023168s marker# P13 2025-06-03T10:25:49.066108Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667353075535751:2050] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667360436721201:2112] 2025-06-03T10:25:49.066127Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667353075535751:2050] Unsubscribe: subscriber# [2:7511667360436721201:2112], path# /dc-1/USER_0 2025-06-03T10:25:49.066132Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667353075535754:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667360436721202:2112] 2025-06-03T10:25:49.066135Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667353075535754:2053] Unsubscribe: subscriber# [2:7511667360436721202:2112], path# /dc-1/USER_0 2025-06-03T10:25:49.066140Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667353075535757:2056] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667360436721203:2112] 2025-06-03T10:25:49.066143Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667353075535757:2056] Unsubscribe: subscriber# [2:7511667360436721203:2112], path# /dc-1/USER_0 2025-06-03T10:25:49.066186Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 2 2025-06-03T10:25:49.066411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnected |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_vdisk_restart/unittest >> TOlapReboots::DropMultipleStandaloneTables >> TOlapReboots::CreateDropTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::CreateDummyTabletsInDifferentDomains [GOOD] Test command err: 2025-06-03T10:25:42.189450Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667344602043300:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:42.189492Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:42.190617Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667343773481260:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:42.192410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028db/r3tmp/tmpj2Vd7q/pdisk_1.dat 
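The P-markers in the TX_PROXY entries above trace a two-phase data transaction: the proxy resolves the affected tables (P2/P3), proposes the transaction to each datashard (P4), waits until every shard reports PREPARED (P6), hands the transaction to the coordinator for planning (P7, then Status# 16/17), collects COMPLETE from every shard (P12), and only then merges results and answers the client (P17/P13). Below is a minimal C++ sketch of that gating logic; it models only what the log shows, and the class and method names are hypothetical rather than YDB's actual actors in datareq.cpp.

    #include <cstdint>
    #include <iostream>
    #include <set>

    // Toy model of the flow implied by the markers: propose to all shards (P4),
    // collect PREPARED from each (P6), then hand off to the coordinator (P7);
    // collect COMPLETE from each (P12), then reply ExecComplete (P13).
    // Error and timeout paths are omitted.
    class TDataReqModel {
    public:
        explicit TDataReqModel(const std::set<uint64_t>& shards)
            : toPrepare_(shards), toComplete_(shards) {}

        void OnPrepared(uint64_t shard) {   // one PREPARED per shard, marker P6
            toPrepare_.erase(shard);
            if (toPrepare_.empty())
                std::cout << "SEND EvProposeTransaction to coordinator (P7)\n";
        }

        void OnComplete(uint64_t shard) {   // one COMPLETE per shard, marker P12
            toComplete_.erase(shard);
            if (toComplete_.empty())
                std::cout << "RESPONSE Status# ExecComplete (P13)\n";
        }

    private:
        std::set<uint64_t> toPrepare_;
        std::set<uint64_t> toComplete_;
    };

    int main() {
        // The two datashards from the log above.
        TDataReqModel tx({72075186224037892ull, 72075186224037894ull});
        tx.OnPrepared(72075186224037892ull);
        tx.OnPrepared(72075186224037894ull);  // second PREPARED -> coordinator
        tx.OnComplete(72075186224037892ull);
        tx.OnComplete(72075186224037894ull);  // second COMPLETE -> ExecComplete
    }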
2025-06-03T10:25:43.095125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:43.095161Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:43.099773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:43.099798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:43.114704Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:25:43.114746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:43.121717Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:43.195548Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:28771 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:43.597743Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667343773481330:2139] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:43.611124Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667348068449048:2414] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:43.611766Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667343773481357:2153], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:43.611789Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:7511667348068449023:2404][/dc-1] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:7511667343773481357:2153], cookie# 1 2025-06-03T10:25:43.612400Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667348068449027:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449024:2404], cookie# 1 2025-06-03T10:25:43.612405Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667348068449028:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449025:2404], cookie# 1 2025-06-03T10:25:43.612410Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667348068449029:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449026:2404], cookie# 1 2025-06-03T10:25:43.612417Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667330888579083:2055] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449028:2404], cookie# 1 2025-06-03T10:25:43.612428Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667330888579086:2058] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449029:2404], cookie# 1 2025-06-03T10:25:43.612435Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667348068449028:2404][/dc-1] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667330888579083:2055], cookie# 1 2025-06-03T10:25:43.612438Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667348068449029:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667330888579086:2058], cookie# 1 2025-06-03T10:25:43.612442Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667348068449023:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667348068449025:2404], cookie# 1 2025-06-03T10:25:43.612449Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667348068449023:2404][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:43.612452Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667348068449023:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667348068449026:2404], cookie# 1 2025-06-03T10:25:43.612456Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667348068449023:2404][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:43.612465Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667343773481357:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 } 2025-06-03T10:25:43.613905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667330888579080:2052] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667348068449027:2404], cookie# 1 2025-06-03T10:25:43.613931Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667348068449027:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667330888579080:2052], cookie# 1 2025-06-03T10:25:43.613937Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667348068449023:2404][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667348068449024:2404], cookie# 1 2025-06-03T10:25:43.613939Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667348068449023:2404][/dc-1] Unexpected sync response: sender# [1:7511667348068449024:2404], cookie# 1 2025-06-03T10:25:43.618778Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667343773481357:2153], notify# NKikimr::NSchemeBoard::NInternalEvents::TEvSyncResponse { Path: /dc-1 PathId: Partial: 0 }, by path# { Subscriber: { Subscriber: [1:7511667348068449023:2404] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:43.618814Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:7511667343773481357:2153], cacheItem# { Subscriber: { Subscriber: [1:7511667348068449023:2404] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 
0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 1 IsSync: true Partial: 0 } 2025-06-03T10:25:43.621061Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667348068449049:2415], recipient# [1:7511667348068449048:2414], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:43.621081Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667348068449048:2414] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:25:43.678026Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667348068449048:2414] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ShowPrivateTable: true } 2025-06-03T10:25:43.678677Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667348068449048:2414] Handle TEvDescribeSchemeResult Forward to# [1:7511667348068449047:2413] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 
ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { ... ePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:48.834891Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [3:7511667366983819829:2177], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/workload_manager/running_requests PathId: Strong: 0 } 2025-06-03T10:25:48.834895Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [3:7511667366983819829:2177], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/workload_manager/running_requests PathId: Strong: 0 }, by path# { Subscriber: { Subscriber: [3:7511667371278787417:2217] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:48.834900Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667366983819829:2177], cacheItem# { Subscriber: { Subscriber: [3:7511667371278787417:2217] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:48.834910Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667371278787431:2218], recipient# [3:7511667371278787414:2552], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:48.835106Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7511667371278787414:2552], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:48.897682Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [3:7511667366983819829:2177], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:48.897731Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667366983819829:2177], cacheItem# { Subscriber: { Subscriber: [3:7511667371278787416:2216] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:48.897739Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [3:7511667366983819829:2177], cacheItem# { Subscriber: { Subscriber: [3:7511667371278787417:2217] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:48.897774Z node 3 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [3:7511667371278787433:2219], recipient# [3:7511667371278787414:2552], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: LookupError Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:48.898145Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7511667371278787414:2552], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:49.193647Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667344602043532:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:49.193696Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667344602043532:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667348897010848:2112] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.193720Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667374666814687:2127], recipient# [2:7511667374666814686:2314], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:49.221477Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667344602043532:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:49.221518Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667344602043532:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667348897010848:2112] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.221556Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667374666814689:2128], recipient# [2:7511667374666814688:2315], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: 
PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:49.294327Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667344602043532:2107], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:49.294369Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667344602043532:2107], cacheItem# { Subscriber: { Subscriber: [2:7511667361781912754:2117] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.294390Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667374666814691:2129], recipient# [2:7511667374666814690:2316], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TTxDataShardMiniKQL::ReadSpecialColumns >> TTxDataShardMiniKQL::WriteKeyTooLarge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::GenericCases [GOOD] Test command err: 2025-06-03T10:25:46.267980Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667359604607658:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:46.268108Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028c1/r3tmp/tmpz0NPlF/pdisk_1.dat 2025-06-03T10:25:46.473845Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:3042 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:25:46.577725Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667359604607737:2140] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:46.580161Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667359604608105:2388] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:46.580220Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667359604607762:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.580231Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667359604607762:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:46.580296Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:46.580780Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667355309640077:2050] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667359604608110:2389] 2025-06-03T10:25:46.580801Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667355309640077:2050] Subscribe: subscriber# [1:7511667359604608110:2389], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.580825Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667355309640080:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667359604608111:2389] 2025-06-03T10:25:46.580828Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667355309640080:2053] Subscribe: subscriber# [1:7511667359604608111:2389], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.580835Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667355309640083:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667359604608112:2389] 2025-06-03T10:25:46.580839Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667355309640083:2056] Subscribe: subscriber# [1:7511667359604608112:2389], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.580854Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667359604608110:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667355309640077:2050] 2025-06-03T10:25:46.580860Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667359604608111:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667355309640080:2053] 2025-06-03T10:25:46.580865Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667359604608112:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667355309640083:2056] 2025-06-03T10:25:46.580874Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: 
[main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667359604608107:2389] 2025-06-03T10:25:46.580881Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667359604608108:2389] 2025-06-03T10:25:46.580896Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667359604608106:2389][/dc-1] Set up state: owner# [1:7511667359604607762:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.580927Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667359604608109:2389] 2025-06-03T10:25:46.580936Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667359604608106:2389][/dc-1] Path was already updated: owner# [1:7511667359604607762:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.580944Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667359604608110:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608107:2389], cookie# 1 2025-06-03T10:25:46.580948Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667359604608111:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608108:2389], cookie# 1 2025-06-03T10:25:46.580952Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667359604608112:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608109:2389], cookie# 1 2025-06-03T10:25:46.580960Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667355309640077:2050] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667359604608110:2389] 2025-06-03T10:25:46.580964Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667355309640077:2050] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608110:2389], cookie# 1 2025-06-03T10:25:46.580975Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667355309640080:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667359604608111:2389] 2025-06-03T10:25:46.580977Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667355309640080:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608111:2389], cookie# 1 2025-06-03T10:25:46.580981Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667355309640083:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667359604608112:2389] 
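The SCHEME_BOARD_SUBSCRIBER entries above also show how a path sync reaches quorum: the subscriber polls size# 3 replicas, half# is size / 2 = 1, each reply is logged as progress ("successes# 1"), the sync is declared done once successes exceed half ("successes# 2"), and a reply arriving after that point is logged as "Unexpected sync response". The following is a minimal sketch of that majority rule, assuming only what these log lines show; the struct below is hypothetical and does not reproduce the actual subscriber.cpp state.

    #include <cstdint>
    #include <iostream>

    // Majority rule as read off the "Sync is in progress / Sync is done"
    // lines: done once successes > size / 2. Failure handling (the
    // "faulires#" counter in the log) and partial syncs are left out.
    struct TSyncQuorumModel {
        uint32_t size;           // replicas polled, "size#" in the log
        uint32_t successes = 0;
        bool done = false;

        void OnResponse() {
            if (done) {
                std::cout << "Unexpected sync response\n";
                return;
            }
            ++successes;
            if (successes > size / 2) {   // "half#" is size / 2
                done = true;
                std::cout << "Sync is done: successes# " << successes << "\n";
            } else {
                std::cout << "Sync is in progress: successes# " << successes << "\n";
            }
        }
    };

    int main() {
        TSyncQuorumModel sync{3};  // three scheme board replicas, as above
        sync.OnResponse();         // 1 of 3 -> in progress
        sync.OnResponse();         // 2 > 1  -> done
        sync.OnResponse();         // late reply -> unexpected
    }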
2025-06-03T10:25:46.580984Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667355309640083:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667359604608112:2389], cookie# 1 2025-06-03T10:25:46.582235Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667359604608110:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667355309640077:2050], cookie# 1 2025-06-03T10:25:46.582247Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667359604608111:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667355309640080:2053], cookie# 1 2025-06-03T10:25:46.582253Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667359604608112:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667355309640083:2056], cookie# 1 2025-06-03T10:25:46.582263Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667359604608107:2389], cookie# 1 2025-06-03T10:25:46.582273Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667359604608106:2389][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:46.582278Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667359604608108:2389], cookie# 1 2025-06-03T10:25:46.582283Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667359604608106:2389][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:46.582290Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667359604608106:2389][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667359604608109:2389], cookie# 1 2025-06-03T10:25:46.582293Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667359604608106:2389][/dc-1] Unexpected sync response: sender# [1:7511667359604608109:2389], cookie# 1 2025-06-03T10:25:46.612645Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667359604607762:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 
1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 } 2025-06-03T10:25:46.617521Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:7511667359604607762:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: " ... esourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:25:49.369341Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667359604607737:2140] Handle TEvProposeTransaction 2025-06-03T10:25:49.369352Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667359604607737:2140] TxId# 281474976715668 ProcessProposeTransaction 2025-06-03T10:25:49.369362Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:273: actor# [1:7511667359604607737:2140] Cookie# 0 userReqId# "" txid# 281474976715668 SEND to# [1:7511667372489510960:3099] DataReq marker# P0 2025-06-03T10:25:49.369385Z node 1 :TX_PROXY TRACE: datareq.cpp:492: StateWaitInit, received event# 269811712, Sender [1:7511667359604607737:2140], Recipient [1:7511667372489510960:3099]: NKikimr::TEvTxProxyReq::TEvMakeRequest 2025-06-03T10:25:49.369388Z node 1 :TX_PROXY TRACE: datareq.cpp:494: StateWaitInit, processing event TEvTxProxyReq::TEvMakeRequest 2025-06-03T10:25:49.369397Z node 1 :TX_PROXY DEBUG: datareq.cpp:1330: Actor# [1:7511667372489510960:3099] Cookie# 0 txid# 281474976715668 HANDLE TDataReq marker# P1 2025-06-03T10:25:49.369488Z node 1 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [1:7511667372489510960:3099] txid 281474976715668 disallow followers cause of operation 2 read target mode 0 2025-06-03T10:25:49.369490Z node 1 :TX_PROXY DEBUG: datareq.cpp:1245: Actor [1:7511667372489510960:3099] txid 281474976715668 disallow followers cause of operation 2 read target mode 0 2025-06-03T10:25:49.369495Z node 1 :TX_PROXY DEBUG: datareq.cpp:1453: Actor# [1:7511667372489510960:3099] txid# 281474976715668 SEND to# [1:7511667359604607762:2154] TSchemeCache with 2 scheme entries. 
DataReq marker# P2 2025-06-03T10:25:49.369544Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2760: Handle TEvTxProxySchemeCache::TEvResolveKeySet: self# [1:7511667359604607762:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo Point: (Uint64 : 42) }] } 2025-06-03T10:25:49.369556Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [1:7511667359604607762:2154], cacheItem# { Subscriber: { Subscriber: [1:7511667372489510915:3084] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946349250 PathId: [OwnerId: 72057594046644480, LocalPathId: 8] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.369568Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2063: FillEntry for TResolve: self# [1:7511667359604607762:2154], cacheItem# { Subscriber: { Subscriber: [1:7511667372489510795:2976] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 1 } Filled: 1 Status: StatusSuccess Kind: 3 TableKind: 1 Created: 1 CreateStep: 1748946349100 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] DomainId: [OwnerId: 72057594046644480, LocalPathId: 2] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 1 }, entry# { TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: Unknown Kind: KindUnknown PartitionsCount: 0 DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:49.369642Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:7511667372489510962:3101], recipient# [1:7511667372489510960:3099], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 8] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) },{ TableId: [OwnerId: 72057594046644480, LocalPathId: 7] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 2] Params { Version: 2 PlanResolution: 50 Coordinators: 72075186224037888 Coordinators: 72075186224037889 TimeCastBucketsPerMediator: 2 Mediators: 72075186224037890 Mediators: 72075186224037891 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Point: (Uint64 : 42) }] } 2025-06-03T10:25:49.369655Z node 1 :TX_PROXY TRACE: datareq.cpp:499: StateWaitResolve, received event# 269746178, Sender [1:7511667372489510962:3101], 
Recipient [1:7511667372489510960:3099]: NKikimr::TEvTxProxySchemeCache::TEvResolveKeySetResult 2025-06-03T10:25:49.369657Z node 1 :TX_PROXY TRACE: datareq.cpp:503: StateWaitResolve, processing event TEvTxProxySchemeCache::TEvResolveKeySetResult 2025-06-03T10:25:49.369660Z node 1 :TX_PROXY DEBUG: datareq.cpp:1620: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE EvResolveKeySetResult TDataReq marker# P3 ErrorCount# 0 2025-06-03T10:25:49.370107Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7511667372489510960:3099] txid# 281474976715668 SEND TEvProposeTransaction to datashard 72075186224037892 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-03T10:25:49.370127Z node 1 :TX_PROXY DEBUG: datareq.cpp:1115: Actor# [1:7511667372489510960:3099] txid# 281474976715668 SEND TEvProposeTransaction to datashard 72075186224037894 with 327 bytes program affected shards 2 followers disallowed marker# P4 2025-06-03T10:25:49.378805Z node 1 :TX_PROXY TRACE: datareq.cpp:531: StateWaitPrepare, received event# 269550080, Sender [2:7511667367618977458:2311], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.378819Z node 1 :TX_PROXY TRACE: datareq.cpp:535: StateWaitPrepare, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-03T10:25:49.378837Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037892 read size 0 out readset size 0 marker# P6 2025-06-03T10:25:49.378841Z node 1 :TX_PROXY TRACE: datareq.cpp:531: StateWaitPrepare, received event# 269550080, Sender [2:7511667371913944971:2337], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.378842Z node 1 :TX_PROXY TRACE: datareq.cpp:535: StateWaitPrepare, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-03T10:25:49.378858Z node 1 :TX_PROXY DEBUG: datareq.cpp:1873: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE Prepare TEvProposeTransactionResult TDataReq TabletStatus# StatusWait GetStatus# PREPARED shard id 72075186224037894 read size 0 out readset size 0 marker# P6 2025-06-03T10:25:49.378866Z node 1 :TX_PROXY DEBUG: datareq.cpp:2921: Actor# [1:7511667372489510960:3099] txid# 281474976715668 SEND EvProposeTransaction to# 72075186224037888 Coordinator marker# P7 2025-06-03T10:25:49.379207Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269091328, Sender [2:7511667367618977304:2297], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.379213Z node 1 :TX_PROXY TRACE: datareq.cpp:567: StateWaitPlan, processing event TEvTxProxy::TEvProposeTransactionStatus 2025-06-03T10:25:49.379218Z node 1 :TX_PROXY DEBUG: datareq.cpp:2111: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE TEvProposeTransactionStatus TDataReq marker# P11 Status# 16 2025-06-03T10:25:49.414053Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269091328, Sender [2:7511667367618977304:2297], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.414069Z node 1 :TX_PROXY TRACE: datareq.cpp:567: StateWaitPlan, processing event TEvTxProxy::TEvProposeTransactionStatus 2025-06-03T10:25:49.414095Z node 1 :TX_PROXY DEBUG: datareq.cpp:2135: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE TEvProposeTransactionStatus TDataReq marker# P10 Status# 17 2025-06-03T10:25:49.424342Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received 
event# 269550080, Sender [2:7511667371913944971:2337], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.424355Z node 1 :TX_PROXY TRACE: datareq.cpp:568: StateWaitPlan, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-03T10:25:49.424374Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037894 marker# P12 2025-06-03T10:25:49.424389Z node 1 :TX_PROXY TRACE: datareq.cpp:563: StateWaitPlan, received event# 269550080, Sender [2:7511667367618977458:2311], Recipient [1:7511667372489510960:3099] 2025-06-03T10:25:49.424391Z node 1 :TX_PROXY TRACE: datareq.cpp:568: StateWaitPlan, processing event TEvDataShard::TEvProposeTransactionResult 2025-06-03T10:25:49.424397Z node 1 :TX_PROXY DEBUG: datareq.cpp:2286: Actor# [1:7511667372489510960:3099] txid# 281474976715668 HANDLE Plan TEvProposeTransactionResult TDataReq GetStatus# COMPLETE shard id 72075186224037892 marker# P12 2025-06-03T10:25:49.424560Z node 1 :TX_PROXY DEBUG: datareq.cpp:2691: Actor# [1:7511667372489510960:3099] txid# 281474976715668 MergeResult ExecComplete TDataReq marker# P17 2025-06-03T10:25:49.424604Z node 1 :TX_PROXY INFO: datareq.cpp:834: Actor# [1:7511667372489510960:3099] txid# 281474976715668 RESPONSE Status# ExecComplete prepare time: 0.009469s execute time: 0.045734s total time: 0.055203s marker# P13 2025-06-03T10:25:49.462725Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667355309640077:2050] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667363324009941:2100] 2025-06-03T10:25:49.462745Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667355309640077:2050] Unsubscribe: subscriber# [2:7511667363324009941:2100], path# /dc-1/USER_0 2025-06-03T10:25:49.462752Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667355309640080:2053] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667363324009942:2100] 2025-06-03T10:25:49.462756Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667355309640080:2053] Unsubscribe: subscriber# [2:7511667363324009942:2100], path# /dc-1/USER_0 2025-06-03T10:25:49.462760Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667355309640083:2056] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [2:7511667363324009943:2100] 2025-06-03T10:25:49.462763Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667355309640083:2056] Unsubscribe: subscriber# [2:7511667363324009943:2100], path# /dc-1/USER_0 2025-06-03T10:25:49.463265Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 2 2025-06-03T10:25:49.463594Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TOlapReboots::CreateMultipleStandaloneTables >> TTxDataShardMiniKQL::WriteEraseRead >> TOlapReboots::DropTableThenStore >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite2 [GOOD] >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 >> TTxDataShardMiniKQL::CrossShard_1_Cycle >> TTxDataShardMiniKQL::CrossShard_5_AllToAll >> TTxDataShardMiniKQL::ReadConstant >> TTxDataShardMiniKQL::ReadSpecialColumns [GOOD] >> TTxDataShardMiniKQL::SelectRange >> TTxDataShardMiniKQL::WriteKeyTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteValueTooLarge >> 
TTxDataShardMiniKQL::WriteEraseRead [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMultipleShards >> test.py::test[case-case_val_when_then-default.txt-ForceBlocks] [GOOD] >> test.py::test[case-case_val_when_then-default.txt-Results] >> TTxDataShardMiniKQL::MemoryUsageImmediateSmallTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] >> TTxDataShardMiniKQL::Write >> TTxDataShardMiniKQL::SelectRange [GOOD] >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight10BlobSize1000 >> TTxDataShardMiniKQL::ReadConstant [GOOD] >> TTxDataShardMiniKQL::ReadAfterWrite >> TTxDataShardMiniKQL::MemoryUsageImmediateMediumTx [GOOD] >> TTxDataShardMiniKQL::MemoryUsageMultiShard |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest |60.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_storage_tenant/unittest >> TStorageTenantTest::RemoveStoragePoolAndCreateOneMore [GOOD] Test command err: 2025-06-03T10:25:45.574636Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667357219534373:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:45.621373Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028cb/r3tmp/tmpWMK0bf/pdisk_1.dat 2025-06-03T10:25:46.276494Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:46.283362Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:46.283383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:46.285874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31057 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:25:46.411188Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667357219534526:2141] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:46.413349Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667361514502198:2398] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:46.413403Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667357219534552:2154], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:46.413413Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667357219534552:2154], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:46.413493Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:46.413919Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667344334632263:2051] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361514502203:2399] 2025-06-03T10:25:46.413940Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667344334632266:2054] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361514502204:2399] 2025-06-03T10:25:46.413944Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667344334632263:2051] Subscribe: subscriber# [1:7511667361514502203:2399], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.413959Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667344334632266:2054] Subscribe: subscriber# [1:7511667361514502204:2399], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.413966Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667344334632269:2057] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667361514502205:2399] 2025-06-03T10:25:46.413970Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667344334632269:2057] Subscribe: subscriber# [1:7511667361514502205:2399], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:46.413983Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361514502203:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667344334632263:2051] 2025-06-03T10:25:46.413990Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361514502204:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667344334632266:2054] 2025-06-03T10:25:46.413992Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667344334632263:2051] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361514502203:2399] 2025-06-03T10:25:46.413994Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667361514502205:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: 
[OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667344334632269:2057] 2025-06-03T10:25:46.413997Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667344334632266:2054] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361514502204:2399] 2025-06-03T10:25:46.414002Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667344334632269:2057] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667361514502205:2399] 2025-06-03T10:25:46.414002Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361514502200:2399] 2025-06-03T10:25:46.414010Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361514502201:2399] 2025-06-03T10:25:46.414023Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667361514502199:2399][/dc-1] Set up state: owner# [1:7511667357219534552:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.414066Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667361514502202:2399] 2025-06-03T10:25:46.414074Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667361514502199:2399][/dc-1] Path was already updated: owner# [1:7511667357219534552:2154], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:46.414083Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361514502203:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502200:2399], cookie# 1 2025-06-03T10:25:46.414087Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361514502204:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502201:2399], cookie# 1 2025-06-03T10:25:46.414091Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667361514502205:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502202:2399], cookie# 1 2025-06-03T10:25:46.414886Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667344334632263:2051] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502203:2399], cookie# 1 2025-06-03T10:25:46.414905Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667344334632266:2054] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502204:2399], cookie# 1 
2025-06-03T10:25:46.414911Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667344334632269:2057] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667361514502205:2399], cookie# 1 2025-06-03T10:25:46.414920Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361514502203:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344334632263:2051], cookie# 1 2025-06-03T10:25:46.414925Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361514502204:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344334632266:2054], cookie# 1 2025-06-03T10:25:46.414928Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667361514502205:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667344334632269:2057], cookie# 1 2025-06-03T10:25:46.414935Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361514502200:2399], cookie# 1 2025-06-03T10:25:46.414942Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667361514502199:2399][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:46.414946Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361514502201:2399], cookie# 1 2025-06-03T10:25:46.414949Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667361514502199:2399][/dc-1] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:25:46.414954Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667361514502199:2399][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667361514502202:2399], cookie# 1 2025-06-03T10:25:46.414956Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:7511667361514502199:2399][/dc-1] Unexpected sync response: sender# [1:7511667361514502202:2399], cookie# 1 2025-06-03T10:25:46.433174Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:7511667357219534552:2154], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 
1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsL ... dEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/workload_manager/delayed_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7511667376855845369:2297] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:50.745526Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667376855845232:2228], cacheItem# { Subscriber: { Subscriber: [2:7511667376855845369:2297] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:50.745532Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [2:7511667376855845232:2228], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/workload_manager/running_requests PathId: Strong: 1 } 2025-06-03T10:25:50.745539Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [2:7511667376855845232:2228], notify# NKikimr::TSchemeBoardEvents::TEvNotifyDelete { Path: /dc-1/USER_1/.metadata/workload_manager/running_requests PathId: Strong: 1 }, by path# { Subscriber: { Subscriber: [2:7511667376855845370:2298] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:25:50.745548Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667376855845232:2228], cacheItem# { Subscriber: { Subscriber: [2:7511667376855845370:2298] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:50.745574Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667376855845383:2299], recipient# [2:7511667376855845366:2549], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] 
RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:50.762525Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632263:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [2:7511667376855845226:2226] 2025-06-03T10:25:50.762532Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632266:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [2:7511667376855845227:2226] 2025-06-03T10:25:50.762544Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632266:2054] Unsubscribe: subscriber# [2:7511667376855845227:2226], path# /dc-1/USER_1 2025-06-03T10:25:50.762549Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632263:2051] Unsubscribe: subscriber# [2:7511667376855845226:2226], path# /dc-1/USER_1 2025-06-03T10:25:50.762554Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632269:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_1 }: sender# [2:7511667376855845228:2226] 2025-06-03T10:25:50.762559Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632269:2057] Unsubscribe: subscriber# [2:7511667376855845228:2226], path# /dc-1/USER_1 2025-06-03T10:25:50.762597Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 2 2025-06-03T10:25:50.762713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:25:50.763073Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632263:2051] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667371350487710:2254] 2025-06-03T10:25:50.763090Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632263:2051] Unsubscribe: subscriber# [3:7511667371350487710:2254], path# /dc-1/USER_0 2025-06-03T10:25:50.763097Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632266:2054] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667371350487711:2254] 2025-06-03T10:25:50.763101Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632266:2054] Unsubscribe: subscriber# [3:7511667371350487711:2254], path# /dc-1/USER_0 2025-06-03T10:25:50.763107Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1075: [1:7511667344334632269:2057] Handle NKikimrSchemeBoard.TEvUnsubscribe { Path: /dc-1/USER_0 }: sender# [3:7511667371350487712:2254] 2025-06-03T10:25:50.763112Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:662: [1:7511667344334632269:2057] Unsubscribe: subscriber# [3:7511667371350487712:2254], path# /dc-1/USER_0 2025-06-03T10:25:50.763147Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:25:50.763242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:25:50.764177Z node 1 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[3:7511667371350487703:2253], Type=268959746 2025-06-03T10:25:50.764195Z node 1 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[3:7511667371350487703:2253], Type=268959746 2025-06-03T10:25:50.772029Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: 
Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667376855845232:2228], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:50.772086Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667376855845232:2228], cacheItem# { Subscriber: { Subscriber: [2:7511667376855845369:2297] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:50.772097Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667376855845232:2228], cacheItem# { Subscriber: { Subscriber: [2:7511667376855845370:2298] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:50.772121Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667376855845457:2334], recipient# [2:7511667376855845456:2570], result# { ErrorCount: 2 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/workload_manager/delayed_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo },{ Path: dc-1/USER_1/.metadata/workload_manager/running_requests TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:51.243983Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:7511667376855845232:2228], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 
2025-06-03T10:25:51.244157Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:7511667376855845232:2228], cacheItem# { Subscriber: { Subscriber: [2:7511667376855845350:2291] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:25:51.244202Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:7511667381150812764:2336], recipient# [2:7511667381150812763:2571], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> BlobStorageSync::TestSyncLogCuttingMirror3of4 [GOOD] >> BlobStorageSync::TestSyncLogCuttingBlock4Plus2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::ReadWriteTxFailsOnConcurrentWrite3 [GOOD] Test command err: Trying to start YDB, gRPC: 21426, MsgBus: 26485 2025-06-03T10:25:31.569514Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667297361997014:2265];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:31.569569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f5f/r3tmp/tmp7ZS5Bk/pdisk_1.dat 2025-06-03T10:25:32.114611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:32.114634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:32.123564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:32.130214Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:32.131091Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667297361996788:2079] 1748946331526684 != 1748946331526687 TServer::EnableGrpc on GrpcPort 21426, node 1 2025-06-03T10:25:32.253546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:32.253560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:32.253562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:32.253621Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26485 TClient is connected to server 
localhost:26485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:32.810311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.817724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:25:33.240762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667305951932029:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.240795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.240956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667305951932065:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:33.244288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:33.260565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-03T10:25:33.260663Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667305951932067:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-03T10:25:33.359384Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667305951932118:2330] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:33.554692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.683395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.423909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:36.546388Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511667297361997014:2265];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:36.546466Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:25:37.567853Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:7511667323131809176:2967], SessionActorId: [1:7511667314541874505:2967], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 . sessionActorId=[1:7511667314541874505:2967]. isRollback=0 2025-06-03T10:25:37.569033Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=YzY3ZmI4ZGQtM2YwMzM2OTEtMWYwNjQ2NC1jMmE0MmI5ZQ==, ActorId: [1:7511667314541874505:2967], ActorState: ExecuteState, TraceId: 01jwtn6fq5dx2xab20jxdhh3b8, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:7511667323131809177:2967] from: [1:7511667323131809176:2967] 2025-06-03T10:25:37.569065Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:7511667323131809177:2967] TxId: 281474976710666. Ctx: { TraceId: 01jwtn6fq5dx2xab20jxdhh3b8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzY3ZmI4ZGQtM2YwMzM2OTEtMWYwNjQ2NC1jMmE0MmI5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KV2`, code: 2001 } 2025-06-03T10:25:37.569135Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=YzY3ZmI4ZGQtM2YwMzM2OTEtMWYwNjQ2NC1jMmE0MmI5ZQ==, ActorId: [1:7511667314541874505:2967], ActorState: ExecuteState, TraceId: 01jwtn6fq5dx2xab20jxdhh3b8, Create QueryResponse for error on request, msg: WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 Trying to start YDB, gRPC: 23255, MsgBus: 25934 2025-06-03T10:25:43.823648Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667349636279044:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:43.823906Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f5f/r3tmp/tmpheB26Y/pdisk_1.dat 2025-06-03T10:25:43.907533Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23255, node 2 2025-06-03T10:25:43.953898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:43.953926Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:43.954930Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:44.017894Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:44.017905Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:44.017908Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:44.017957Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25934 TClient is connected to server localhost:25934 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:44.222405Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:44.229931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:44.317688Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667353931246937:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:44.317709Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:44.317786Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667353931246964:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:44.318541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:44.321048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:25:44.321106Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667353931246966:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:44.418468Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667353931247017:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:44.434910Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:44.463475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:44.741968Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:45.781672Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=OTg0MmQ0ZTEtMjhhZjAzNzktYjcyMmM5MmEtZWJkNDEwNzg=, ActorId: [2:7511667358226222115:2964], ActorState: ExecuteState, TraceId: 01jwtn6qs985rc0w9p4wwpgmt2, Create QueryResponse for error on request, msg: tx has deferred effects, but locks are broken WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:25:48.829486Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667349636279044:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:48.829537Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 |60.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |60.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |60.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |60.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |60.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |60.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/query/ydb-core-kqp-ut-query |60.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader |60.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp_scan/ydb-core-tx-datashard-ut_kqp_scan |60.4%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_topic_reader/ydb-core-tx-replication-service-ut_topic_reader >> TTxDataShardMiniKQL::Write [GOOD] >> TTxDataShardMiniKQL::TableStats >> TTxDataShardMiniKQL::WriteAndReadMultipleShards [GOOD] >> TTxDataShardMiniKQL::WriteAndReadMany >> TTxDataShardMiniKQL::ReadAfterWrite [GOOD] >> TTxDataShardMiniKQL::ReadNonExisting >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] |60.4%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> TTxDataShardMiniKQL::WriteValueTooLarge [GOOD] >> TTxDataShardMiniKQL::WriteLargeExternalBlob |60.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |60.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation |60.4%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/ydb-core-statistics-service-ut-ut_aggregation >> test.py::test[case-case_val_when_then-default.txt-Results] [GOOD] >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::SelectRangeWithNotFullKey [GOOD] Test command err: 2025-06-03T10:25:50.874680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:50.874713Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.875526Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:50.880006Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:50.880201Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:50.880282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:50.890613Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:50.893638Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:50.893698Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:50.894000Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:50.894016Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:50.894027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:50.894147Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:50.894263Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:50.894284Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:50.924228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:50.940992Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:50.941107Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:50.941135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: 
at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:50.941143Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:50.941151Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:50.941159Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.941240Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.941253Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.941400Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:50.941437Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:50.941447Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.941458Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:50.941468Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:50.941475Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:50.941485Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:50.941491Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:50.941498Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:50.941514Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.941522Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.941539Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:50.942175Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nx\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\016\n\010__tablet\030\004 9\032\023\n\r__updateEpoch\030\004 :\032\020\n\n__updateNo\030\004 ;(\"J\014/Root/table1\222\002\013\th\020\000\000\000\000\000\000\020\r" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:50.942195Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:50.942218Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:50.942268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to 
execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:50.942283Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:50.942297Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:50.942309Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:50.942315Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:50.942323Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:50.942329Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.942432Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:50.942437Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:50.942442Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:50.942447Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.942462Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:50.942467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:50.942471Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:50.942476Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.942487Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:50.953793Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:50.953835Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.953845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.953861Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:50.953907Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:50.954120Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.954133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.954144Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], 
serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:50.954159Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:50.954165Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:50.954253Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.954264Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-03T10:25:50.954269Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:50.954276Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:50.955197Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:50.955227Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.955322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.955332Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.955347Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.955359Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:50.955365Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:50.955376Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-03T10:25:50 ... 004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4e\005\'?8\003\013?>\003?\000\003?@\000\003?B\000\006\004?F\003\203\014\000\003\203\014\000\003\003?H\000\377\007\002\000\005?\032\005?\026?r\000\005?\030\003\005? 
\005?\034?r\000\006\000?\036\003?x\005?&\006\ 2025-06-03T10:25:52.031679Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:52.031694Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:52.031810Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CheckDataTx 2025-06-03T10:25:52.031825Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-03T10:25:52.031829Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CheckDataTx 2025-06-03T10:25:52.031834Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:52.031839Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:25:52.031846Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:25:52.031855Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:8] at 9437184 2025-06-03T10:25:52.031860Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-03T10:25:52.031864Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:52.031869Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:25:52.031873Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit ExecuteDataTx 2025-06-03T10:25:52.031965Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:8] at tablet 9437184 with status COMPLETE 2025-06-03T10:25:52.031977Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:8] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 2, SelectRangeBytes: 31, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:52.031990Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-03T10:25:52.031995Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:25:52.031999Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:52.032019Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit FinishPropose 2025-06-03T10:25:52.032027Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 8 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:25:52.032041Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is DelayComplete 2025-06-03T10:25:52.032045Z node 3 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:52.032049Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 9437184 to execution unit CompletedOperations 2025-06-03T10:25:52.032053Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 9437184 on unit CompletedOperations 2025-06-03T10:25:52.032061Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 9437184 is Executed 2025-06-03T10:25:52.032066Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 9437184 executing on unit CompletedOperations 2025-06-03T10:25:52.032070Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 9437184 has finished 2025-06-03T10:25:52.032078Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:52.032084Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:8] at 9437184 on unit FinishPropose 2025-06-03T10:25:52.032091Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 |60.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |60.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut |60.4%| [LD] {RESULT} $(B)/ydb/core/tx/sharding/ut/ydb-core-tx-sharding-ut >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] >> TTxDataShardMiniKQL::TableStats [GOOD] >> TTxDataShardMiniKQL::TableStatsHistograms >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight10BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::ReadNonExisting [GOOD] Test command err: 2025-06-03T10:25:51.430967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:51.430995Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:51.431768Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:51.439123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:51.439262Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:51.439314Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:51.448465Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:51.456095Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:51.456143Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:51.456375Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:51.456387Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 
2025-06-03T10:25:51.456396Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:51.456470Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:51.456547Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:51.456562Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:51.487466Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:51.497053Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:51.497137Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:51.497163Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:51.497169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:51.497174Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:51.497180Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.497243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.497251Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.497659Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:51.497693Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:51.497703Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.497712Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:51.497720Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:51.497725Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:51.497730Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:51.497736Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:51.497742Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:51.497759Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.497766Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.497782Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# 
[1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:51.498360Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:51.498378Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:51.498400Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:51.498448Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:51.498462Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:51.498479Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:51.498488Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:51.498493Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:51.498500Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:51.498505Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.498592Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:51.498598Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:51.498602Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:51.498607Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.498620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:51.498623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:51.498627Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:51.498631Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.498637Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:51.510590Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:51.510620Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.510628Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.510644Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:51.510689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:51.510848Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.510858Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.510867Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:51.510881Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:51.510887Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:51.510932Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.510942Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-03T10:25:51.510950Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:51.510955Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:51.511921Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:51.511952Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.512032Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.512042Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.512054Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.512065Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:51.512072Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:51.512081Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-03T10:25:51.512088Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
oordinators count is 1 buckets per mediator 2 2025-06-03T10:25:52.459312Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [3:284:2267] 2025-06-03T10:25:52.459317Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:52.459323Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 9437184 2025-06-03T10:25:52.459328Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:52.459382Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:711: TxInitSchemaDefaults.Execute 2025-06-03T10:25:52.459423Z node 3 :TX_DATASHARD DEBUG: datashard__init.cpp:723: TxInitSchemaDefaults.Complete 2025-06-03T10:25:52.459480Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [3:236:2228], Recipient [3:236:2228]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:52.459487Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:52.459541Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:52.459559Z node 3 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:52.459573Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270976, Sender [3:24:2071], Recipient [3:236:2228]: {TEvRegisterTabletResult TabletId# 9437184 Entry# 0} 2025-06-03T10:25:52.459578Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-03T10:25:52.459582Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 9437184 time 0 2025-06-03T10:25:52.459587Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:52.459629Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5528: Got TEvDataShard::TEvSchemaChanged for unknown txId 1 message# Source { RawX1: 236 RawX2: 12884904116 } Origin: 9437184 State: 2 TxId: 1 Step: 0 Generation: 3 2025-06-03T10:25:52.459647Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270978, Sender [3:24:2071], Recipient [3:236:2228]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 0 NextReadStep# 0 ReadStep# 0 } 2025-06-03T10:25:52.459656Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-03T10:25:52.459661Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 9437184 coordinator 72057594046316545 last step 0 next step 0 2025-06-03T10:25:52.459670Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:52.459676Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:52.459683Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:52.459688Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 
2025-06-03T10:25:52.459692Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:52.459696Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:52.459703Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:52.459720Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877760, Sender [3:282:2265], Recipient [3:236:2228]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 4200 Status: OK ServerId: [3:286:2269] Leader: 1 Dead: 0 Generation: 2 VersionInfo: } 2025-06-03T10:25:52.459725Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3162: StateWork, processing event TEvTabletPipe::TEvClientConnected 2025-06-03T10:25:52.459739Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269552132, Sender [3:124:2149], Recipient [3:236:2228]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 1 2025-06-03T10:25:52.459743Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3133: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-03T10:25:52.459748Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 1 datashard 9437184 state Ready 2025-06-03T10:25:52.459755Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 9437184 Got TEvSchemaChangedResult from SS at 9437184 2025-06-03T10:25:52.471015Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877763, Sender [3:282:2265], Recipient [3:236:2228]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 4200 ClientId: [3:282:2265] ServerId: [3:286:2269] } 2025-06-03T10:25:52.471050Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3163: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:25:52.513057Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269551617, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 100 RawX2: 12884904022 } 2025-06-03T10:25:52.513086Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-03T10:25:52.513162Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:292:2273], Recipient [3:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:52.513172Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:52.513181Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:290:2272], serverId# [3:292:2273], sessionId# [0:0:0] 2025-06-03T10:25:52.513222Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 100 RawX2: 12884904022 } TxBody: "\032\365\001\037\004\0021\nvalue\005\205\n\205\002\207\205\002\207\203\001H\006\002\205\004\205\002?\006\002\205\000\034MyReads MyWrites\205\004\205\002?\006\002\206\202\024Reply\024Write?\014\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\010)\211\n?\006\203\005\004\200\205\002\203\004\006\213\002\203\004\203\004$SelectRow\000\003?\036 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000?\004\005?\"\003? 
p\001\013?&\003?$T\001\003?(\000\037\002\000\005?\016\005?\n?8\000\005?\014\003\005?\024\005?\020?8\000\006\000?\022\003?>\005?\032\006\000?\030\001\037/ \0018\001" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-03T10:25:52.513234Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:52.513274Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:52.513506Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-03T10:25:52.513540Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:25:52.513546Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-03T10:25:52.513553Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:52.513558Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:25:52.513571Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:25:52.513588Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-03T10:25:52.513596Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:25:52.513601Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:52.513606Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:25:52.513610Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:25:52.513719Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-03T10:25:52.513733Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:52.513748Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:25:52.513753Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:25:52.513757Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:52.513762Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:25:52.513771Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:25:52.513797Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: 
Execution status for [0:2] at 9437184 is DelayComplete 2025-06-03T10:25:52.513803Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:52.513808Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-03T10:25:52.513812Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-03T10:25:52.513827Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:25:52.513832Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-03T10:25:52.513837Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-03T10:25:52.513852Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:52.513858Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:25:52.513867Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageMultiShard [GOOD] Test command err: 2025-06-03T10:25:50.975072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:50.975104Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.976039Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:50.980800Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:50.981012Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:50.981098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:50.992544Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:50.995836Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:50.995881Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:50.996133Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:50.996147Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:50.996157Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:50.996252Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:50.996337Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:50.996355Z node 1 :TX_DATASHARD 
DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:51.025915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:51.034265Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:51.034371Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:51.034393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:51.034398Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:51.034402Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:51.034406Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.034482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.034493Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.034614Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:51.034648Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:51.034655Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.034661Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:51.034668Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:51.034672Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:51.034678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:51.034683Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:51.034687Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:51.034698Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.034703Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.034713Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:51.035089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 
\"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:51.035096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:51.035108Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:51.035137Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:51.035147Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:51.035156Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:51.035163Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:51.035166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:51.035170Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:51.035174Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.035238Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:51.035241Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:51.035244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:51.035247Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.035256Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:51.035258Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:51.035261Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:51.035263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.035268Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:51.047637Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:51.047671Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.047678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.047692Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:51.047726Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not 
sending time cast registration request in state WaitScheme 2025-06-03T10:25:51.047867Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.047876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.047885Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:51.047898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:51.047904Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:51.047957Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.047967Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-03T10:25:51.047972Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:51.047978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:51.048800Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:51.048819Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.048878Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.048883Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.048892Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.048902Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:51.048908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:51.048918Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [2:1] in PlanQueue unit at 9437184 2025-06-03T10:25:51.048924Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [2:1] at 9437184 on unit PlanQueue 2025-06-03T10:25:51. ... 
ressTransaction} at tablet 9437185 (3 by [3:369:2313]) from queue queue_transaction 2025-06-03T10:25:52.574740Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:369:2313]) to queue queue_transaction 2025-06-03T10:25:52.574744Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_transaction from 16.936776 to 33.873553 (insert task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:369:2313])) 2025-06-03T10:25:52.574751Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} acquired dyn mem Res{3 96990534b}, Memory{0 dyn 96990534} 2025-06-03T10:25:52.574761Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:52.574766Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit ExecuteDataTx 2025-06-03T10:25:52.574956Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437184 restored its data 2025-06-03T10:25:52.651318Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437184 with status COMPLETE 2025-06-03T10:25:52.651365Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:52.651390Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:52.651398Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:25:52.651403Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompleteOperation 2025-06-03T10:25:52.651407Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompleteOperation 2025-06-03T10:25:52.651494Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is DelayComplete 2025-06-03T10:25:52.651497Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompleteOperation 2025-06-03T10:25:52.651500Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437184 to execution unit CompletedOperations 2025-06-03T10:25:52.651503Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437184 on unit CompletedOperations 2025-06-03T10:25:52.651508Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437184 is Executed 2025-06-03T10:25:52.651510Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437184 executing on unit CompletedOperations 2025-06-03T10:25:52.651513Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437184 has finished 2025-06-03T10:25:52.651518Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:52.651522Z 
node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:52.651525Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:52.651528Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:52.651558Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-03T10:25:52.651569Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-03T10:25:52.651655Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:257:2227]) (release resources {0, 96990534}) 2025-06-03T10:25:52.651672Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 33.873553 to 16.936776 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437184 (3 by [3:257:2227])) 2025-06-03T10:25:52.651700Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437185 2025-06-03T10:25:52.651705Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit ExecuteDataTx 2025-06-03T10:25:52.651965Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 5 at 9437185 restored its data 2025-06-03T10:25:52.719230Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [6:5] at tablet 9437185 with status COMPLETE 2025-06-03T10:25:52.719295Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [6:5] at 9437185: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 2, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 22, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:52.719327Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is ExecutedNoMoreRestarts 2025-06-03T10:25:52.719339Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit ExecuteDataTx 2025-06-03T10:25:52.719348Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompleteOperation 2025-06-03T10:25:52.719356Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompleteOperation 2025-06-03T10:25:52.719491Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is DelayComplete 2025-06-03T10:25:52.719498Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompleteOperation 2025-06-03T10:25:52.719503Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [6:5] at 9437185 to execution unit CompletedOperations 2025-06-03T10:25:52.719508Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [6:5] at 9437185 on unit CompletedOperations 2025-06-03T10:25:52.719516Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [6:5] at 9437185 is 
Executed 2025-06-03T10:25:52.719521Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [6:5] at 9437185 executing on unit CompletedOperations 2025-06-03T10:25:52.719526Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [6:5] at 9437185 has finished 2025-06-03T10:25:52.719534Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437185 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:52.719540Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437185 2025-06-03T10:25:52.719545Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437185 has no attached operations 2025-06-03T10:25:52.719550Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437185 2025-06-03T10:25:52.719601Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} hope 5 -> done Change{16, redo 636b alter 0b annex 0, ~{ 1001, 1, 3, 4, 12, 7, 8, 5 } -{ }, 0 gb} 2025-06-03T10:25:52.719620Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:9} Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} release Res{3 96990534b}, Memory{0 dyn 0} 2025-06-03T10:25:52.719742Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:369:2313]) (release resources {0, 96990534}) 2025-06-03T10:25:52.719768Z node 3 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_transaction from 16.936776 to 0.000000 (remove task Tx{19, NKikimr::NDataShard::TDataShard::TTxProgressTransaction} at tablet 9437185 (3 by [3:369:2313])) 2025-06-03T10:25:52.738200Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437185:3:10} commited cookie 1 for step 9 2025-06-03T10:25:52.738249Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-03T10:25:52.738265Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437185 on unit CompleteOperation 2025-06-03T10:25:52.738296Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437185 at tablet 9437185 send result to client [3:100:2134], exec latency: 1 ms, propose latency: 3 ms 2025-06-03T10:25:52.738476Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437185 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-03T10:25:52.738486Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-03T10:25:52.738592Z node 3 :TABLET_EXECUTOR DEBUG: Leader{9437184:3:10} commited cookie 1 for step 9 2025-06-03T10:25:52.738600Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:52.738605Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [6:5] at 9437184 on unit CompleteOperation 2025-06-03T10:25:52.738614Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [6 : 5] from 9437184 at tablet 9437184 send result to client [3:100:2134], exec latency: 1 ms, propose latency: 3 ms 2025-06-03T10:25:52.738621Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:563: Send delayed Ack RS Ack at 9437184 {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 
SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-03T10:25:52.738625Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:52.738692Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [3:347:2313], Recipient [3:459:2399]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437185 SetTabletConsumer# 9437185 Flags# 0 Seqno# 2} 2025-06-03T10:25:52.738701Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:25:52.738708Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437185 consumer 9437185 txId 5 2025-06-03T10:25:52.738725Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [3:235:2227], Recipient [3:459:2399]: {TEvReadSet step# 6 txid# 5 TabletSource# 9437186 TabletDest# 9437184 SetTabletConsumer# 9437184 Flags# 0 Seqno# 1} 2025-06-03T10:25:52.738729Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:25:52.738733Z node 3 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 9437186 source 9437186 dest 9437184 consumer 9437184 txId 5 |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest |60.5%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/dq_file/part12/pytest >> test.py::test[case-case_val_when_then-default.txt-Results] [GOOD] >> test.py::test[aggregate-group_by_rollup_key_check--Results] [GOOD] >> test.py::test[aggregate-group_by_session_nopush--Results] |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/secret/ut/unittest >> test.py::test[aggregate-group_by_session_nopush--Results] [SKIPPED] >> test.py::test[bigdate-table_arithmetic-default.txt-Results] >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize1000 |60.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |60.5%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring |60.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_monitoring/ydb-core-tx-scheme_board-ut_monitoring ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteLargeExternalBlob [GOOD] Test command err: 2025-06-03T10:25:50.910732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:50.910761Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.911690Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:50.916615Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:50.916766Z node 1 
:TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:50.916850Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:50.928590Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:50.932024Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:50.932083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:50.932338Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:50.932353Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:50.932363Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:50.932456Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:50.932557Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:50.932575Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:50.962834Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:50.976578Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:50.976686Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:50.976719Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:50.976727Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:50.976734Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:50.976740Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.976829Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.976839Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.976971Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:50.977010Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:50.977020Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.977029Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:50.977041Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:50.977049Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 
2025-06-03T10:25:50.977055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:50.977062Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:50.977069Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:50.977085Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.977092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.977108Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:50.977745Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nY\n\006table2\032\n\n\004key1\030\002 \"\032\013\n\004key2\030\200$ #\032\014\n\005value\030\200$ 8(\"(#:\010Z\006\010\000\030\000(\000J\014/Root/table2\222\002\013\th\020\000\000\000\000\000\000\020\016" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:50.977771Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:50.977800Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:50.977857Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:50.977871Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:50.977891Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:50.977902Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:50.977908Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:50.977916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:50.977921Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.978035Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:50.978042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:50.978050Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:50.978055Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.978069Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 
2025-06-03T10:25:50.978073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:50.978099Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:50.978105Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.978111Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:50.990178Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:50.990214Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.990225Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.990241Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:50.990287Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:50.990475Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.990487Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.990496Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:50.990512Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:50.990518Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:50.990585Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.990595Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-03T10:25:50.990603Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:50.990610Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:50.991539Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:50.991569Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.991667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.991675Z node 1 :TX_DATASHARD 
TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.991688Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.991699Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:50.991706Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:50.991717Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-03T10:25:50.991724Z node 1 :TX_DATASHARD TRACE: dat ... s: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:53.356251Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:53.356261Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:25:53.356269Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:53.356276Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:25:53.356315Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:53.356323Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:53.356329Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-03T10:25:53.356337Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-03T10:25:53.356357Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:25:53.356362Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-03T10:25:53.356367Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-03T10:25:53.365607Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:53.365638Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:25:53.365653Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 5 ms, status: COMPLETE 2025-06-03T10:25:53.365692Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:53.926009Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269551617, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 100 RawX2: 12884904022 } 2025-06-03T10:25:53.926045Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-03T10:25:53.926206Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:301:2281], Recipient 
[3:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:53.926215Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:53.926226Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:300:2280], serverId# [3:301:2281], sessionId# [0:0:0] 2025-06-03T10:25:53.997608Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 100 RawX2: 12884904022 } TxBody: "\032\332\201\200\010\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\004\203\004\203\001H\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000\013?\024\003?\020\251\003\003?\022\006bar\003\005?\030\003?\026\007\000\000\000\001xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 2025-06-03T10:25:53.997665Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:53.997738Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:54.029209Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CheckDataTx 2025-06-03T10:25:54.029286Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-03T10:25:54.029817Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit CheckDataTx 2025-06-03T10:25:54.029843Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:25:54.029851Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:25:54.029874Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:25:54.029902Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:3] at 9437184 2025-06-03T10:25:54.029912Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-03T10:25:54.029917Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:25:54.029923Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:25:54.029929Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit 
ExecuteDataTx 2025-06-03T10:25:54.029944Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:25:54.029963Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:3] at 9437184 requested 46269638 more memory 2025-06-03T10:25:54.029971Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-03T10:25:54.030056Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:54.030082Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-03T10:25:54.030088Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:25:54.051522Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:3] at 9437184 exceeded memory limit 50463942 and requests 403711536 more for the next try 2025-06-03T10:25:54.051643Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 3 released its data 2025-06-03T10:25:54.051659Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Restart 2025-06-03T10:25:54.051861Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:54.051874Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit ExecuteDataTx 2025-06-03T10:25:54.078413Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 3 at 9437184 restored its data 2025-06-03T10:25:54.078466Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:25:54.155777Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:3] at tablet 9437184 with status COMPLETE 2025-06-03T10:25:54.155822Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:3] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 16777223, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:25:54.155852Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:54.155861Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:25:54.155868Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:54.155875Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit FinishPropose 2025-06-03T10:25:54.155891Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is DelayComplete 
2025-06-03T10:25:54.155897Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:54.155901Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:3] at 9437184 to execution unit CompletedOperations 2025-06-03T10:25:54.155906Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:3] at 9437184 on unit CompletedOperations 2025-06-03T10:25:54.155922Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:3] at 9437184 is Executed 2025-06-03T10:25:54.155926Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:3] at 9437184 executing on unit CompletedOperations 2025-06-03T10:25:54.155932Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:3] at 9437184 has finished 2025-06-03T10:25:54.234659Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:54.234708Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:3] at 9437184 on unit FinishPropose 2025-06-03T10:25:54.234727Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 3 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 9 ms, status: COMPLETE 2025-06-03T10:25:54.234777Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:54.235973Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-03T10:25:54.236002Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-03T10:25:54.236775Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [3:233:2227], Recipient [3:236:2228]: NKikimr::TEvTablet::TEvFollowerGcApplied |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10000Inflight100BlobSize1000 |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/metadata/initializer/ut/unittest >> KqpSinkLocks::UncommittedRead [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize1000 >> test.py::test[window-win_func_lead_lag_worm--Results] [GOOD] >> test.py::test[window-win_func_lead_lag_worm_with_part--Results] |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/replication/service/ut_topic_reader/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> BlobStorageSync::TestSyncLogCuttingBlock4Plus2 [GOOD] >> BlobStorageSync::SyncWhenDiskGetsDown [GOOD] >> BurstDetection::TestPutEvenly ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::UncommittedRead [GOOD] Test command err: Trying to start YDB, gRPC: 27224, MsgBus: 20356 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f56/r3tmp/tmpafZHY1/pdisk_1.dat 2025-06-03T10:25:36.461973Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667318099783168:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:36.512726Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:36.554696Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667318099782995:2079] 1748946336457507 != 1748946336457510 2025-06-03T10:25:36.561116Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27224, node 1 2025-06-03T10:25:36.583207Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:36.583222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:36.583225Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:36.583285Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20356 2025-06-03T10:25:36.612653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:36.612699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:36.613472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20356 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:36.670193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:37.090654Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667322394750935:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.090704Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.090858Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667322394750962:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.091941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:37.095230Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667322394750964:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:37.184511Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667322394751015:2325] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:37.245231Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.278109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:37.278217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:37.278313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:25:37.278348Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:37.278350Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:37.278373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:37.278377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:37.278457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:37.278488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:37.278494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 
2025-06-03T10:25:37.278511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:37.278542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:37.278544Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:37.278559Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:25:37.278574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:37.278580Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:25:37.278596Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:25:37.278604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:37.278637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:37.278660Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:37.278684Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:37.278705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:25:37.278726Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];tablet_id=72075186224037890;proces ... n is supported;tx_id=0; 2025-06-03T10:25:43.484426Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037889 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484433Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511667322394751204:2344];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484441Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037890 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484448Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511667322394751202:2342];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484455Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037891 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484462Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511667322394751205:2345];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484471Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037892 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484478Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[1:7511667322394751201:2341];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484488Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037893 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484494Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511667322394751203:2343];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484502Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037894 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484510Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[1:7511667322394751215:2348];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484518Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037895 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484526Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667322394751304:2350];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484534Z node 1 :TX_COLUMNSHARD WARN: ctor_logger.h:56: TColumnShard.StateWork at 72075186224037896 unhandled event type: NKikimr::TEvDataShard::TEvCancelTransactionProposal event: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715669 2025-06-03T10:25:43.484540Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667322394751216:2349];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:43.484549Z node 1 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667322394751212:2347];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0;
: Error: Transaction locks invalidated. Table: `/Root/Test`., code: 2001
: Error: tablet lock have another internal generation counter: 18446744073709551615 != 0, code: 2001 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 Trying to start YDB, gRPC: 18637, MsgBus: 9500 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f56/r3tmp/tmppMP5Oe/pdisk_1.dat 2025-06-03T10:25:49.530958Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:49.534458Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667374406033094:2079] 1748946349493939 != 1748946349493942 2025-06-03T10:25:49.539265Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18637, node 2 2025-06-03T10:25:49.600854Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:49.600898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:49.607364Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:49.645551Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:49.645568Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:49.645570Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:49.645627Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9500 TClient is connected to server localhost:9500 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:25:49.800057Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:25:49.801803Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:50.135238Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667378701001025:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:50.135272Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:50.135433Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667378701001060:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:50.136531Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:50.139724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:25:50.139822Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667378701001062:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:50.238843Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667378701001113:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:50.255236Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:50.314370Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:25:50.550784Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 >> Sharding::XXUsage >> Sharding::XXUsage [GOOD] |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest >> AggregateStatistics::ShouldBePings |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest >> AggregateStatistics::ShouldBePings [GOOD] |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest |60.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |60.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge |60.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_huge/ydb-core-blobstorage-ut_blobstorage-ut_huge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/sharding/ut/unittest >> Sharding::XXUsage [GOOD] Test command err: 17722124227392484733 10558020990004277066 10917110436396631952 3394031942317916818 6048793860295821307 10483017260495591479 3871818015849896920 14625544971234224229 7568737847944028486 15417487040408270447 10873824440347056777 7378940698671983817 849199244855620460 13813024319298489584 14906018172804648629 2262039222807350086 2416153342897848997 11367811343030286301 4509415007297665654 14629879021709464493 13846691484988911893 12121444114409203988 11409675584873537440 338691135341743957 4328773826108988390 12794248163031039898 8587458948396031632 4584401883465169676 3907222565285806370 18230189563489707298 4535048076977605827 13225201607207094045 9898031306497621338 316899834589914775 17733172922728937589 8134413533840932362 1033126438242495655 7981864701584349766 4250411742786882622 12395337057707171149 15229893706726763318 3662281511731217672 16901043825158415011 14898951180805151887 9014212416339794754 15850062392072026432 8895682428572903964 7145851889010247513 13569528537359428744 7380627384392956143 5213072000891447799 13647450326620255816 15076356643988056859 14666671173132763867 14139302329802499528 7212911733004358111 
16691870804113778077 5418854493686809214 17211612671185913129 7370896463917651634 14991843642915352141 3653073609208306731 8795796790238922046 14903003806239129597 14543946316150729021 14444099946791736700 12597203130500371749 14632654655839978252 11690199661813691202 9370486875822463948 8747974526797068806 2933630988401468083 16014886051944480168 7288827966704738929 5157955801317200091 12106525949145643689 6888158862812047248 1808818136927568385 1728538626153377579 8421239673595840386 15229893706726763318 2629569142393994684 2181883636212493390 4744263831790581696 16413902688094209478 5551921709416526563 10164798420381512160 6269933530129855672 674182030591705027 10372102974590689604 |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> BurstDetection::TestPutEvenly [GOOD] >> BurstDetection::TestPutBurst >> TMonitoringTests::InvalidActorId [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBePings [GOOD] Test command err: 2025-06-03T10:25:57.170182Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:57.170313Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:57.289486Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-03T10:25:57.289522Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:25:57.289532Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-03T10:25:57.289743Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:19:2055], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-03T10:25:57.289752Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:57.289759Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:16:2056], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-03T10:25:57.289763Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:57.289781Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-03T10:25:57.289788Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize1000 [GOOD] >> CountingEvents::Put_Mirror3of4 >> TMonitoringTests::ValidActorId |60.6%| [TA] $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::InvalidActorId [GOOD] >> TMonitoringTests::ValidActorId [GOOD] >> CountingEvents::Put_Mirror3of4 [GOOD] >> CountingEvents::Put_Mirror3dc |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest |60.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest |60.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut >> CountingEvents::Put_Mirror3dc [GOOD] >> CountingEvents::Put_Block42 |60.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |60.7%| [TA] {RESULT} $(B)/ydb/core/tx/sharding/ut/test-results/unittest/{meta.json ... results_accumulator.log} |60.7%| [LD] {RESULT} $(B)/ydb/core/kqp/executer_actor/ut/ydb-core-kqp-executer_actor-ut |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> BurstDetection::TestPutBurst [GOOD] >> BurstDetection::TestOverlySensitive >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD] |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> TMonitoringTests::ValidActorId [GOOD] |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest >> CountingEvents::Put_Block42 [GOOD] >> CountingEvents::Put_None >> CountingEvents::Put_None [GOOD] >> CountingEvents::Get_Mirror3of4 [GOOD] >> CountingEvents::Get_Mirror3dc |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_monitoring/unittest |60.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] >> KqpSinkTx::OlapInteractive [GOOD] >> CountingEvents::Get_Mirror3dc [GOOD] >> CountingEvents::Get_Block42 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkMvcc::OlapReadWriteTxFailsOnConcurrentWrite3 [GOOD] Test command err: Trying to start YDB, gRPC: 11926, MsgBus: 6349 2025-06-03T10:25:36.317751Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667319043795497:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:36.318701Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f5a/r3tmp/tmpWMsda4/pdisk_1.dat 2025-06-03T10:25:36.404044Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667319043795337:2079] 1748946336316526 != 1748946336316529 2025-06-03T10:25:36.406128Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11926, node 1 2025-06-03T10:25:36.421723Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:36.421740Z node 1 :NET_CLASSIFIER 
WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:36.421743Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:36.421790Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6349 2025-06-03T10:25:36.473168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:36.473193Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:36.474459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6349 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:36.507390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:36.513628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:25:36.761593Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667319043795980:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:36.761623Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:36.761813Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667319043796007:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:36.762710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:36.765212Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667319043796009:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:36.843118Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667319043796060:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:36.900898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:36.937197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:36.937523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:36.937774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:25:36.937976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:36.937994Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:36.938011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:36.938274Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:36.938295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:36.938524Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:36.938543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:25:36.938560Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:25:36.938763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511667319043796246:2343];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:25:36.942255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:36.942282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:36.942336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:25:36.942361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:36.942384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:36.942412Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:36.942433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:36.942457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:36.942476Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:36.942492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037897;self_id=[1:7511667319043796244:2341];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME= ... e=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435065Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037968;self_id=[2:7511667380889921348:2445];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037968;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435077Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037985;self_id=[2:7511667380889921199:2427];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037985;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435088Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037986;self_id=[2:7511667380889921202:2430];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037986;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435098Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037987;self_id=[2:7511667380889921197:2425];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037987;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435109Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037972;self_id=[2:7511667380889921357:2450];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037972;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435120Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037957;self_id=[2:7511667380889921375:2464];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037957;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435132Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037974;self_id=[2:7511667380889921235:2434];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037974;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435142Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037975;self_id=[2:7511667380889921327:2435];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037975;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435155Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037976;self_id=[2:7511667380889921210:2433];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037976;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435165Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037961;self_id=[2:7511667380889921096:2412];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037961;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435179Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037962;self_id=[2:7511667380889921374:2463];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037962;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435190Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037963;self_id=[2:7511667380889921337:2438];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037963;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435200Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037964;self_id=[2:7511667380889921370:2459];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037964;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435211Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[2:7511667380889921200:2428];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037981;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435222Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037966;self_id=[2:7511667380889921354:2447];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037966;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435234Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[2:7511667380889921190:2420];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037983;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435245Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037984;self_id=[2:7511667380889921194:2422];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037984;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435256Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037988;self_id=[2:7511667380889921196:2424];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037988;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435266Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037989;self_id=[2:7511667380889921195:2423];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037989;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435276Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037993;self_id=[2:7511667380889921091:2407];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037993;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435286Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037994;self_id=[2:7511667380889921128:2414];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037994;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435295Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037973;self_id=[2:7511667380889921381:2468];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037973;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435306Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037990;self_id=[2:7511667380889921189:2419];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037990;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435316Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037991;self_id=[2:7511667380889921094:2410];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037991;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435327Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037992;self_id=[2:7511667380889921093:2409];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037992;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435338Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037977;self_id=[2:7511667380889921201:2429];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037977;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435348Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037978;self_id=[2:7511667380889921209:2432];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037978;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435358Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037979;self_id=[2:7511667380889921208:2431];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037979;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435368Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037980;self_id=[2:7511667380889921198:2426];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037980;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435378Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: 
tablet_id=72075186224037995;self_id=[2:7511667380889921176:2418];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037995;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435389Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037996;self_id=[2:7511667380889921133:2417];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037996;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435400Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037997;self_id=[2:7511667380889921131:2416];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037997;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:25:53.435424Z node 2 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037982;self_id=[2:7511667380889921191:2421];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037982;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:25:55.933210Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667376594951990:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:55.933267Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingTabletTimeout [GOOD] Test command err: 2025-06-03T10:25:59.556644Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:59.557961Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = OK 2025-06-03T10:25:59.558065Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:8:2055], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558090Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2, status = OK 2025-06-03T10:25:59.558097Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:9:2056], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558111Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-03T10:25:59.558144Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [1:10:2057], tablet id = 3, status = OK 2025-06-03T10:25:59.558151Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:10:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558174Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = OK 2025-06-03T10:25:59.558180Z node 1 :STATISTICS DEBUG: 
service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:11:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558186Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5, status = OK 2025-06-03T10:25:59.558192Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:12:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558209Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-03T10:25:59.558213Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.558219Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-03T10:25:59.558228Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [1:13:2060], tablet id = 6, status = OK 2025-06-03T10:25:59.558234Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:13:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558244Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-03T10:25:59.558252Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = OK 2025-06-03T10:25:59.558258Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:14:2061], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.558266Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-03T10:25:59.558269Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.558274Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:12:2059], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-03T10:25:59.558278Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.558282Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 7 2025-06-03T10:25:59.558290Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [0:0:0], tablet id = 7, status = ERROR 2025-06-03T10:25:59.558296Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.568468Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 1 has already been processed 2025-06-03T10:25:59.568515Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 2 2025-06-03T10:25:59.568523Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-03T10:25:59.568561Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 3 has already been processed 2025-06-03T10:25:59.568569Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 4 2025-06-03T10:25:59.568572Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 
2025-06-03T10:25:59.568583Z node 1 :STATISTICS DEBUG: service_impl.cpp:1028: Tablet 5 has already been processed 2025-06-03T10:25:59.568588Z node 1 :STATISTICS ERROR: service_impl.cpp:1032: No result was received from the tablet 6 2025-06-03T10:25:59.568591Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 6 is not local. 2025-06-03T10:25:59.568596Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:25:59.568610Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.568614Z node 1 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout 2025-06-03T10:25:59.568638Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:9:2056], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-03T10:25:59.568642Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.568648Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-03T10:25:59.568651Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.568656Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-03T10:25:59.568660Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout >> CountingEvents::Get_Block42 [GOOD] >> CountingEvents::Get_None >> BurstDetection::TestOverlySensitive [GOOD] >> CompatibilityInfo::VDiskCompatible [GOOD] >> CompatibilityInfo::VDiskIncompatible [GOOD] >> CompatibilityInfo::VDiskIncompatibleWithDefault >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] >> CountingEvents::Get_None [GOOD] >> CountingEvents::Collect_Mirror3of4 >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes >> CompatibilityInfo::VDiskIncompatibleWithDefault [GOOD] >> CompatibilityInfo::VDiskSuppressCompatibilityCheck [GOOD] >> CompatibilityInfo::BSControllerCompatible |60.7%| [TA] $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... results_accumulator.log} >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] >> CompatibilityInfo::BSControllerCompatible [GOOD] >> CompatibilityInfo::BSControllerIncompatible |60.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |60.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure >> RemoteTopicReader::ReadTopic >> CompatibilityInfo::BSControllerIncompatible [GOOD] >> CompatibilityInfo::BSControllerIncompatibleWithDefault >> HugeBlobOnlineSizeChange::Compaction |60.7%| [TA] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_monitoring/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure/ydb-core-tx-schemeshard-ut_data_erasure |60.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |60.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain |60.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain/ydb-core-tx-schemeshard-ut_extsubdomain ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCcorrectProcessingOfLocalTablets [GOOD] Test command err: 2025-06-03T10:26:00.000015Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.001403Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:9:2056], server id = [1:9:2056], tablet id = 2 2025-06-03T10:26:00.001428Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 2 is not local. 2025-06-03T10:26:00.001453Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-03T10:26:00.001603Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:8:2055], server id = [1:8:2055], tablet id = 1, status = ERROR 2025-06-03T10:26:00.001612Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 1 is not local. 2025-06-03T10:26:00.001630Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:10:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-03T10:26:00.001634Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.001640Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:11:2058], server id = [1:11:2058], tablet id = 4, status = ERROR 2025-06-03T10:26:00.001644Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 4 is not local. 2025-06-03T10:26:00.001654Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:12:2059], server id = [1:12:2059], tablet id = 5 2025-06-03T10:26:00.001658Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 5 is not local. 2025-06-03T10:26:00.001666Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-03T10:26:00.001675Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:14:2061], server id = [1:14:2061], tablet id = 7, status = ERROR 2025-06-03T10:26:00.001679Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 7 is not local. 2025-06-03T10:26:00.001685Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:13:2060], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-03T10:26:00.001687Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.001691Z node 1 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 1, client id = [1:15:2062], server id = [1:15:2062], tablet id = 8 2025-06-03T10:26:00.001693Z node 1 :STATISTICS DEBUG: service_impl.cpp:1063: Tablet 8 is not local. 
2025-06-03T10:26:00.001696Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkTx::OlapInteractive [GOOD] Test command err: Trying to start YDB, gRPC: 7454, MsgBus: 10220 2025-06-03T10:25:36.509738Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667319456293865:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:36.509776Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f57/r3tmp/tmp4CuIMc/pdisk_1.dat 2025-06-03T10:25:36.592767Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7454, node 1 2025-06-03T10:25:36.605120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:36.605141Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:36.605146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:36.605197Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:36.613283Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:36.613334Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:36.613973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10220 TClient is connected to server localhost:10220 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:36.689366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:37.096261Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667323751261776:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.096287Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667323751261766:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.096301Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:37.097256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:25:37.099689Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667323751261787:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:25:37.176963Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667323751261838:2325] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:37.250386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:25:37.286894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:37.286958Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:37.287032Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:25:37.287070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:37.287099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:37.287123Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:37.287150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:37.287182Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:37.287221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:37.287244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:25:37.287268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:25:37.287292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511667323751262025:2342];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:25:37.288842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:25:37.288858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:25:37.288915Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:25:37.288938Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:25:37.288967Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:25:37.288990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:25:37.289008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:25:37.289027Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:25:37.289046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:25:37.289067Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:25:37.289096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511667323751262028:2343];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:25:37.289119Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=7207 ... ;ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005908s; 2025-06-03T10:25:52.933740Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037939;self_id=[2:7511667371945913407:2696];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.008162s; 2025-06-03T10:25:52.933752Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037968;self_id=[2:7511667371945913318:2660];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005129s; 2025-06-03T10:25:52.933793Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037957;self_id=[2:7511667371945913316:2658];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.008212s; 2025-06-03T10:25:52.933803Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037965;self_id=[2:7511667371945913400:2694];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005157s; 2025-06-03T10:25:52.933878Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037983;self_id=[2:7511667371945913389:2687];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.005081s; 2025-06-03T10:25:52.934303Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037973;self_id=[2:7511667371945913288:2639];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.004138s; 2025-06-03T10:25:52.934359Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934443Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037953;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934457Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037941;self_id=[2:7511667371945913423:2709];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001728s; 2025-06-03T10:25:52.934498Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037994;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934691Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037989;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934738Z node 2 :TX_COLUMNSHARD_TX 
WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934747Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037997;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.934866Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.935183Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037971;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.935234Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037963;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.935322Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037979;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.935335Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037985;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.948677Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037961;self_id=[2:7511667371945913480:2714];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.013852s; 2025-06-03T10:25:52.948953Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037935;self_id=[2:7511667371945913426:2710];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.013149s; 2025-06-03T10:25:52.949328Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037967;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.949609Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037959;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.949681Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.949782Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037965;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.949873Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037983;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.950009Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037968;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.950018Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037973;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.950058Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037957;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.950084Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.962530Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:52.962626Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037961;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.504612Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037915;self_id=[2:7511667371945911796:2428];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001738s; 2025-06-03T10:25:53.506290Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037915;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.569592Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037913;self_id=[2:7511667371945911755:2409];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000954s; 2025-06-03T10:25:53.570190Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[2:7511667371945911761:2415];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000681s; 2025-06-03T10:25:53.570577Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037909;self_id=[2:7511667371945911802:2434];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000913s; 2025-06-03T10:25:53.570594Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037919;self_id=[2:7511667371945911800:2432];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000969s; 2025-06-03T10:25:53.571508Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.571586Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037917;self_id=[2:7511667371945911786:2425];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.000817s; 2025-06-03T10:25:53.571970Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.572038Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037919;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.572294Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.577918Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037917;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.625513Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;self_id=[2:7511667371945911753:2407];ev=NActors::TEvents::TEvWakeup;fline=sync.h:19;event=tx_timeout;lock=281474976715665;tx_id=281474976715667;d=2.001480s; 2025-06-03T10:25:53.630209Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:25:53.648465Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511667367650943156:2142];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:53.648503Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::RootNodeShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-03T10:25:59.903440Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:59.903627Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-03T10:25:59.903682Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.903707Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-03T10:25:59.903765Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:59.903792Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-03T10:25:59.903796Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.903812Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:25:59.903836Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-03T10:25:59.903852Z node 3 :STATISTICS DEBUG: 
service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:45:2057], server id = [3:45:2057], tablet id = 3, status = OK 2025-06-03T10:25:59.903859Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:45:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.903868Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-03T10:25:59.903873Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:25:59.903879Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-03T10:25:59.903884Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:25:59.903897Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-03T10:25:59.903901Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:25:59.903915Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-03T10:25:59.903919Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.903925Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:45:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-03T10:25:59.903929Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:25:59.903975Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-03T10:25:59.903983Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-03T10:25:59.914419Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.914450Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:25:59.914469Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.914476Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:25:59.925033Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-03T10:25:59.925078Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-03T10:25:59.925088Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:25:59.925118Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.925122Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-03T10:25:59.925160Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.925165Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:25:59.925177Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:25:59.925181Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive >> CountingEvents::Collect_Mirror3of4 [GOOD] >> CountingEvents::Collect_Mirror3dc >> CompatibilityInfo::BSControllerIncompatibleWithDefault [GOOD] >> 
CompatibilityInfo::BSControllerSuppressCompatibilityCheck [GOOD] >> CompatibilityInfo::VDiskMigration [GOOD] >> CompatibilityInfo::BSControllerMigration |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> CompatibilityInfo::BSControllerMigration [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1BlobSize1000 >> CountingEvents::Collect_Mirror3dc [GOOD] >> CountingEvents::Collect_Block42 >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ShouldBeCorrectlyAggregateStatisticsFromAllNodes [GOOD] Test command err: 2025-06-03T10:26:00.156616Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.156760Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-03T10:26:00.156803Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.156863Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [1:39:2059], tablet id = 2, status = OK 2025-06-03T10:26:00.156867Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:39:2059], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.156916Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.156938Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-03T10:26:00.156965Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [1:40:2060], tablet id = 3, status = OK 2025-06-03T10:26:00.156969Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:40:2060], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.156991Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.157000Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:44:2057], server id = [2:44:2057], tablet id = 4, status = OK 2025-06-03T10:26:00.157004Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:44:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.157012Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 2 2025-06-03T10:26:00.157024Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-03T10:26:00.157033Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:47:2057], server id = [3:47:2057], tablet id = 5, status = OK 2025-06-03T10:26:00.157037Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.157041Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-03T10:26:00.157047Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client 
id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-03T10:26:00.157050Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157055Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:44:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-03T10:26:00.157057Z node 2 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157060Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-03T10:26:00.157066Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 5 2025-06-03T10:26:00.157070Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:26:00.157078Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [4:49:2057], tablet id = 6, status = OK 2025-06-03T10:26:00.157081Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:49:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.157089Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:39:2059], server id = [0:0:0], tablet id = 2, status = ERROR 2025-06-03T10:26:00.157093Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157097Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:47:2057], server id = [0:0:0], tablet id = 5, status = ERROR 2025-06-03T10:26:00.157099Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157102Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:40:2060], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-03T10:26:00.157104Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157107Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 6 2025-06-03T10:26:00.157110Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:00.157119Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:49:2057], server id = [0:0:0], tablet id = 6, status = ERROR 2025-06-03T10:26:00.157121Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.157149Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-03T10:26:00.157164Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-03T10:26:00.157172Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:26:00.157185Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 2 2025-06-03T10:26:00.157189Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 >> CountingEvents::Collect_Block42 [GOOD] >> CountingEvents::Collect_None >> KqpScheme::CreateDropTableViaApiMultipleTime [GOOD] >> KqpScheme::CreateDropColumnTable |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest |60.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer >> CountingEvents::Collect_None [GOOD] >> Deadlines::TestPut4Plus2Block |60.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |60.8%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_target_discoverer/replication-controller-ut_target_discoverer |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/ut_aggregation/unittest >> AggregateStatistics::ChildNodesShouldBeInvalidateByTimeout [GOOD] Test command err: 2025-06-03T10:26:00.582831Z node 1 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.583022Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [1:38:2058], tablet id = 1, status = OK 2025-06-03T10:26:00.583088Z node 1 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [1:38:2058], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.583115Z node 1 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 1 2025-06-03T10:26:00.583169Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.583189Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:38:2058], server id = [0:0:0], tablet id = 1, status = ERROR 2025-06-03T10:26:00.583192Z node 1 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.583204Z node 3 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 1, Round: 1, current Round: 0 2025-06-03T10:26:00.583222Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 1, current Round: 0 2025-06-03T10:26:00.583234Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:45:2057], server id = [3:45:2057], tablet id = 3, status = OK 2025-06-03T10:26:00.583240Z node 3 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [3:45:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.583245Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [4:47:2057], tablet id = 4, status = OK 2025-06-03T10:26:00.583249Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:47:2057], path = { OwnerId: 3 LocalId: 3 } 2025-06-03T10:26:00.583253Z node 3 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 3 2025-06-03T10:26:00.583257Z node 3 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:26:00.583266Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 4 2025-06-03T10:26:00.583280Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:00.583290Z node 4 :STATISTICS DEBUG: 
service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:47:2057], server id = [0:0:0], tablet id = 4, status = ERROR 2025-06-03T10:26:00.583293Z node 4 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.583297Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:45:2057], server id = [0:0:0], tablet id = 3, status = ERROR 2025-06-03T10:26:00.583299Z node 3 :STATISTICS DEBUG: service_impl.cpp:1110: Skip EvClientConnected 2025-06-03T10:26:00.583330Z node 2 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 4 2025-06-03T10:26:00.583337Z node 1 :STATISTICS DEBUG: service_impl.cpp:448: Received TEvAggregateStatisticsResponse SenderNodeId: 3 2025-06-03T10:26:00.593520Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:26:00.593545Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:00.593558Z node 3 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:26:00.593562Z node 3 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:00.603790Z node 2 :STATISTICS DEBUG: service_impl.cpp:401: Skip TEvKeepAliveTimeout 2025-06-03T10:26:00.603843Z node 1 :STATISTICS INFO: service_impl.cpp:416: Node 2 is unavailable 2025-06-03T10:26:00.603853Z node 1 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 1 2025-06-03T10:26:00.603890Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:26:00.603895Z node 1 :STATISTICS DEBUG: service_impl.cpp:393: Skip TEvKeepAliveTimeout 2025-06-03T10:26:00.603944Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:26:00.603949Z node 1 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:00.603962Z node 1 :STATISTICS DEBUG: service_impl.cpp:252: Event round 1 is different from the current 0 2025-06-03T10:26:00.603966Z node 1 :STATISTICS DEBUG: service_impl.cpp:428: Skip TEvAggregateKeepAlive |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> Deadlines::TestPut4Plus2Block [GOOD] >> Deadlines::TestGetMirror3dc >> Deadlines::TestGetMirror3dc [GOOD] >> Deadlines::TestGet4Plus2Block |60.8%| [TA] $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... 
results_accumulator.log} |60.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> KqpScheme::CreateDropColumnTable [GOOD] >> KqpScheme::CreateDropColumnTableNegative >> Deadlines::TestGet4Plus2Block [GOOD] >> Deadlines::TestGetMirror3of4 |60.8%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> Deadlines::TestGetMirror3of4 [GOOD] >> TTxDataShardMiniKQL::CrossShard_1_Cycle [GOOD] >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |60.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |60.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |60.9%| [TA] {RESULT} $(B)/ydb/core/statistics/service/ut/ut_aggregation/test-results/unittest/{meta.json ... results_accumulator.log} |60.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/ut_blobstorage-ut_read_only_pdisk |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> KqpScheme::CreateDropColumnTableNegative [GOOD] >> KqpScheme::CreateExternalDataSource >> TSchemeShardExtSubDomainTest::CreateAndWait |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> Deadlines::TestGetMirror3of4 [GOOD] Test command err: RandomSeed# 17698828859901114402 2025-06-03T10:26:01.220875Z 9 00h01m40.010512s :BS_PROXY_PUT ERROR: [e61af0a23505c2e0] Result# TEvPutResult {Id# [1:1:1:1:123:1000:0] Status# DEADLINE StatusFlags# { } ErrorReason# "Deadline timer hit" ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12 2025-06-03T10:26:01.573145Z 10 00h01m40.010512s :BS_PROXY_GET ERROR: [ea8e7d347cc7bd9b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 2025-06-03T10:26:01.924985Z 9 00h01m40.010512s :BS_PROXY_GET ERROR: [49467672cabfb91d] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 2025-06-03T10:26:02.249766Z 9 00h01m40.010512s :BS_PROXY_GET ERROR: [d9b6756ede99cc97] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:1:123:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "Deadline timer hit"} Marker# BPG29 |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest |60.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/executer_actor/ut/unittest >> TSchemeShardExtSubDomainTest::CreateAndWait [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive >> RemoteTopicReader::ReadTopic [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-ExternalHive [GOOD] >> 
TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::Fake [GOOD] >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_topic_reader/unittest >> RemoteTopicReader::ReadTopic [GOOD] Test command err: 2025-06-03T10:26:00.353016Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667421944317753:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:00.353041Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0026e0/r3tmp/tmpVdZtFk/pdisk_1.dat 2025-06-03T10:26:00.412663Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:14342 TServer::EnableGrpc on GrpcPort 22988, node 1 2025-06-03T10:26:00.441846Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:00.441874Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:00.441879Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:00.441938Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:00.454013Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:00.454075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:00.455103Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14342 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:00.484543Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:01.358415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:26:01.392963Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667426239285926:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:01.393004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:01.393202Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667426239285939:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:01.394180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-03T10:26:01.397041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-03T10:26:01.397138Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667426239285941:2363], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:26:01.457883Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667426239285981:2455] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:01.624779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:26:01.693934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:26:01.792471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-06-03T10:26:01.883192Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:26:01.977700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:26:02.295595Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handshake: worker# [1:7511667421944318334:2284] 2025-06-03T10:26:02.296332Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Create read session: session# [1:7511667430534253834:2283] 2025-06-03T10:26:02.296483Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:26:02.317668Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_457159178605448165_v1 } } 2025-06-03T10:26:02.337528Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 0 SeqNo: 1 CreateTime: 2025-06-03T10:26:02.191000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-03T10:26:02.338167Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:26:02.425708Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511667430534253833:2695] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { 
Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-03T10:26:02.412000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-03T10:26:02.529503Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7511667430534253957:2735] Handshake: worker# [1:7511667421944318334:2284] 2025-06-03T10:26:02.530127Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7511667430534253957:2735] Create read session: session# [1:7511667430534253958:2283] 2025-06-03T10:26:02.531785Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511667430534253957:2735] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:26:02.534075Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7511667430534253957:2735] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_2_3806021003734396657_v1 } } 2025-06-03T10:26:02.540917Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511667430534253957:2735] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 9b Offset: 1 SeqNo: 2 CreateTime: 2025-06-03T10:26:02.412000Z MessageGroupId: producer ProducerId: producer }] } } >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive >> TSchemeShardExtSubDomainTest::CreateWithOnlyDotsNotAllowed [GOOD] >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false >> KqpScheme::CreateExternalDataSource [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain >> TSchemeShardExtSubDomainTest::CreateAndAlterWithoutEnablingTx-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive >> TargetDiscoverer::Basic >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSchemeShard-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::NothingInsideGSS-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::Drop >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-false [GOOD] >> 
TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true >> TargetDiscoverer::Negative >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst >> TargetDiscoverer::Dirs >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true >> test.py::test[bigdate-table_arithmetic-default.txt-Results] [GOOD] >> test.py::test[bigdate-table_common_type-default.txt-Results] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::CreateAndAlterWithExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive >> TSchemeShardExtSubDomainTest::AlterCantChangeSetParams-AlterDatabaseCreateHiveFirst-true [GOOD] >> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterAddStoragePool-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-ExternalHive [GOOD] >> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::Drop [GOOD] >> TSchemeShardExtSubDomainTest::Drop-ExternalHive |60.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 64122, MsgBus: 23008 2025-06-03T10:25:08.618727Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667197263962764:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:08.618757Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c6/r3tmp/tmpjjF2Eq/pdisk_1.dat 2025-06-03T10:25:08.675664Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64122, node 1 2025-06-03T10:25:08.693701Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:08.693719Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:08.693722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-03T10:25:08.693770Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23008 2025-06-03T10:25:08.720399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:08.720425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:08.721705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23008 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:08.756622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.764515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.830221Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.870831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:08.902017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:09.065276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667201558931657:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.065324Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.130048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.157726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.169839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.187573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.200708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.216176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.241263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:09.261845Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667201558932311:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.261867Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.261987Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667201558932316:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:09.263033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:09.265359Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667201558932318:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:09.349599Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667201558932369:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:09.643984Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found 2025-06-03T10:25:09.649027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 13762, MsgBus: 2185 2025-06-03T10:25:09.954647Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667203670468496:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:09.954668Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c6/r3tmp/tmpYUtx8X/pdisk_1.dat 2025-06-03T10:25:09.970262Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13762, node 2 2025-06-03T10:25:09.977793Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:09.977810Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:09.977812Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:09.977879Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2185 TClient is connected to server localhost:2185 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:25:10.055097Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:10.055131Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:10.056275Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#720575940 ... ated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:02.100857Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:02.103440Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:02.446094Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667430318300123:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.446118Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.456580Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667430318300144:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.456606Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.459222Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511667430318300150:2296] txid# 281474976715658, issues: { message: "Nullable key column \'Key\'" severity: 1 } 2025-06-03T10:26:02.461966Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511667430318300158:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.461988Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:02.467053Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511667430318300164:2304] txid# 281474976715659, issues: { message: "Nullable key column \'Key\'" severity: 1 } Trying to start YDB, gRPC: 61639, MsgBus: 22156 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0017c6/r3tmp/tmpe88yMB/pdisk_1.dat 2025-06-03T10:26:02.743813Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667430980739081:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:02.744089Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:26:02.786975Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61639, node 6 2025-06-03T10:26:02.797881Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:02.797894Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:02.797897Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:02.797946Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22156 2025-06-03T10:26:02.854204Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:02.854227Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:02.854727Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22156 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:02.863944Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:02.865317Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:02.869540Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:02.887975Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:02.962387Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:02.979600Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:03.155467Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667435275707941:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:03.155516Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:03.158335Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.167630Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.182730Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.194772Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.208304Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.224758Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.236420Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:26:03.258891Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667435275708593:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:03.258915Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:03.258944Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667435275708598:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:03.259925Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:26:03.263067Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667435275708600:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-03T10:26:03.360952Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667435275708651:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:26:03.535619Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715672:0, at schemeshard: 72057594046644480
>> test.py::test[window-win_func_lead_lag_worm_with_part--Results] [GOOD]
>> test.py::test[window-win_func_special--Results]
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalHive-AlterDatabaseCreateHiveFirst-true [GOOD]
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false
>> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-false [GOOD]
>> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst
>> TargetDiscoverer::Basic [GOOD]
>> TargetDiscoverer::Negative [GOOD]
>> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD]
>> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive
>> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
|60.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut
|60.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut
|60.9%| [LD] {RESULT} $(B)/ydb/services/ydb/ut/ydb-services-ydb-ut
>> TargetDiscoverer::Transfer
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-false [GOOD]
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive
>> CostMetricsPatchMirror3dc::TestPatchMirror3dcRequests10000Inflight100BlobSize1000 [GOOD]
>> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests1Inflight1BlobSize1000
>> TSchemeShardExtSubDomainTest::AlterRequiresParamCombinations-AlterDatabaseCreateHiveFirst-true [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst
>> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false
>> TSchemeShardExtSubDomainTest::Drop-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst
>> TSchemeShardExtSubDomainTest::SysViewProcessorSync-AlterDatabaseCreateHiveFirst-true [GOOD]
>> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false
>> TargetDiscoverer::Dirs [GOOD]
>> TSchemeShardExtSubDomainTest::AlterWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalSysViewProcessor-AlterDatabaseCreateHiveFirst-true [GOOD]
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
>> TTxDataShardMiniKQL::CrossShard_5_AllToAll [GOOD]
>> TTxDataShardMiniKQL::CrossShard_6_Local
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Basic [GOOD]
Test command err:
2025-06-03T10:26:04.111083Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667436328825979:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:04.111109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029fe/r3tmp/tmpUaA88b/pdisk_1.dat 2025-06-03T10:26:04.171432Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:29179 TServer::EnableGrpc on GrpcPort 31953, node 1 2025-06-03T10:26:04.211551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:04.211573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:04.211576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:04.211635Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:04.212473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:04.212518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:04.213578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29179 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:04.257536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:04.260965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:04.340867Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946364305, tx_id: 1 } } } 2025-06-03T10:26:04.340884Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-03T10:26:04.344357Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946364361, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-03T10:26:04.344371Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-03T10:26:04.579053Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946364361, tx_id: 281474976715658 } } } 2025-06-03T10:26:04.579072Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-03T10:26:04.579080Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndSameAlterTwice-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 
72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.030610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.030647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.030653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:03.030659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.030671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.030676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.030692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.030709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.030836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.030932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.043979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.044009Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.051501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.051672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.051715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:03.077848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.077956Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.078175Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.078284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:03.079550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.079620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.080089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.080106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.080122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.080133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.080140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.080178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.081959Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.109286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.109487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.109566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.109618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.109633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.114503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.114562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.114646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.114662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 
ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.114671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.114678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.115878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.115911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.115942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:03.117630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.117661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.117670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.117682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.118710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.121719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.121786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.121988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.122046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.122059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.122149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:03.122157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.122198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.122210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.126069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.126098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.126169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ersion: 6 2025-06-03T10:26:04.760925Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-03T10:26:04.760936Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:26:04.762453Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5814: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-03T10:26:04.762474Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:04.762494Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:392:2361], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:04.762538Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-03T10:26:04.762546Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 
2025-06-03T10:26:04.762577Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-03T10:26:04.762583Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:484:2425], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-03T10:26:04.762762Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.762772Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:26:04.762785Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:04.762789Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:04.762794Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:04.762798Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:04.762802Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:26:04.762808Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:04.762813Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:26:04.762817Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:26:04.762831Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:04.762922Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-03T10:26:04.765244Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:04.765276Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:26:04.765539Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:26:04.765551Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:26:04.765637Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:26:04.765660Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 
2025-06-03T10:26:04.765666Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:564:2503] TestWaitNotification: OK eventTxId 103 2025-06-03T10:26:04.765757Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:04.765804Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 61us result status StatusSuccess 2025-06-03T10:26:04.765912Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:04.765976Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:04.765992Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 17us result status StatusSuccess 2025-06-03T10:26:04.766050Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 
72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:04.766130Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186234409546 2025-06-03T10:26:04.766154Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186234409546 describe path "/MyRoot/USER_0" took 24us result status StatusSuccess 2025-06-03T10:26:04.766200Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "MyRoot/USER_0" PathId: 1 SchemeshardId: 72075186234409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 4 PlanResolution: 50 Coordinators: 72075186234409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186234409548 SchemeShard: 72075186234409546 Hive: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "hdd" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot/USER_0" } } } PathId: 1 PathOwnerId: 72075186234409546, at schemeshard: 72075186234409546 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Negative [GOOD] Test command err: 2025-06-03T10:26:04.291958Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667437699511160:2135];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:04.293034Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029e0/r3tmp/tmp9qsW7v/pdisk_1.dat 2025-06-03T10:26:04.375759Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667437699511064:2079] 1748946364290779 != 1748946364290782 2025-06-03T10:26:04.383409Z node 
1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:15908 TServer::EnableGrpc on GrpcPort 7361, node 1 2025-06-03T10:26:04.433530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:04.433546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:04.433548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:04.433587Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:04.439351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:04.439392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:04.440647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15908 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:04.494734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:04.506024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:04.534050Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-03T10:26:04.534076Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/Table, status# SCHEME_ERROR, issues# {
: Error: Path not found }
>> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-false [GOOD]
>> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive
>> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-false [GOOD]
>> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true
>> TSchemeShardExtSubDomainTest::CreateAndAlterAlterSameStoragePools-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
>> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent
>> TargetDiscoverer::Transfer [GOOD]
>> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Dirs [GOOD]
Test command err:
2025-06-03T10:26:04.436615Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667438398947001:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:04.436797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029db/r3tmp/tmpNDvpAc/pdisk_1.dat 2025-06-03T10:26:04.521826Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667438398946830:2079] 1748946364431346 != 1748946364431349 2025-06-03T10:26:04.527968Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:04.537570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:04.537621Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:04.538523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28596 TServer::EnableGrpc on GrpcPort 12612, node 1 2025-06-03T10:26:04.571859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:04.571880Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:04.571884Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:04.571934Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28596 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:26:04.641642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:04.645183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:04.649070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:04.727310Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946364690, tx_id: 1 } } } 2025-06-03T10:26:04.727331Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-03T10:26:04.731243Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Dir, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946364697, tx_id: 281474976710658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-03T10:26:04.731259Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-03T10:26:04.736132Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946364753, tx_id: 281474976710659 } }] } } 2025-06-03T10:26:04.736148Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root/Dir 2025-06-03T10:26:04.938145Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946364753, tx_id: 281474976710659 } } } 
2025-06-03T10:26:04.938158Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Dir/Table
2025-06-03T10:26:04.938164Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Dir/Table, dstPath# /Root/Replicated/Dir/Table, kind# Table
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.528328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.528356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.528364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:03.528370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.528384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.528389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.528400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.528422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.528550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.528631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.539018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.539042Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.542332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.542428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.542456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:26:03.544330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.544385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.544489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.544544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:03.545242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.545279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.545586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.545598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.545608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.545619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.545626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.545650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.546777Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.566957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.567033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.567095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.567135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.567145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:26:03.567907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.567939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.568025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.568043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.568054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.568063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.568668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.568684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.568691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:03.569039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.569049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.569059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.569068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.569782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.570253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.570297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.570489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-03T10:26:03.570515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.570523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.570593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:03.570601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.570637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.570650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.571056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.571064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.571105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ion: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.176362Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:05.176367Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:26:05.176373Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-03T10:26:05.176388Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:26:05.176847Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72075186233409546 at ss 72057594046678944 2025-06-03T10:26:05.176861Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72075186233409546 at ss 72057594046678944 2025-06-03T10:26:05.176866Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72075186233409546 at ss 72057594046678944 2025-06-03T10:26:05.176870Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72075186233409546 at ss 72057594046678944 2025-06-03T10:26:05.176984Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.176993Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:26:05.177008Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.177013Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.177019Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.177023Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.177028Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:26:05.177035Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.177041Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:26:05.177101Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:26:05.177145Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.177704Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:05.177905Z node 7 :HIVE INFO: 
tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:26:05.177944Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:05.177970Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:05.178050Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:26:05.178105Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-03T10:26:05.178177Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:26:05.178203Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:26:05.178456Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 Forgetting tablet 72075186234409547 2025-06-03T10:26:05.178573Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:26:05.178609Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:26:05.178847Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:05.179801Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 Forgetting tablet 72075186234409546 2025-06-03T10:26:05.180230Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:26:05.180299Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186234409548 2025-06-03T10:26:05.181254Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:05.181271Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:05.181327Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:26:05.181392Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:05.181403Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:05.181416Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:05.182612Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:26:05.182631Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:26:05.182649Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:26:05.182652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-03T10:26:05.185071Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:26:05.185102Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-03T10:26:05.185121Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:26:05.185126Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-03T10:26:05.186065Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:05.186096Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:26:05.186164Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:26:05.186173Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:26:05.186265Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:26:05.186294Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:26:05.186301Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:583:2522] TestWaitNotification: OK eventTxId 103 2025-06-03T10:26:05.186412Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:05.186457Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 61us result status StatusPathDoesNotExist 2025-06-03T10:26:05.186498Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst [GOOD] >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] >> TargetDiscoverer::SystemObjects >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD] >> TargetDiscoverer::InvalidCredentials >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::Transfer [GOOD] Test command err: 2025-06-03T10:26:05.026160Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667443366563264:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:05.026278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029d2/r3tmp/tmpC9ZW6t/pdisk_1.dat 2025-06-03T10:26:05.116926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:05.117077Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667443366563089:2079] 1748946365020649 != 1748946365020652 2025-06-03T10:26:05.117228Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:05.120812Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:05.165335Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24754 TServer::EnableGrpc on GrpcPort 22549, node 1 2025-06-03T10:26:05.193931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:05.193950Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:05.193953Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:05.194042Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24754 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:05.261571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:26:05.336385Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Topic, owner: root@builtin, type: Topic, size_bytes: 0, created_at: { plan_step: 1748946365369, tx_id: 281474976715658 } } } 2025-06-03T10:26:05.336408Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root/Topic 2025-06-03T10:26:05.339874Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:166: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-03T10:26:05.339890Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:181: [TargetDiscoverer][rid 1] Describe topic succeeded: path# /Root/Topic 2025-06-03T10:26:05.339898Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:191: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Topic, dstPath# /Root/Replicated/Table, kind# Transfer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterNameConflicts-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.766105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.766139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.766145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:03.766151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.766163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.766168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.766177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.766190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.766305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.766368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.779398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.779420Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.782714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.782813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.782843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:03.784485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.784542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.784642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.784702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:03.785341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.785391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.785643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.785652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.785662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.785669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.785675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.785690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.786929Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.805757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.805839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-03T10:26:03.805902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.805949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.805961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.806960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.806998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.807077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.807091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.807098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.807106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.807697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.807712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.807719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:03.808123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.808138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.808146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.808156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.808898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.809346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 
1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.809437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.809622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.809649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.809659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.809735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:03.809744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.809786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.809800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.810406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.810417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.810465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
2057594046678944 2025-06-03T10:26:05.448374Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:05.448381Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:05.448965Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.448981Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.448988Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:05.448997Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:05.449034Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:05.449431Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:05.449484Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:05.449679Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:05.449706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 30064773228 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:05.449715Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:05.449784Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:05.449793Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:05.449834Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:05.449847Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:05.450363Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:05.450376Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:05.450419Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:05.450425Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:206:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:05.450523Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.450532Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:26:05.450548Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:26:05.450554Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:05.450560Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:26:05.450563Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:05.450571Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:26:05.450577Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:05.450583Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:26:05.450588Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:26:05.450601Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:05.450609Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:26:05.450614Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:26:05.450710Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:26:05.450723Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, 
cookie: 1 2025-06-03T10:26:05.450729Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:26:05.450735Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:26:05.450740Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:05.450754Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:26:05.451425Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:26:05.451526Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-03T10:26:05.451680Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [7:269:2259] Bootstrap 2025-06-03T10:26:05.453871Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [7:269:2259] Become StateWork (SchemeCache [7:274:2264]) 2025-06-03T10:26:05.454711Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:05.454757Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_1" ExternalSchemeShard: true } 2025-06-03T10:26:05.454765Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 101:0, path /MyRoot/USER_1 2025-06-03T10:26:05.454801Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1125, at schemeshard: 72057594046678944 2025-06-03T10:26:05.454811Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1125, at schemeshard: 72057594046678944 2025-06-03T10:26:05.455012Z node 7 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [7:269:2259] HANDLE TEvClientConnected success 
connect from tablet# 72057594046447617 2025-06-03T10:26:05.456246Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathDoesNotExist Reason: "Invalid AlterExtSubDomain request: Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1125" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:05.456285Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Invalid AlterExtSubDomain request: Check failed: path: '/MyRoot/USER_1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_extsubdomain.cpp:1125, operation: ALTER DATABASE, path: /MyRoot/USER_1 2025-06-03T10:26:05.456418Z node 7 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests1Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight1BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterCantChangeExternalStatisticsAggregator-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.712870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.712904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.712912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:03.712919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.712933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.712937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.712947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.712964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.713102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.713190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.729407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.729435Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.733698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.733844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.733884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:03.737556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.737632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.737768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.737848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:03.738609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.738649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.738913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.738922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.738929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.738935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.738940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.738957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.740147Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.757481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.757568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.757640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.757688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.757701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.761821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.761867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.761951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.761965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.761973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.761981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.762879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.762897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.762906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:03.763586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.763601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.763610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.763619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.764404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.765252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.765315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.765527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.765565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.765581Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.765669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:03.765678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.765737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.765754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.766310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.766320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.766370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
D DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 102:0 Got OK TEvConfigureStatus from tablet# 72075186233409549 shardIdx# 72057594046678944:4 at schemeshard# 72057594046678944 2025-06-03T10:26:05.742012Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 3 -> 128 2025-06-03T10:26:05.743459Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.743526Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.743535Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.743543Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 102:0, at tablet# 72057594046678944 2025-06-03T10:26:05.743554Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 102 ready parts: 1/1 2025-06-03T10:26:05.743604Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:05.744830Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-03T10:26:05.744881Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-03T10:26:05.744976Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:05.745003Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 34359740525 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:05.745011Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-03T10:26:05.745090Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 240 2025-06-03T10:26:05.745100Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 102:0, at tablet# 72057594046678944 2025-06-03T10:26:05.745132Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.745158Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[8:361:2334], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 72075186233409549, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:26:05.745680Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:05.745689Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:05.745737Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:05.745743Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [8:208:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:26:05.745837Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.745845Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 102:0, ProgressState, NeedSyncHive: 0 2025-06-03T10:26:05.745851Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 240 -> 240 2025-06-03T10:26:05.745941Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:26:05.745952Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:26:05.745957Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:26:05.745962Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:26:05.745968Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-03T10:26:05.745981Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-03T10:26:05.746520Z node 8 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.746532Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:26:05.746551Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:26:05.746556Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:26:05.746561Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:26:05.746565Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:26:05.746569Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:26:05.746582Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [8:302:2292] message: TxId: 102 2025-06-03T10:26:05.746590Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:26:05.746596Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:26:05.746601Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:26:05.746636Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.746745Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:26:05.747025Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:26:05.747035Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [8:515:2453] TestWaitNotification: OK eventTxId 102 TestModificationResults wait txId: 103 2025-06-03T10:26:05.747845Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:05.747878Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "USER_0" ExternalStatisticsAggregator: false } 2025-06-03T10:26:05.747885Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 103:0, path /MyRoot/USER_0 2025-06-03T10:26:05.747915Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 103:0, 
explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-03T10:26:05.747922Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, at schemeshard: 72057594046678944 2025-06-03T10:26:05.748375Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:05.748409Z node 8 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: ExternalStatisticsAggregator could only be added, not removed, operation: ALTER DATABASE, path: /MyRoot/USER_0 TestModificationResult got TxId: 103, wait until txId: 103
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::CreateAndAlterThenDropChangesParent [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:04.057486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:04.057515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:04.057529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:04.057534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:04.057547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:04.057551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:04.057560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:04.057575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:04.057716Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:04.057794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:04.072199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:04.072225Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:04.080244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:04.080427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:04.080465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:04.084236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:04.084350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:04.084548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.084658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:04.085981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:04.086077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:04.086539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:04.086567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:04.086585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:04.086611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:04.086619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:04.086657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.088874Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:04.116191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-03T10:26:04.116274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.116342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:04.116400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:04.116419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.117273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.117326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:04.117382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.117390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:04.117395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:04.117399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:04.117968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.117983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:04.117989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:04.118455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.118466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.118476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.118484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:04.119069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:04.119481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:04.119514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:04.119655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.119679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:04.119685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.119744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:04.119750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.119774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:04.119783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:04.120163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:04.120169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:04.120197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
wnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.790609Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:05.790618Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:26:05.790623Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:05.790911Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.790925Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.790929Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:05.790938Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:26:05.790943Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.790961Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:26:05.791319Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:26:05.791332Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:26:05.791338Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:26:05.791378Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.791385Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:26:05.791399Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.791405Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.791410Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.791414Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.791419Z node 7 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:26:05.791425Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.791432Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:26:05.791436Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:26:05.791474Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:26:05.791658Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:05.791866Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:26:05.791916Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:26:05.791946Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:05.792007Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:26:05.792286Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:05.793251Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:26:05.793327Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 Forgetting tablet 72075186233409548 2025-06-03T10:26:05.793726Z node 7 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-03T10:26:05.793918Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:26:05.793955Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:05.794020Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:05.796876Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-06-03T10:26:05.796904Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:05.796949Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:26:05.797150Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:05.797155Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:05.797165Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:05.797794Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:26:05.797807Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:26:05.797821Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:26:05.797825Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:26:05.797855Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:26:05.797859Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:26:05.798034Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:05.798080Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:26:05.798170Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:26:05.798177Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:26:05.798234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:26:05.798249Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:26:05.798253Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:532:2480] TestWaitNotification: OK eventTxId 103 2025-06-03T10:26:05.798322Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:05.798356Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 52us result status StatusPathDoesNotExist 2025-06-03T10:26:05.798406Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::AlterTwiceAndWithPlainAlterSubdomain-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:04.107085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:04.107126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:04.107133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:04.107139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:04.107151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:04.107156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:04.107166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:04.107182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit#
10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:04.107315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:04.107401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:04.122760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:04.122787Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:04.127343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:04.127464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:04.127503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:04.129800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:04.129861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:04.130002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.130079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:04.130906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:04.130953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:04.131251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:04.131262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:04.131272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:04.131284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:04.131290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:04.131313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.132826Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:04.157180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain 
SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:04.157256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.157326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:04.157372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:04.157384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.161260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.161321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:04.161381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.161393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:04.161399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:04.161405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:04.164871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.164893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:04.164901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:04.165453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.165468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:04.165479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.165486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:04.166298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:04.166825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:04.166881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:04.167075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:04.167103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:04.167110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.167196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:04.167206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:04.167242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:04.167256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:04.167752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:04.167764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:04.167806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
pose ProgressState leave, operationId 103:0, at tablet# 72057594046678944 2025-06-03T10:26:05.867253Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.867305Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:05.867570Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816 2025-06-03T10:26:05.867608Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 103 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000004 2025-06-03T10:26:05.867691Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:05.867712Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 30064773228 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:05.867719Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-03T10:26:05.867790Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 128 -> 240 2025-06-03T10:26:05.867797Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 103:0, at tablet# 72057594046678944 2025-06-03T10:26:05.867827Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.867869Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:575: Send TEvUpdateTenantSchemeShard, to actor: [7:392:2361], msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4, at schemeshard: 72057594046678944 2025-06-03T10:26:05.868283Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5827: Handle TEvUpdateTenantSchemeShard, at schemeshard: 72075186234409546, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4 2025-06-03T10:26:05.868307Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:79: TTxUpdateTenant DoExecute, msg: TabletId: 72057594046678944 Generation: 2 StoragePools { Name: "pool-1" Kind: "hdd" } SubdomainVersion: 4, at schemeshard: 72075186234409546 2025-06-03T10:26:05.868347Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:588: Cannot publish paths for unknown operation id#0 
FAKE_COORDINATOR: Erasing txId 103 2025-06-03T10:26:05.868416Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:05.868423Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:05.868479Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:05.868484Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:26:05.868506Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.868514Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 103:0, ProgressState, NeedSyncHive: 0 2025-06-03T10:26:05.868519Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 240 -> 240 2025-06-03T10:26:05.868739Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.868755Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:05.868761Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:05.868767Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 6 2025-06-03T10:26:05.868774Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7 2025-06-03T10:26:05.868793Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:26:05.869158Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5814: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 2 TabletID: 72075186234409546 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 4 UserAttributesVersion: 1 TenantHive: 72075186233409546 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-03T10:26:05.869175Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:05.869196Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, 
pathId: [OwnerId: 72057594046678944, LocalPathId: 2], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 2], Generation: 2, ActorId:[7:392:2361], EffectiveACLVersion: 0, SubdomainVersion: 4, UserAttributesVersion: 1, TenantHive: 72075186233409546, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 4, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 72075186233409546, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:05.869225Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186234409546 2025-06-03T10:26:05.869229Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186234409546, txId: 0, path id: [OwnerId: 72075186234409546, LocalPathId: 1] 2025-06-03T10:26:05.869261Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186234409546 2025-06-03T10:26:05.869265Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:484:2425], at schemeshard: 72075186234409546, txId: 0, path id: 1 2025-06-03T10:26:05.869569Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186234409546, msg: Owner: 72075186234409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186234409546, cookie: 0 2025-06-03T10:26:05.869678Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:05.869688Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:26:05.869702Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.869706Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.869712Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:05.869715Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.869720Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:26:05.869726Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:05.869732Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:26:05.869737Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:26:05.869751Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:26:05.869793Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:05.869801Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 104 TestModificationResults wait txId: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 103 2025-06-03T10:26:05.870166Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:26:05.870177Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:26:05.870273Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:26:05.870292Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:26:05.870297Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [7:566:2505] TestWaitNotification: OK eventTxId 103
>> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly
>> TSchemeShardExtSubDomainTest::StatisticsAggregatorSync-AlterDatabaseCreateHiveFirst-true [GOOD]
>> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false
>> TargetDiscoverer::SystemObjects [GOOD]
>> TargetDiscoverer::IndexedTable
|61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest
>> KqpLocksTricky::TestNoWrite [GOOD]
>> KqpSinkLocks::DifferentKeyUpdate
>> TargetDiscoverer::InvalidCredentials [GOOD]
|61.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut
>> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD]
|61.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut
|61.0%| [LD] {RESULT} $(B)/ydb/core/client/server/ut/ydb-core-client-server-ut
>> BSCReadOnlyPDisk::ReadOnlyOneByOne
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::Drop-AlterDatabaseCreateHiveFirst-ExternalHive [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.729928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.729958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.729965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:26:03.729970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.729984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.729989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.730004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.730040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.730173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.730267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.745241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.745267Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.755299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.755457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.755497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:03.781726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.781801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.781946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.782026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:03.785987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.786076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.786480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.786497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.786509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.786523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.786532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.786561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.788595Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.814317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.814404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.814473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.814528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.814540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.815314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.815349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.815411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.815422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.815430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.815437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.815932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.815944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.815950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 
2025-06-03T10:26:03.816333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.816345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.816352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.816361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.817231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.817716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.817758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.817981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.818031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.818041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.818128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:03.818138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.818176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.818191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.818698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.818708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.818756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 2075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186234409547 2025-06-03T10:26:06.224378Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:06.224481Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 Forgetting tablet 72075186234409547 2025-06-03T10:26:06.224798Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:26:06.224854Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:26:06.224998Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186234409546 2025-06-03T10:26:06.225087Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:26:06.225164Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409549 2025-06-03T10:26:06.225173Z node 6 :TX_DATASHARD ERROR: datashard.cpp:3573: Datashard's schemeshard pipe destroyed while no messages to sent at 72075186234409550 2025-06-03T10:26:06.225221Z node 6 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186234409548 Forgetting tablet 72075186234409546 2025-06-03T10:26:06.225263Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:26:06.225318Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:26:06.225622Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Forgetting tablet 72075186234409548 2025-06-03T10:26:06.227131Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:26:06.227218Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:06.227618Z node 6 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:06.227632Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:06.227671Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:26:06.233955Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:06.233986Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:26:06.234038Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:06.235095Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:26:06.235117Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:26:06.235142Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:26:06.235146Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186234409547 2025-06-03T10:26:06.235160Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:26:06.235166Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186234409546 2025-06-03T10:26:06.235247Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:26:06.235255Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186234409548 2025-06-03T10:26:06.235648Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:06.235671Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-03T10:26:06.235767Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-03T10:26:06.235777Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-03T10:26:06.235872Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 
2025-06-03T10:26:06.235900Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:26:06.235906Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [6:794:2700] TestWaitNotification: OK eventTxId 105 2025-06-03T10:26:06.236040Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:06.236088Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir/table_1" took 71us result status StatusPathDoesNotExist 2025-06-03T10:26:06.236141Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/dir/table_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/dir/table_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:06.236218Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:06.236234Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 18us result status StatusPathDoesNotExist 2025-06-03T10:26:06.236257Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:06.236318Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:06.236346Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 30us result status StatusSuccess 2025-06-03T10:26:06.236435Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BSCReadOnlyPDisk::ReadOnlySlay ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::SystemObjects [GOOD] Test command err: 2025-06-03T10:26:06.005313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667444538867022:2258];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:06.005473Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029ca/r3tmp/tmpZM2Ja5/pdisk_1.dat 2025-06-03T10:26:06.171237Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667440243899507:2079] 1748946366003306 != 1748946366003309 2025-06-03T10:26:06.171557Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:06.172272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:06.172295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:06.174402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11590 TServer::EnableGrpc on GrpcPort 28390, node 1 2025-06-03T10:26:06.221574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:06.221589Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:06.221591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:06.221646Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11590 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:06.336561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:06.353592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:26:06.355115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:26:06.393514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-03T10:26:06.394768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:06.453428Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946366384, tx_id: 1 } } } 2025-06-03T10:26:06.453446Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-03T10:26:06.473610Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946366426, tx_id: 281474976710658 } }, { name: export-100500, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946366440, tx_id: 281474976710659 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-03T10:26:06.473632Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-03T10:26:06.587528Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946366426, tx_id: 281474976710658 } } } 2025-06-03T10:26:06.587544Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-03T10:26:06.587549Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::InvalidCredentials [GOOD] Test command err: 2025-06-03T10:26:06.013814Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667444625958631:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:06.013936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029c4/r3tmp/tmpbe6UyM/pdisk_1.dat 2025-06-03T10:26:06.145471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:06.145512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:06.146549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:06.151554Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:17762 TServer::EnableGrpc on GrpcPort 62429, node 1 2025-06-03T10:26:06.213593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:06.213611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-03T10:26:06.213615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:06.213669Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17762 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:06.390445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:06.409705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:06.426287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:06.605244Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: CLIENT_UNAUTHENTICATED, issues: {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } } } 2025-06-03T10:26:06.605265Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root, status# CLIENT_UNAUTHENTICATED, issues# {
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/types/credentials/login/login.cpp:217: Cannot find user: user } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetBrokenDiskInBrokenGroupReadOnly [GOOD] Test command err: RandomSeed# 1656036406146976234 2025-06-03T10:26:07.042229Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042263Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042274Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042284Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042294Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042303Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042313Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042322Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042468Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042488Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042502Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042512Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042524Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042536Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 
1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042547Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042559Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.042571Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042624Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042635Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042641Z 8 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042646Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042650Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042654Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.042658Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:07.043032Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043044Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043050Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043057Z 8 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043065Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 
VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043073Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043080Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.043088Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:07.080815Z 1 00h01m30.011024s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# true EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {} ReadLogReplies# {}} reason# Yard::Init failed, errorReason# "Some error reason" status# CORRUPTED;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR |61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed >> TargetDiscoverer::IndexedTable [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1BlobSize1000 >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-false [GOOD] >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] |61.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest |61.0%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_target_discoverer/unittest >> TargetDiscoverer::IndexedTable [GOOD] Test command err: 2025-06-03T10:26:07.008689Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667451079560037:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:07.008824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029bf/r3tmp/tmpUuqTsG/pdisk_1.dat 2025-06-03T10:26:07.114842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:07.114870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:07.117097Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:07.117411Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667451079559863:2079] 1748946367007222 != 1748946367007225 2025-06-03T10:26:07.120189Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26807 TServer::EnableGrpc on GrpcPort 16495, node 1 2025-06-03T10:26:07.157528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:07.157544Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:07.157546Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:07.157589Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26807 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:07.233326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:07.236869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:07.238243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:07.343957Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { name: Root, owner: root@builtin, type: Directory, size_bytes: 0, created_at: { plan_step: 1748946367280, tx_id: 1 } } } 2025-06-03T10:26:07.343973Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:42: [TargetDiscoverer][rid 1] Describe path succeeded: path# /Root 2025-06-03T10:26:07.363416Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:247: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvListDirectoryResponse { Result: { children [{ name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946367343, tx_id: 281474976715658 } }, { name: .sys, owner: , type: Directory, size_bytes: 0, created_at: { plan_step: 0, tx_id: 0 } }] } } 2025-06-03T10:26:07.363432Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:260: [TargetDiscoverer][rid 1] Listing succeeded: path# /Root 2025-06-03T10:26:07.573385Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:98: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946367343, tx_id: 281474976715658 } } } 2025-06-03T10:26:07.573402Z node 1 :REPLICATION_CONTROLLER DEBUG: target_discoverer.cpp:113: [TargetDiscoverer][rid 1] Describe table succeeded: path# /Root/Table 2025-06-03T10:26:07.573409Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:120: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table, dstPath# /Root/Replicated/Table, kind# Table 2025-06-03T10:26:07.573428Z node 1 :REPLICATION_CONTROLLER INFO: target_discoverer.cpp:140: [TargetDiscoverer][rid 1] Add target: srcPath# /Root/Table/Index, dstPath# /Root/Replicated/Table/Index/indexImplTable, kind# IndexTable >> TGRpcYdbTest::MakeListRemoveDirectory ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::SetGoodDiskInBrokenGroupReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 10072961630615817656 2025-06-03T10:26:08.220103Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220170Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220193Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220215Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220236Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220257Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.220279Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222395Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222499Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222522Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222539Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222557Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222576Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222595Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.222627Z 1 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222639Z 6 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222648Z 7 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222669Z 2 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222684Z 3 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 
VDISK[82000000:_:0:2:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222693Z 4 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.222701Z 5 00h00m30.010512s :BS_VDISK_OTHER ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) CheckPDiskResponse: Recoverable error from PDisk: Status# 'CORRUPTED' StatusFlags# 'None' ErrorReason# 'Some error reason' 2025-06-03T10:26:08.223406Z 1 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223438Z 6 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223453Z 7 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223476Z 2 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223494Z 3 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223511Z 4 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 2025-06-03T10:26:08.223528Z 5 00h00m30.010512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:4:0]: (2181038080) SkeletonFront: got TEvPDiskErrorStateChange;State# NoWrites, PDiskError# Some error reason Marker# BSVSF03 >> YdbYqlClient::ConnectDbAclIsStrictlyChecked >> Mirror3dc::GcQuorum [GOOD] >> Mirror3dcRestore::TestRestore >> YdbMonitoring::SelfCheck >> YdbS3Internal::TestS3Listing >> TTableProfileTests::UseDefaultProfile |61.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |61.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index |61.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index/ydb-core-tx-schemeshard-ut_index >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] >> TTxDataShardMiniKQL::CrossShard_6_Local [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx >> YdbOlapStore::ManyTables >> ClientStatsCollector::PrepareQuery >> YdbYqlClient::TestTzTypesFullStack >> TYqlDateTimeTests::SimpleUpsertSelect >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] >> TGRpcYdbTest::MakeListRemoveDirectory [GOOD] >> TGRpcYdbTest::GetOperationBadRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain/unittest >> TSchemeShardExtSubDomainTest::SchemeQuotas-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:03.536486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:03.536509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.536520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:03.536525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:03.536534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:03.536538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:03.536546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:03.536565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:03.536668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:03.536741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:03.547148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:03.547165Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:03.550387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:03.550488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:03.550527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:03.553676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:03.553746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:03.553865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.553926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-06-03T10:26:03.554767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.554804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:03.555049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.555056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:03.555063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:03.555068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.555072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:03.555087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.556596Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:03.577900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:03.577986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.578068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:03.578120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:03.578133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.578911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.578940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:03.578986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.578995Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:03.579001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:03.579008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:03.579518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.579530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:03.579536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:03.579932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.579941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:03.579947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.579954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:03.580718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:03.581213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:03.581254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:03.581491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:03.581519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:03.581532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.581610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 
2025-06-03T10:26:03.581619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:03.581652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:03.581666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:03.582169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:03.582177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:03.582211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... HARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:26:09.264378Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 116, path id: [OwnerId: 72075186233409546, LocalPathId: 9] 2025-06-03T10:26:09.264395Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:26:09.264401Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:716:2617], at schemeshard: 72075186233409546, txId: 116, path id: 1 2025-06-03T10:26:09.264408Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [7:716:2617], at schemeshard: 72075186233409546, txId: 116, path id: 9 2025-06-03T10:26:09.264529Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-03T10:26:09.264539Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 116:0 ProgressState, operation type: TxCreateTable, at tablet# 72075186233409546 2025-06-03T10:26:09.264585Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 116:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-03T10:26:09.264734Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 
116 2025-06-03T10:26:09.264751Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 1 Version: 16 PathOwnerId: 72075186233409546, cookie: 116 2025-06-03T10:26:09.264756Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-03T10:26:09.264761Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 16 2025-06-03T10:26:09.264768Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 13 2025-06-03T10:26:09.264886Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-03T10:26:09.264899Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 3 LocalPathId: 9 Version: 1 PathOwnerId: 72075186233409546, cookie: 116 2025-06-03T10:26:09.264903Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 116 2025-06-03T10:26:09.264909Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 116, pathId: [OwnerId: 72075186233409546, LocalPathId: 9], version: 1 2025-06-03T10:26:09.264913Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 9] was 4 2025-06-03T10:26:09.264924Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 116, ready parts: 0/1, is published: true 2025-06-03T10:26:09.265502Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72057594037968897 cookie: 72075186233409546:11 msg type: 268697601 2025-06-03T10:26:09.265536Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72057594037968897 2025-06-03T10:26:09.265543Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1780: TOperation RegisterRelationByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-03T10:26:09.265650Z node 7 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72075186233409546 OwnerIdx: 11 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 9 BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } BindedChannels { StoragePoolName: "/dc-1/users/tenant-1:hdd" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-03T10:26:09.265708Z node 7 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72075186233409546, OwnerIdx 11, type DataShard, boot OK, tablet id 
72075186233409556 2025-06-03T10:26:09.265758Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5872: Handle TEvCreateTabletReply at schemeshard: 72075186233409546 message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-03T10:26:09.265765Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1794: TOperation FindRelatedPartByShardIdx, TxId: 116, shardIdx: 72075186233409546:11, partId: 0 2025-06-03T10:26:09.265784Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 116:0, at schemeshard: 72075186233409546, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-03T10:26:09.265791Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:175: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, at tabletId: 72075186233409546 2025-06-03T10:26:09.265799Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:178: TCreateParts opId# 116:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72075186233409546 OwnerIdx: 11 TabletID: 72075186233409556 Origin: 72057594037968897 2025-06-03T10:26:09.265822Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 116:0 2 -> 3 2025-06-03T10:26:09.265954Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-03T10:26:09.266498Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 116 2025-06-03T10:26:09.266652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-03T10:26:09.266681Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 116:0, at schemeshard: 72075186233409546 2025-06-03T10:26:09.266688Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:200: TCreateTable TConfigureParts operationId# 116:0 ProgressState at tabletId# 72075186233409546 2025-06-03T10:26:09.266700Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:220: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 seqNo: 3:8 2025-06-03T10:26:09.266782Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:236: TCreateTable TConfigureParts operationId# 116:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409556 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 666 RawX2: 30064773650 } TxBody: "\n\236\004\n\007Table11\020\t\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 
\020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 \020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\n\000\220\000\000\020\000\001\020\t:\004\010\003\020\010" TxId: 116 ExecLevel: 0 Flags: 0 SchemeShardId: 72075186233409546 ProcessingParams { Version: 3 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } SubDomainPathId: 1 2025-06-03T10:26:09.267505Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 116:0 from tablet: 72075186233409546 to tablet: 72075186233409556 cookie: 72075186233409546:11 msg type: 269549568 2025-06-03T10:26:09.267548Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 116, partId: 0, tablet: 72075186233409556 TestModificationResult got TxId: 116, wait until txId: 116 TestModificationResults wait txId: 117 2025-06-03T10:26:09.272602Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateTable CreateTable { Name: "Table12" Columns { Name: "key" Type: "Uint32" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "key" } } TxId: 117 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-03T10:26:09.273146Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 117, response: Status: StatusQuotaExceeded Reason: "Request exceeded a limit on the number of schema operations, try again later." 
TxId: 117 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-03T10:26:09.273184Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 117, database: /MyRoot/USER_0, subject: , status: StatusQuotaExceeded, reason: Request exceeded a limit on the number of schema operations, try again later., operation: CREATE TABLE, path: /MyRoot/USER_0/Table12 TestModificationResult got TxId: 117, wait until txId: 117 >> YdbYqlClient::TestReadTableMultiShardWholeTable >> YdbYqlClient::ConnectDbAclIsStrictlyChecked [GOOD] >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlySlay [GOOD] Test command err: RandomSeed# 14828950827867034714 2025-06-03T10:26:08.319180Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:26:08.319601Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 13141971958059011811] 2025-06-03T10:26:08.321047Z 1 00h01m14.361536s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 |61.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |61.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk |61.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_restart_pdisk/blobstorage-ut_blobstorage-ut_restart_pdisk >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight10BlobSize1000 >> YdbMonitoring::SelfCheck [GOOD] >> YdbMonitoring::SelfCheckWithNodesDying >> YdbS3Internal::TestS3Listing [GOOD] >> YdbS3Internal::TestAccessCheck >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] >> test.py::test[window-win_func_special--Results] [GOOD] >> test.py::test[window-win_inline_spec-default.txt-Results] >> TGRpcYdbTest::GetOperationBadRequest [GOOD] >> TGRpcYdbTest::OperationTimeout |61.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |61.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut |61.1%| [LD] {RESULT} $(B)/ydb/core/statistics/service/ut/ydb-core-statistics-service-ut >> TTableProfileTests::UseDefaultProfile [GOOD] >> TTableProfileTests::UseTableProfilePreset >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] >> ClientStatsCollector::PrepareQuery [GOOD] >> ClientStatsCollector::CounterCacheMiss ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyNotAllowed [GOOD] Test command err: RandomSeed# 4309072112580081999 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::MemoryUsageImmediateHugeTx [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:115:2057] recipient: [1:109:2140] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:115:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:132:2154] sender: 
[1:133:2057] recipient: [1:109:2140] 2025-06-03T10:25:51.218216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:51.218256Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:51.219286Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:51.222963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:51.223181Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:51.223270Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:51.235417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:51.238849Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:51.238885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:51.239090Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:51.239102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:51.239110Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:51.239187Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:51.239201Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:51.239216Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 Leader for TabletID 9437184 is [1:132:2154] sender: [1:211:2057] recipient: [1:14:2061] 2025-06-03T10:25:51.273732Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:51.294387Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:51.294515Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:51.294547Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2213] 2025-06-03T10:25:51.294557Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:51.294564Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:51.294571Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.294666Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.294678Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.294804Z node 1 :TX_DATASHARD 
DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:51.294841Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:51.294856Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.294866Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:51.294876Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:51.294887Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:51.294893Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:51.294899Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:51.294905Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:51.294922Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:212:2210], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.294929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.294945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2209], serverId# [1:212:2210], sessionId# [0:0:0] 2025-06-03T10:25:51.295918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:51.295951Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:51.295983Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:51.296040Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:51.296056Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:51.296070Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:51.296081Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:51.296087Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:51.296095Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:51.296101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.296220Z node 1 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:51.296226Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:51.296232Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:51.296237Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.296259Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:51.296263Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:51.296267Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:51.296272Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.296280Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:51.307924Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:51.307964Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.307975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.307992Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:51.308035Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:51.308197Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:222:2219], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.308207Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.308217Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2218], serverId# [1:222:2219], sessionId# [0:0:0] 2025-06-03T10:25:51.308243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:51.308249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:51.308303Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.308313Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-03T10:25:51.308318Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:51.308325Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 
2025-06-03T10:25:51.309338Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:51.309369Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.309469Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.309479Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.309493Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.309504Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:51.309511Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... shard_impl.h:3129: StateWork, received event# 268830214, Sender [20:290:2272], Recipient [20:236:2228]: NKikimrTabletBase.TEvGetCounters 2025-06-03T10:26:09.837487Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269551617, Sender [20:100:2134], Recipient [20:236:2228]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 100 RawX2: 85899348054 } 2025-06-03T10:26:09.837509Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-03T10:26:09.837602Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [20:295:2276], Recipient [20:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:09.837609Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:09.837617Z node 20 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [20:294:2275], serverId# [20:295:2276], sessionId# [0:0:0] 2025-06-03T10:26:09.837667Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [20:100:2134], Recipient [20:236:2228]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 100 RawX2: 85899348054 } TxBody: "\032\324\002\037\002\006Arg\005\205\n\205\000\205\004?\000\205\002\202\0047\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\004\01057$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020T\001\005?\026)\211\n?\024\206\203\004?\024? ?\024\203\004\020Fold\000)\211\002?\"\206? \034Collect\000)\211\006?(? \203\004\203\0024ListFromRange\000\003? \000\003?,\003\022z\003?.\004\007\010\000\n\003?\024\000)\251\000? \002\000\004)\251\000?\024\002\000\002)\211\006?$\203\005@? 
?\024\030Invoke\000\003?F\006Add?@?D\001\006\002\014\000\007\016\000\003\005?\010?\014\006\002?\006?R\000\003?\014?\014\037/ \0018\000" TxId: 2 ExecLevel: 0 Flags: 0 2025-06-03T10:26:09.837674Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:26:09.837709Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:09.837968Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CheckDataTx 2025-06-03T10:26:09.838008Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:26:09.838014Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CheckDataTx 2025-06-03T10:26:09.838021Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:26:09.838026Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:26:09.838038Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.838057Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:2] at 9437184 2025-06-03T10:26:09.838065Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:26:09.838069Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:26:09.838075Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:26:09.838080Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:09.838091Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.838102Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:124: Operation [0:2] at 9437184 requested 132374 more memory 2025-06-03T10:26:09.838111Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-03T10:26:09.838195Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:09.838200Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:09.838205Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.838760Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 132502 and requests 1060016 more for the next try 2025-06-03T10:26:09.838797Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-03T10:26:09.838804Z node 20 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-03T10:26:09.838848Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:09.838853Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:09.838968Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-03T10:26:09.838976Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.839109Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 1192518 and requests 9540144 more for the next try 2025-06-03T10:26:09.839121Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-03T10:26:09.839126Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-03T10:26:09.839149Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:09.839153Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:09.839220Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-03T10:26:09.839226Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.839351Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:2] at 9437184 exceeded memory limit 10732662 and requests 85861296 more for the next try 2025-06-03T10:26:09.839361Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 2 released its data 2025-06-03T10:26:09.839366Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Restart 2025-06-03T10:26:09.839386Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:09.839390Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:09.839446Z node 20 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 2 at 9437184 restored its data 2025-06-03T10:26:09.839452Z node 20 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v2/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:26:09.911762Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:2] at tablet 9437184 with status COMPLETE 2025-06-03T10:26:09.911821Z node 20 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:2] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 8, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:26:09.911849Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is 
ExecutedNoMoreRestarts 2025-06-03T10:26:09.911860Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:26:09.911867Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit FinishPropose 2025-06-03T10:26:09.911875Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:26:09.911926Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:26:09.911932Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit FinishPropose 2025-06-03T10:26:09.911937Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:2] at 9437184 to execution unit CompletedOperations 2025-06-03T10:26:09.911942Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:2] at 9437184 on unit CompletedOperations 2025-06-03T10:26:09.911961Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:2] at 9437184 is Executed 2025-06-03T10:26:09.911966Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:2] at 9437184 executing on unit CompletedOperations 2025-06-03T10:26:09.911972Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:2] at 9437184 has finished 2025-06-03T10:26:09.923032Z node 20 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:26:09.923064Z node 20 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:2] at 9437184 on unit FinishPropose 2025-06-03T10:26:09.923078Z node 20 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 2 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-03T10:26:09.923115Z node 20 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:26:09.923426Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [20:300:2281], Recipient [20:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:09.923439Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:09.923447Z node 20 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [20:299:2280], serverId# [20:300:2281], sessionId# [0:0:0] 2025-06-03T10:26:09.923487Z node 20 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830214, Sender [20:298:2279], Recipient [20:236:2228]: NKikimrTabletBase.TEvGetCounters >> YdbYqlClient::TestTzTypesFullStack [GOOD] >> YdbYqlClient::TestVariant >> YdbYqlClient::ConnectDbAclIsOffWhenYdbRequestsWithoutDatabase [GOOD] >> YdbYqlClient::CopyTables >> YdbImport::Simple >> TTxDataShardMiniKQL::CrossShard_2_SwapAndCopy [GOOD] >> TTxDataShardMiniKQL::CrossShard_3_AllToOne >> YdbYqlClient::TestReadTableMultiShardWholeTable [GOOD] >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot >> TYqlDateTimeTests::SimpleUpsertSelect [GOOD] >> TYqlDateTimeTests::DatetimeKey |61.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/fqrun/fqrun |61.1%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/tests/tools/fqrun/fqrun |61.1%| [LD] {RESULT} $(B)/ydb/tests/tools/fqrun/fqrun >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests100Inflight10BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::TableStatsHistograms [GOOD] Test command err: 2025-06-03T10:25:51.780470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:51.780501Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:51.781934Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:51.786219Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:51.786382Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:51.786450Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:51.796650Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:51.800759Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:51.800805Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:51.801010Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:51.801021Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:51.801037Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:51.801104Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:51.801186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:51.801202Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:51.829676Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:51.839420Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:51.839520Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:51.839548Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:51.839555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:51.839561Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: 
WaitScheme 2025-06-03T10:25:51.839567Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.839628Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.839637Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.839734Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:51.839765Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:51.839775Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.839783Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:51.839791Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:51.839797Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:51.839802Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:51.839808Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:51.839814Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:51.839830Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.839837Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.839851Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:51.840422Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:51.840444Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:51.840465Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:51.840505Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:51.840518Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:51.840534Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:51.840543Z node 1 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:51.840547Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:51.840553Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:51.840558Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.840638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:51.840644Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:51.840649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:51.840653Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.840665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:51.840669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:51.840673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:51.840678Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.840684Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:51.851707Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:51.851739Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.851747Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.851759Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:51.851795Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:51.851933Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.851943Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.851951Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:51.851963Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:51.851968Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 
2025-06-03T10:25:51.852018Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.852028Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-03T10:25:51.852037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:51.852043Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:51.852893Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:51.852916Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.852983Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.852992Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.853003Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.853011Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:51.853016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:51.853024Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-03T10:25:51.853029Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 
pp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-03T10:26:11.238175Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:26:11.238182Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:26:11.238188Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:11.238200Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:26:11.238345Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1002] at tablet 9437184 with status COMPLETE 2025-06-03T10:26:11.238362Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1002] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:26:11.238380Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:26:11.238385Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:26:11.238389Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit FinishPropose 2025-06-03T10:26:11.238395Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit FinishPropose 2025-06-03T10:26:11.238406Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is DelayComplete 2025-06-03T10:26:11.238410Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit FinishPropose 2025-06-03T10:26:11.238415Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1002] at 9437184 to execution unit CompletedOperations 2025-06-03T10:26:11.238420Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1002] at 9437184 on unit CompletedOperations 2025-06-03T10:26:11.238433Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1002] at 9437184 is Executed 2025-06-03T10:26:11.238442Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1002] at 9437184 executing on unit CompletedOperations 2025-06-03T10:26:11.238447Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1002] at 9437184 has finished 2025-06-03T10:26:11.257723Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:26:11.257765Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1002] at 9437184 on unit FinishPropose 2025-06-03T10:26:11.257778Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1002 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: COMPLETE 2025-06-03T10:26:11.257814Z node 3 :TX_DATASHARD DEBUG: 
cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 .2025-06-03T10:26:11.265086Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269551617, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvGetShardState Source { RawX1: 100 RawX2: 12884904022 } 2025-06-03T10:26:11.265126Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3132: StateWork, processing event TEvDataShard::TEvGetShardState 2025-06-03T10:26:11.265555Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:4551:6466], Recipient [3:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:11.265573Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:11.265584Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4550:6465], serverId# [3:4551:6466], sessionId# [0:0:0] 2025-06-03T10:26:11.265713Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [3:100:2134], Recipient [3:236:2228]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 100 RawX2: 12884904022 } TxBody: "\032\265\002\037\000\005\205\n\205\000\205\004?\000\205\002\202\0041\034MyReads MyWrites\205\004?\000\206\202\024Reply\024Write?\000?\000 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\000\005?\004?\014\005?\002)\211\006\202\203\005\004\213\002\203\004\205\002\203\001H\01056$UpdateRow\000\003?\016 h\020\000\000\000\000\000\000\r\000\000\000\000\000\000\000\013?\022\003?\020\235\017\001\005?\026\003?\024\322ImInShard111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\001\007\002\000\003\005?\010?\014\006\002?\006?$\000\003?\014?\014\037/ \0018\000" TxId: 1003 ExecLevel: 0 Flags: 0 2025-06-03T10:26:11.265724Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:26:11.265760Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:26:11.265951Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CheckDataTx 2025-06-03T10:26:11.266001Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-03T10:26:11.266008Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CheckDataTx 2025-06-03T10:26:11.266016Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:26:11.266022Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:26:11.266035Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:26:11.266061Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:1003] at 9437184 2025-06-03T10:26:11.266069Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-03T10:26:11.266075Z node 3 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:26:11.266080Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:26:11.266085Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit ExecuteDataTx 2025-06-03T10:26:11.266095Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:26:11.266226Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:1003] at tablet 9437184 with status COMPLETE 2025-06-03T10:26:11.266242Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:1003] at 9437184: {NSelectRow: 0, NSelectRange: 0, NUpdateRow: 1, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 109, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:26:11.266257Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:26:11.266262Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:26:11.266266Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit FinishPropose 2025-06-03T10:26:11.266270Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit FinishPropose 2025-06-03T10:26:11.266280Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is DelayComplete 2025-06-03T10:26:11.266286Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit FinishPropose 2025-06-03T10:26:11.266290Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1003] at 9437184 to execution unit CompletedOperations 2025-06-03T10:26:11.266294Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1003] at 9437184 on unit CompletedOperations 2025-06-03T10:26:11.266305Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1003] at 9437184 is Executed 2025-06-03T10:26:11.266309Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1003] at 9437184 executing on unit CompletedOperations 2025-06-03T10:26:11.266313Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:1003] at 9437184 has finished 2025-06-03T10:26:11.285491Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 9437184, table# 1001, finished edge# 0, ts 1970-01-01T00:00:00.000000Z 2025-06-03T10:26:11.285526Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 9437184, table# 1001, finished edge# 0, front# 0 2025-06-03T10:26:11.286268Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:26:11.286302Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1003] at 9437184 on unit FinishPropose 
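Both immediate writes above ([0:1002] and [0:1003]) finish with identical per-operation counters: one updated row, 109 bytes written, no reads. A check driven by those counters could look like the hedged sketch below; the struct and function names are invented for illustration and only mirror the field names printed by execute_data_tx_unit.cpp.

#include <cassert>
#include <cstdint>

// Invented mirror of the counter set printed in the trace.
struct TDatashardCounters {
    uint64_t NSelectRow = 0, NSelectRange = 0;
    uint64_t NUpdateRow = 0, NEraseRow = 0;
    uint64_t UpdateRowBytes = 0, EraseRowBytes = 0;
    uint64_t InvisibleRowSkips = 0;
};

// What both immediate writes reported: a blind single-row upsert.
void CheckSingleRowUpsert(const TDatashardCounters& c) {
    assert(c.NUpdateRow == 1);        // exactly one row written
    assert(c.UpdateRowBytes == 109);  // payload size from the trace
    assert(c.NSelectRow == 0 && c.NSelectRange == 0);  // no reads performed
}

int main() {
    TDatashardCounters c;
    c.NUpdateRow = 1;
    c.UpdateRowBytes = 109;  // values reported for [0:1002] and [0:1003]
    CheckSingleRowUpsert(c);
}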
2025-06-03T10:26:11.286318Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1003 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 3 ms, status: COMPLETE 2025-06-03T10:26:11.286400Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:26:11.290157Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [3:233:2227], Recipient [3:236:2228]: NKikimr::TEvTablet::TEvFollowerGcApplied .2025-06-03T10:26:11.291491Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:4565:6479], Recipient [3:236:2228]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:11.291522Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:11.291535Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [3:4564:6478], serverId# [3:4565:6479], sessionId# [0:0:0] 2025-06-03T10:26:11.291681Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553160, Sender [3:4563:6477], Recipient [3:236:2228]: NKikimrTxDataShard.TEvGetTableStats TableId: 13 { InMemSize: 0 LastAccessTime: 1720 LastUpdateTime: 1720 } |61.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TGRpcYdbTest::OperationTimeout [GOOD] >> TGRpcYdbTest::OperationCancelAfter |61.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity |61.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/ydb-core-kqp-ut-data_integrity >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic >> YdbS3Internal::TestAccessCheck [GOOD] >> YdbS3Internal::BadRequests >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout >> ClientStatsCollector::CounterCacheMiss [GOOD] >> ClientStatsCollector::CounterRetryOperation >> YdbYqlClient::TestVariant [GOOD] >> YdbYqlClient::TestTransactionQueryError >> Mirror3dcRestore::TestRestore [GOOD] >> Mirror3of4::Compaction >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnZeroBalancerTabletIdInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst >> TYqlDateTimeTests::DatetimeKey [GOOD] >> TYqlDateTimeTests::TimestampKey >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesTimeout [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets >> YdbImport::Simple [GOOD] >> YdbIndexTable::AlterIndexImplBySuperUser >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] >> TGRpcYdbTest::OperationCancelAfter [GOOD] >> TGRpcYdbTest::KeepAlive >> YdbYqlClient::TestReadTableMultiShardWholeTableUseSnapshot [GOOD] >> YdbYqlClient::TestReadTableMultiShardWithDescribe >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesFirst [GOOD] >> 
TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailesOnNotATopic [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-03T10:26:12.117338Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:12.118684Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:12.118792Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-03T10:26:12.118802Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:12.118808Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-03T10:26:12.118824Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:12.118834Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.118845Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-03T10:26:12.119040Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:263:2254], now have 1 active actors on pipe 2025-06-03T10:26:12.119064Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:12.122229Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:12.123425Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:12.123469Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.123677Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928037] Config applied version 1 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 
MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:12.123709Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:12.123813Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:12.123883Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:271:2260] 2025-06-03T10:26:12.124509Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 2025-06-03T10:26:12.124523Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:271:2260] 2025-06-03T10:26:12.124531Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:12.124542Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:12.124775Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:274:2262], now have 1 active actors on pipe 2025-06-03T10:26:12.139635Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:12.140404Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:12.140481Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-03T10:26:12.140489Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:12.140494Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-03T10:26:12.140500Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:12.140510Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.140520Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-03T10:26:12.140681Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [2:409:2363], now have 1 active actors on pipe 2025-06-03T10:26:12.140704Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:12.140762Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928138] Config update version 2(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:12.141259Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:12.141316Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.141466Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928138] Config applied version 2 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:12.141492Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:12.141561Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:12.141595Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [2:417:2369] 2025-06-03T10:26:12.142234Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 
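The repeating "Config update version N(current M) received ... Apply new config ... Config applied version N" lines show a version-gated config handshake: an update is applied only when its version is newer than the tablet's current one, so duplicates and stale deliveries are harmless. A hedged sketch of that gate, with invented names rather than the real pq_impl.cpp types:

#include <cstdint>
#include <string>

struct TTopicConfig {
    uint64_t Version = 0;  // "current 0" before the first update lands
    std::string TopicName;
};

// Returns true when the update was applied (incoming version is newer).
bool TryApplyConfig(TTopicConfig& current, const TTopicConfig& update) {
    if (update.Version <= current.Version)
        return false;      // stale or duplicate update: ignore, just ack
    current = update;      // "Apply new config ... Config applied version N"
    return true;
}

int main() {
    TTopicConfig cur;                        // Version 0, i.e. "current 0"
    TTopicConfig upd{2, "rt3.dc1--topic2"};  // update carrying Version: 2
    bool applied = TryApplyConfig(cur, upd); // true: "Config applied version 2"
    bool again   = TryApplyConfig(cur, upd); // false: replay is a no-op
    return (applied && !again) ? 0 : 1;
}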
2025-06-03T10:26:12.142250Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [2:417:2369] 2025-06-03T10:26:12.142260Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:12.142268Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:12.142454Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [2:420:2371], now have 1 active actors on pipe 2025-06-03T10:26:12.145988Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:12.147075Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:12.147157Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-03T10:26:12.147165Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:12.147170Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-03T10:26:12.147177Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:12.147185Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.147196Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-03T10:26:12.147347Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:469:2408], now have 1 active actors on pipe 2025-06-03T10:26:12.147372Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:12.147432Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928139] Config update version 3(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:12.147978Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:12.148012Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:12.148133Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928139] Config applied version 3 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:12.148156Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:12.148222Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:12.148266Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:477:2414] 2025-06-03T10:26:12.148750Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-03T10:26:12.148762Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:477:2414] 2025-06-03T10:26:12.148771Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:12.148778Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:12.148958Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:480:2416], now have 1 active actors on pipe REQUEST MetaRequest { CmdGetReadSessionsInfo { ClientId: "client_id" Topic: "rt3.dc1--topic1" Topic: "rt3.dc1--topic2" } } Ticket: "client_id@builtin" 2025-06-03T10:26:12.150658Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:487:2419], now have 1 active actors on pipe 2025-06-03T10:26:12.150759Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [2:490:2420], now have 1 active actors on pipe 2025-06-03T10:26:12.150796Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:491:2420], now have 1 active actors on pipe 2025-06-03T10:26:12.150931Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [2:487:2419] destroyed 2025-06-03T10:26:12.151017Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928138] server disconnected, pipe [2:490:2420] destroyed 2025-06-03T10:26:12.151031Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928139] server disconnected, pipe [2:491:2420] destroyed RESULT Status: 1 ErrorCode: OK MetaResponse { CmdGetReadSessionsInfoResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 0 ErrorCode: INITIALIZING ErrorReason: "tablet for partition is not running" } PartitionResult { Partition: 1 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } PartitionResult { Partition: 2 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 ClientOffset: 0 StartOffset: 0 EndOffset: 0 TimeLag: 0 TabletNode: "::1" ClientReadOffset: 0 ReadTimeLag: 0 TabletNodeId: 2 ErrorCode: OK } ErrorCode: OK } } } Assert failed: Check 
response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 >> TTableProfileTests::UseTableProfilePreset [GOOD] >> TTableProfileTests::UseTableProfilePresetViaSdk >> YdbYqlClient::CopyTables [GOOD] >> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy >> KqpSinkLocks::DifferentKeyUpdate [GOOD] |61.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/erasure/ut/unittest >> TErasureTypeTest::TestAllSpeciesCrcWhole1of2 [GOOD] >> KqpSinkLocks::DifferentKeyUpdateOlap >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> YdbS3Internal::BadRequests [GOOD] >> YdbScripting::BasicV0 >> test.py::test[bigdate-table_common_type-default.txt-Results] [GOOD] >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests2Inflight2BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::HandlesPipeDisconnection_AnswerDoesNotArrive [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' is not created, Marker# PQ94" ErrorCode: UNKNOWN_TOPIC } 2025-06-03T10:26:13.152034Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:13.153186Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:13.153264Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-03T10:26:13.153272Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:13.153277Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-03T10:26:13.153283Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:13.153308Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.153319Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-03T10:26:13.153453Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:262:2253], now have 1 active actors on pipe 2025-06-03T10:26:13.153473Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:13.156183Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.157189Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.157220Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.157377Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928037] Config applied version 1 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.157407Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:13.157483Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:13.157541Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:270:2259] 2025-06-03T10:26:13.158303Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
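The "server connected, pipe [...], now have N active actors on pipe" and "server disconnected, pipe [...] destroyed" pairs throughout this test are plain connection bookkeeping, which is exactly what the HandlesPipeDisconnection cases exercise. A minimal sketch of such tracking, with TActorId simplified to an integer pair purely for illustration:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_set>

struct TActorId {
    uint64_t RawX1 = 0, RawX2 = 0;
    bool operator==(const TActorId& o) const {
        return RawX1 == o.RawX1 && RawX2 == o.RawX2;
    }
};
struct TActorIdHash {
    std::size_t operator()(const TActorId& id) const {
        return std::hash<uint64_t>{}(id.RawX1 * 1000003u + id.RawX2);
    }
};

class TPipeTracker {
public:
    std::size_t OnServerConnected(TActorId pipe) {   // TEvServerConnected
        Active.insert(pipe);
        return Active.size();  // "now have N active actors on pipe"
    }
    void OnServerDisconnected(TActorId pipe) {       // "pipe [...] destroyed"
        Active.erase(pipe);
    }
private:
    std::unordered_set<TActorId, TActorIdHash> Active;
};

int main() {
    TPipeTracker t;
    TActorId pipe{487, 2419};              // ids echo the log's [2:487:2419]
    std::size_t n = t.OnServerConnected(pipe);  // -> 1 active actor on pipe
    t.OnServerDisconnected(pipe);               // server disconnected
    return n == 1 ? 0 : 1;
}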
2025-06-03T10:26:13.158319Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:270:2259] 2025-06-03T10:26:13.158326Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:13.158337Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:13.158487Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:273:2261], now have 1 active actors on pipe 2025-06-03T10:26:13.169431Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928137] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:13.170075Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928137] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:13.170147Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928137] doesn't have tx info 2025-06-03T10:26:13.170158Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928137] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:13.170164Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928137] no config, start with empty partitions and default config 2025-06-03T10:26:13.170170Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928137] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:13.170180Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.170192Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928137] doesn't have tx writes info 2025-06-03T10:26:13.170346Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928137] server connected, pipe [2:408:2362], now have 1 active actors on pipe 2025-06-03T10:26:13.170364Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928137] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:13.170410Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928137] Config update version 2(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.170805Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928137] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.170837Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928137] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.170959Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928137] Config applied version 2 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:13.170990Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:13.171074Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:13.171111Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928137, Partition: 0, State: StateInit] bootstrapping 0 [2:416:2368] 2025-06-03T10:26:13.171703Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:0:Initializer] Initializing completed. 2025-06-03T10:26:13.171723Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928137, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 0 generation 2 [2:416:2368] 2025-06-03T10:26:13.171741Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928137, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:13.171749Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928137, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:13.171938Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928137] server connected, pipe [2:419:2370], now have 1 active actors on pipe 2025-06-03T10:26:13.181288Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928138] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:13.182459Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928138] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:13.182567Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928138] doesn't have tx info 2025-06-03T10:26:13.182577Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928138] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:13.182583Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928138] no config, start with empty partitions and default config 2025-06-03T10:26:13.182591Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928138] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:13.182600Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.182612Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928138] doesn't have tx writes info 2025-06-03T10:26:13.182821Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [2:468:2407], now have 1 active actors on pipe 2025-06-03T10:26:13.182833Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928138] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:13.182897Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928138] Config update version 3(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:13.183474Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:13.183509Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.183647Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928138] Config applied version 3 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 3 Partitions { PartitionId ... 
riteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:13.783411Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:13.783449Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.783580Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928138] Config applied version 11 actor [4:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 11 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 } 2025-06-03T10:26:13.783613Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:13.783703Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:13.783750Z node 4 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [4:475:2412] 2025-06-03T10:26:13.784485Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed. 2025-06-03T10:26:13.784495Z node 4 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [4:475:2412] 2025-06-03T10:26:13.784501Z node 4 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:13.784507Z node 4 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:13.784656Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [4:478:2414], now have 1 active actors on pipe 2025-06-03T10:26:13.793682Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:13.795778Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:13.795919Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-03T10:26:13.795928Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:13.795934Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-03T10:26:13.795941Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:13.795950Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.795962Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-03T10:26:13.796161Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [4:527:2451], now have 1 active actors on pipe 2025-06-03T10:26:13.796186Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:13.796245Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928139] Config update version 12(current 0) received from actor [4:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:13.797117Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:13.797160Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:13.797288Z node 4 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928139] Config applied version 12 actor [4:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 12 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:13.797338Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:13.797453Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:13.797502Z node 4 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:535:2457] 2025-06-03T10:26:13.798023Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 
2025-06-03T10:26:13.798033Z node 4 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [4:535:2457]
2025-06-03T10:26:13.798042Z node 4 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.798049Z node 4 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.798251Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [4:538:2459], now have 1 active actors on pipe
2025-06-03T10:26:13.798493Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [4:544:2462], now have 1 active actors on pipe
2025-06-03T10:26:13.798550Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [4:545:2463], now have 1 active actors on pipe
2025-06-03T10:26:13.798591Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [4:546:2463], now have 1 active actors on pipe
2025-06-03T10:26:13.809471Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [4:554:2470], now have 1 active actors on pipe
2025-06-03T10:26:13.818507Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.819339Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.819414Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info
2025-06-03T10:26:13.819420Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.819458Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.819566Z node 4 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.819573Z node 4 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info
2025-06-03T10:26:13.819596Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.819647Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.819676Z node 4 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [4:611:2515]
2025-06-03T10:26:13.820204Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep
2025-06-03T10:26:13.820617Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep
2025-06-03T10:26:13.820674Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep
2025-06-03T10:26:13.820722Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep
2025-06-03T10:26:13.820755Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep
2025-06-03T10:26:13.820760Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep
2025-06-03T10:26:13.820767Z node 4 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized.
2025-06-03T10:26:13.820772Z node 4 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-06-03T10:26:13.820782Z node 4 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [4:611:2515]
2025-06-03T10:26:13.820791Z node 4 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.820800Z node 4 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.821037Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928138] server disconnected, pipe [4:545:2463] destroyed
2025-06-03T10:26:13.821057Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [4:544:2462] destroyed
RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionOffsetsResult { TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 StartOffset: 0 EndOffset: 0 ErrorCode: OK WriteTimestampEstimateMS: 0 } PartitionResult { Partition: 2 ErrorCode: INITIALIZING ErrorReason: "partition is not ready yet" } ErrorCode: OK } } }
>> YdbYqlClient::TestTransactionQueryError [GOOD]
>> YdbYqlClient::TestReadWrongTable
>> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD]
>> TGRpcYdbTest::KeepAlive [GOOD]
>> TYqlDateTimeTests::TimestampKey [GOOD]
>> TYqlDateTimeTests::IntervalKey
>> YdbIndexTable::AlterIndexImplBySuperUser [GOOD]
>> YdbIndexTable::CreateTableAddIndex
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailesOnNotATopic [GOOD]
Test command err:
Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR }
2025-06-03T10:26:13.152034Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.153047Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.153133Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info
2025-06-03T10:26:13.153143Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.153148Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-06-03T10:26:13.153161Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.153171Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.153182Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info
2025-06-03T10:26:13.153389Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:263:2254], now have 1 active actors on pipe
2025-06-03T10:26:13.153414Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-06-03T10:26:13.156139Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-06-03T10:26:13.157061Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-06-03T10:26:13.157100Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.157267Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928037] Config applied version 1 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-06-03T10:26:13.157317Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.157418Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.157485Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:271:2260]
2025-06-03T10:26:13.158153Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed.
2025-06-03T10:26:13.158171Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:271:2260]
2025-06-03T10:26:13.158180Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.158192Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.158397Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:274:2262], now have 1 active actors on pipe
2025-06-03T10:26:13.169234Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.170246Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.170314Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info
2025-06-03T10:26:13.170322Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.170327Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-06-03T10:26:13.170333Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.170341Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.170351Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info
2025-06-03T10:26:13.170472Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:408:2362], now have 1 active actors on pipe
2025-06-03T10:26:13.170491Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-06-03T10:26:13.170531Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.171003Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.171027Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.171148Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928139] Config applied version 2 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.171174Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.171234Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.171262Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:416:2368]
2025-06-03T10:26:13.171819Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-06-03T10:26:13.237450Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:416:2368]
2025-06-03T10:26:13.237488Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.237502Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.237817Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:419:2370], now have 1 active actors on pipe
2025-06-03T10:26:13.238285Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:425:2373], now have 1 active actors on pipe
2025-06-03T10:26:13.238369Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:427:2374], now have 1 active actors on pipe
2025-06-03T10:26:13.238404Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [2:425:2373] destroyed
2025-06-03T10:26:13.238519Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928139] server disconnected, pipe [2:427:2374] destroyed
2025-06-03T10:26:13.472960Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.474042Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.474152Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info
2025-06-03T10:26:13.474162Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.474168Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config
2025-06-03T10:26:13.474175Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.474185Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.474198Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info
2025-06-03T10:26:13.474382Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [3:263:2254], now have 1 active actors on pipe
2025-06-03T10:26:13.474395Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig
2025-06-03T10:26:13.474456Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 }
2025-06-03T10:26:13.475059Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 3 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2 ... icName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-06-03T10:26:13.561469Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928138] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-06-03T10:26:13.561507Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928138] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.561662Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928138] Config applied version 5 actor [3:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 1 TopicName: "rt3.dc1--topic2" Version: 5 Partitions { PartitionId: 1 } AllPartitions { PartitionId: 1 }
2025-06-03T10:26:13.561691Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.561773Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:1:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.561815Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928138, Partition: 1, State: StateInit] bootstrapping 1 [3:475:2412]
2025-06-03T10:26:13.562386Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:1:Initializer] Initializing completed.
2025-06-03T10:26:13.562403Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928138, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 1 generation 2 [3:475:2412]
2025-06-03T10:26:13.562412Z node 3 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928138, Partition: 1, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.562425Z node 3 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.562624Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [3:478:2414], now have 1 active actors on pipe
2025-06-03T10:26:13.567353Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.568536Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.568640Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info
2025-06-03T10:26:13.568651Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.568656Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928139] no config, start with empty partitions and default config
2025-06-03T10:26:13.568663Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.568673Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.568684Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info
2025-06-03T10:26:13.568872Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:527:2451], now have 1 active actors on pipe
2025-06-03T10:26:13.568897Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig
2025-06-03T10:26:13.568960Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928139] Config update version 6(current 0) received from actor [3:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.575069Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.575159Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.575368Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928139] Config applied version 6 actor [3:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 6 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 }
2025-06-03T10:26:13.575415Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.575589Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.575633Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:535:2457]
2025-06-03T10:26:13.576191Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-06-03T10:26:13.659187Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:535:2457]
2025-06-03T10:26:13.659221Z node 3 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.659248Z node 3 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.659487Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:538:2459], now have 1 active actors on pipe
2025-06-03T10:26:13.659859Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [3:545:2463], now have 1 active actors on pipe
2025-06-03T10:26:13.659868Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [3:544:2462], now have 1 active actors on pipe
2025-06-03T10:26:13.659889Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:546:2463], now have 1 active actors on pipe
2025-06-03T10:26:13.670452Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:551:2467], now have 1 active actors on pipe
2025-06-03T10:26:13.677761Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo
2025-06-03T10:26:13.678641Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615
2025-06-03T10:26:13.678741Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info
2025-06-03T10:26:13.678750Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0
2025-06-03T10:26:13.678797Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0
2025-06-03T10:26:13.678897Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig
2025-06-03T10:26:13.678904Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info
2025-06-03T10:26:13.678926Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep
2025-06-03T10:26:13.678987Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep
2025-06-03T10:26:13.679019Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:608:2512]
2025-06-03T10:26:13.679575Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep
2025-06-03T10:26:13.679877Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep
2025-06-03T10:26:13.679928Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep
2025-06-03T10:26:13.679982Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep
2025-06-03T10:26:13.680011Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep
2025-06-03T10:26:13.680019Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep
2025-06-03T10:26:13.680027Z node 3 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized.
2025-06-03T10:26:13.680035Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed.
2025-06-03T10:26:13.680043Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:608:2512]
2025-06-03T10:26:13.680053Z node 3 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0
2025-06-03T10:26:13.680061Z node 3 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0
2025-06-03T10:26:13.680289Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928138] server disconnected, pipe [3:545:2463] destroyed
2025-06-03T10:26:13.680303Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [3:544:2462] destroyed
RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionLocationsResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionLocation { Partition: 1 Host: "::1" HostId: 3 ErrorCode: OK } PartitionLocation { Partition: 2 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionLocation { Partition: 0 Host: "::1" HostId: 3 ErrorCode: OK } ErrorCode: OK } } }
Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC }
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest
>> YdbYqlClient::CreateAndAltertTableWithCompactionPolicy [GOOD]
>> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter
>> ClientStatsCollector::CounterRetryOperation [GOOD]
>> ClientStatsCollector::ExternalMetricRegistryByRawPtr
>> YdbYqlClient::TestReadTableMultiShardWithDescribe [GOOD]
>> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnFailedGetAllTopicsRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> TGRpcYdbTest::KeepAlive [GOOD]
Test command err:
2025-06-03T10:26:08.961405Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667453919407263:2263];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:08.961484Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00281e/r3tmp/tmpicdvGr/pdisk_1.dat
2025-06-03T10:26:09.049430Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:26:09.050420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:09.050443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:09.054532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 24164, node 1
2025-06-03T10:26:09.077607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:09.077630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:09.077632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:09.077702Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:8876
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:09.135597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:26:09.252776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480
2025-06-03T10:26:09.277371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715659:0, at schemeshard: 72057594046644480
2025-06-03T10:26:10.200994Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667463498681219:2208];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:10.201063Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00281e/r3tmp/tmpkzcaaO/pdisk_1.dat
2025-06-03T10:26:10.256472Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 22296, node 4
2025-06-03T10:26:10.299471Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:10.299512Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:10.300044Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:10.300048Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:10.300050Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:10.300105Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-03T10:26:10.300998Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:2605
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
2025-06-03T10:26:10.326037Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00281e/r3tmp/tmp2NV2C5/pdisk_1.dat
2025-06-03T10:26:11.319687Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-03T10:26:11.366373Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 62579, node 7
2025-06-03T10:26:11.403247Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:11.403304Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:11.404602Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:11.404628Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:11.404631Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:11.404695Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-03T10:26:11.406510Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:20176
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:11.442495Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
: Error: Operation timeout.
2025-06-03T10:26:12.409574Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667473876046785:2207];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:12.409638Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00281e/r3tmp/tmpY8ix4a/pdisk_1.dat
2025-06-03T10:26:12.457281Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 2458, node 10
2025-06-03T10:26:12.480682Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:12.480698Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:12.480701Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:12.480762Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:16864
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:12.510341Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:12.510400Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:12.516179Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:26:12.516567Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:26:12.521781Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
: Error: Operation cancelled.
2025-06-03T10:26:13.459892Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667475033288106:2207];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:13.459955Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00281e/r3tmp/tmpqiDhW6/pdisk_1.dat
2025-06-03T10:26:13.495317Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 12977, node 13
2025-06-03T10:26:13.525532Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:13.525548Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:13.525550Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:13.525601Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:23652
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:13.551020Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:26:13.560858Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:13.560902Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:13.567923Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetTopicMetadataMetaRequestTest::FailsOnEmptyTopicName [GOOD]
Test command err:
Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC }
Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR }
Assert failed: Check response: { Status: 128 ErrorReason: "empty topic in GetTopicMetadata request" ErrorCode: BAD_REQUEST }
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNotOkStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest
>> YdbScripting::BasicV0 [GOOD]
>> YdbScripting::BasicV1
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD]
>> YdbYqlClient::TestReadWrongTable [GOOD]
>> TTableProfileTests::UseTableProfilePresetViaSdk [GOOD]
>> TTableProfileTests::WrongTableProfile
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest
>> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests2Inflight2BlobSize1000 [GOOD]
>> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight10BlobSize1000
>> YdbIndexTable::CreateTableAddIndex [GOOD]
>> YdbIndexTable::AlterTableAddIndex
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedTopicName [GOOD]
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout
>> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD]
>> TYqlDateTimeTests::IntervalKey [GOOD]
>> TYqlDateTimeTests::SimpleOperations
>> ClientStatsCollector::ExternalMetricRegistryByRawPtr [GOOD]
>> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr
>> test.py::test[window-win_inline_spec-default.txt-Results] [GOOD]
>> test.py::test[ypath-empty_range--Results] [SKIPPED]
>> test.py::test[ypath-limit_with_key-default.txt-Results]
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD]
>> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic
>> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::FailsOnNoBalancerInGetNodeRequest [GOOD]
Test command err:
Assert failed: Check response: { Status: 128 ErrorReason: "no path \'/Root/PQ/\', Marker# PQ17" ErrorCode: UNKNOWN_TOPIC }
Assert failed: Check response: { Status: 128 ErrorReason: "no path \'Root/PQ\', Marker# PQ150" ErrorCode: UNKNOWN_TOPIC }
Assert failed: Check response: { Status: 128 ErrorReason: "topic \'rt3.dc1--topic1\' has no balancer, Marker# PQ193" ErrorCode: UNKNOWN_TOPIC }
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesTimeout [GOOD]
>> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadWrongTable [GOOD]
Test command err:
2025-06-03T10:26:10.152232Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667464248055796:2207];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:10.152312Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027ed/r3tmp/tmpEkuo6L/pdisk_1.dat
2025-06-03T10:26:10.249765Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:26:10.256840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:10.256863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
TServer::EnableGrpc on GrpcPort 18806, node 1
2025-06-03T10:26:10.257879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:26:10.274828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:10.274843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:10.274845Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:10.274892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:25787
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:10.330239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:26:10.726900Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667464248056615:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:10.726901Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667464248056627:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:10.726925Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:10.727842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-06-03T10:26:10.750052Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667464248056629:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2025-06-03T10:26:10.809848Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667464248056703:2664] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027ed/r3tmp/tmpf4M0v3/pdisk_1.dat
2025-06-03T10:26:11.653640Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667469288206493:2212];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:11.654494Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
2025-06-03T10:26:11.697881Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 5639, node 4
2025-06-03T10:26:11.735040Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:11.735056Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:11.735058Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:11.735125Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-03T10:26:11.759235Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:11.759264Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:11.764901Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:31947
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:26:11.771383Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:26:12.162676Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667473583174594:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:12.162703Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667473583174589:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:12.162762Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:12.163704Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-06-03T10:26:12.176283Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667473583174603:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2025-06-03T10:26:12.278696Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667473583174678:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:26:13.039180Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511667476101730024:2212];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:26:13.039803Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027ed/r3tmp/tmpnccAjI/pdisk_1.dat
2025-06-03T10:26:13.065774Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 18366, node 7
2025-06-03T10:26:13.094167Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:26:13.094184Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:26:13.094187Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:26:13.094257Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:29986
WaitRootIsUp 'Root'...
TClient::Ls request: Root
2025-06-03T10:26:13.139031Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:26:13.139068Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:26:13.346657Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient::Ls respons ... _WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511667476101730818:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:13.517815Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:13.522718Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:26:13.604844Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511667476101730981:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:13.604878Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:13.605033Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511667476101730986:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:13.606128Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:26:13.623146Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
2025-06-03T10:26:13.625393Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511667476101730988:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:13.697859Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511667476101731059:2772] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:13.724144Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7jz4egy5195rtn0bckms, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MzZiOTM2YTMtNGZiZWNhMmItNGI4NWI2Y2UtMWYzYzg1YzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:13.773529Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn7k3p5n2gd582fsha48tq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDJmOTQ0YmQtN2M4MTQ1ZmEtZGUzOWIzOTQtNDYxMTFlNWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:13.801852Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=7&id=MzZiOTM2YTMtNGZiZWNhMmItNGI4NWI2Y2UtMWYzYzg1YzQ=, ActorId: [7:7511667476101730791:2331], ActorState: ExecuteState, TraceId: 01jwtn7k4xcje64k7nchhgvd6n, Create QueryResponse for error on request, msg: 2025-06-03T10:26:14.487510Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667483033970509:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:14.487564Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027ed/r3tmp/tmpOYm0uX/pdisk_1.dat 2025-06-03T10:26:14.520236Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4271, node 10 2025-06-03T10:26:14.534677Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:14.534695Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:14.534697Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:14.534751Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2855 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:14.589358Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:14.589401Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:14.593230Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:14.611547Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:14.657639Z node 10 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jwtn7m01d5dc0bkkrz7brezj, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50762, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.998672s 2025-06-03T10:26:14.661889Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jwtn7m055jnzkabjtvvq6hqc, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50762, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-03T10:26:14.909980Z node 10 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# ReadTableRequest, traceId# 01jwtn7m7x45x88cnjexqr335a, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:50762, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-03T10:26:14.910440Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:567: [ReadTable [10:7511667483033971441:2333] TxId# 281474976715658] Navigate request failed for table 'Root/NoTable' 2025-06-03T10:26:14.910491Z node 10 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [10:7511667483033971441:2333] TxId# 281474976715658] RESPONSE Status# ResolveError shard: 0 table: Root/NoTable 2025-06-03T10:26:14.910669Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7511667483033971440:2333] Finish grpc stream, status: 400070
: Error: Failed to resolve table Root/NoTable, code: 200400
: Error: Got ResolveError response from TxProxy
: Error: Failed to resolve table Root/NoTable 2025-06-03T10:26:14.914496Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519f9d849180] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914594Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fba26d600] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914630Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbfc16100] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914663Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fb4ffc680] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914695Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fba26c000] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914710Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbb161080] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914728Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fb4ffbb80] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914762Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbb01ec00] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914800Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbfc63600] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914806Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbb163180] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914842Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fba3c9700] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914844Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519f9d847600] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914879Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519f9b7f8c00] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914879Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbfc62580] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914909Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbb160580] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914921Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fbfc64100] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 2025-06-03T10:26:14.914941Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x519fba26f700] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailesOnNotATopic [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbYqlClient::CreateAndAltertTableWithKeyBloomFilter [GOOD] Test 
command err: 2025-06-03T10:26:09.308741Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667457333885365:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:09.308794Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002803/r3tmp/tmpevRDqt/pdisk_1.dat 2025-06-03T10:26:09.419516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:09.419544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:09.426381Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:09.466456Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8731, node 1 2025-06-03T10:26:09.543597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:09.543613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:09.543617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:09.543672Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24314 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:09.629762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:09.652538Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48310 Call 2025-06-03T10:26:09.654531Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48310 2025-06-03T10:26:09.871683Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:542: Skip check permission connect db, AllowYdbRequestsWithoutDatabase is off, there is no db provided from user, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48310 Call Call 2025-06-03T10:26:09.878502Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48310 2025-06-03T10:26:09.880858Z node 1 :GRPC_PROXY_NO_CONNECT_ACCESS DEBUG: grpc_request_check_actor.h:578: Skip check permission connect db, user is a admin, database: /Root, user: root@builtin, from ip: ipv6:[::1]:48314 2025-06-03T10:26:09.881409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:10.540657Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667462144362079:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.540805Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002803/r3tmp/tmpJIWr0U/pdisk_1.dat 2025-06-03T10:26:10.605026Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5909, node 4 2025-06-03T10:26:10.634710Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:10.634725Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:10.634728Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:10.634803Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:10.641273Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.641333Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.642873Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19978 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:10.701888Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:11.650866Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511667470180885368:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:11.650887Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002803/r3tmp/tmpasBTqv/pdisk_1.dat 2025-06-03T10:26:11.678821Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7822, node 7 2025-06-03T10:26:11.692365Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.692381Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.692385Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.692448Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2068 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:26:11.752312Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:11.752350Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:11.757833Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:11.760360Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:12.150741Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Table-1, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:12.151105Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:26:12.151114Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:12.153796Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/Table-1 2025-06-03T10:26:12.216665Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946372264, transactions count in step: 1, at schemeshard: 720575940466 ... 
emeshard: 72057594046644480 2025-06-03T10:26:12.848074Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715687, done: 0, blocked: 1 2025-06-03T10:26:12.850750Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715687:0 2025-06-03T10:26:12.854707Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_table.cpp:492: TDropTable Propose, path: Root/Table-8, pathId: 0, opId: 281474976715688:0, at schemeshard: 72057594046644480 2025-06-03T10:26:12.854760Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715688:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:26:12.855236Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715688, database: /Root, subject: , status: StatusAccepted, operation: DROP TABLE, path: Root/Table-8 2025-06-03T10:26:12.856928Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037892 not found 2025-06-03T10:26:12.857470Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-03T10:26:12.860444Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946372908, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:26:12.862140Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976715688, done: 0, blocked: 1 2025-06-03T10:26:12.864927Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715688:0 2025-06-03T10:26:12.879806Z node 7 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 7, TabletId: 72075186224037893 not found 2025-06-03T10:26:12.879924Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-03T10:26:13.601060Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667475943849679:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:13.601147Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002803/r3tmp/tmp5xKwZa/pdisk_1.dat 2025-06-03T10:26:13.650245Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5112, node 10 2025-06-03T10:26:13.683058Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:13.683076Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:13.683078Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-03T10:26:13.683146Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:13.700226Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:13.700268Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:13.706287Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10357 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:13.714571Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:10357 2025-06-03T10:26:14.052404Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:10357 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946374126 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... 
(TRUNCATED) 2025-06-03T10:26:14.112981Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:10357 TClient::Ls request: Root/Test TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Test" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946374126 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Test" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) 2025-06-03T10:26:14.728713Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667480586655006:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:14.728748Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002803/r3tmp/tmpbv2wna/pdisk_1.dat 2025-06-03T10:26:14.749224Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7782, node 13 2025-06-03T10:26:14.765707Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:14.765725Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:14.765728Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:14.765783Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:26:14.829447Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:14.829486Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:14.831293Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:14.835113Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:15.172411Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:15.246602Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::SuccessfullyPassesResponsesFromTablets [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] |61.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |61.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut |61.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/ydb-public-sdk-cpp-src-client-topic-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionOffsetsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple TopicRequest for topic \'rt3.dc1--topic1\'" ErrorCode: BAD_REQUEST } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBadRootStatusInGetNodeRequest [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic >> YdbIndexTable::AlterTableAddIndex [GOOD] >> YdbLogStore::AlterLogStore >> YdbScripting::BasicV1 [GOOD] >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailesOnNotATopic [GOOD] >> 
TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbYqlClient::TestReadTableMultiShardWithDescribeAndRowLimit [GOOD] Test command err: 2025-06-03T10:26:10.267913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667464947201803:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.268026Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027dc/r3tmp/tmpYFrI5i/pdisk_1.dat 2025-06-03T10:26:10.351637Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:10.365657Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.365688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.375152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28042, node 1 2025-06-03T10:26:10.409795Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:10.409810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:10.409813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:10.409883Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25383 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:10.478673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:10.499102Z node 1 :GRPC_SERVER INFO: grpc_request_proxy.cpp:592: Got grpc request# ListEndpointsRequest, traceId# 01jwtn7fy207fd2e1n9pwpe2hn, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:43120, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# 9.999205s 2025-06-03T10:26:10.505402Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateSessionRequest, traceId# 01jwtn7fy609bjhtaat7jdvsj4, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:43120, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-03T10:26:10.829700Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# CreateTableRequest, traceId# 01jwtn7g8d93j4cjkmwm4v7dwm, sdkBuildInfo# ydb-cpp-sdk/dev, state# AS_NOT_PERFORMED, database# undef, peer# ipv6:[::1]:43120, grpcInfo# grpc-c++/1.54.3 grpc-c/31.0.0 (linux; chttp2), timeout# undef 2025-06-03T10:26:10.833389Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667464947201885:2140] Handle TEvProposeTransaction 2025-06-03T10:26:10.833415Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667464947201885:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:26:10.833446Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667464947201885:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511667464947202608:2606] 2025-06-03T10:26:10.846509Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667464947202608:2606] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "Test" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Fk" Type: "Uint64" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" KeyColumnNames: "Fk" UniformPartitionsCount: 16 PartitionConfig { } Temporary: false } CreateIndexedTable { } } } DatabaseName: "" RequestType: "" PeerName: "ipv6:[::1]:43120" 2025-06-03T10:26:10.846541Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511667464947202608:2606] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:26:10.846701Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:7511667464947202608:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:26:10.846713Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511667464947202608:2606] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:26:10.846911Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511667464947202608:2606] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:26:10.846956Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511667464947202608:2606] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:26:10.846966Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511667464947202608:2606] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 
2025-06-03T10:26:10.847032Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511667464947202608:2606] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:26:10.847660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:10.848870Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511667464947202608:2606] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-03T10:26:10.848896Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511667464947202608:2606] txid# 281474976715658 SEND to# [1:7511667464947202607:2333] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-03T10:26:10.849111Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-03T10:26:10.849141Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-03T10:26:10.849215Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:489: SchemeBoardUpdate /Root 2025-06-03T10:26:10.849229Z node 1 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:518: Can't update SecurityState for /Root - no PublicKeys 2025-06-03T10:26:10.867542Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202649:2645], Recipient [1:7511667464947202771:2341]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.867824Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202651:2647], Recipient [1:7511667464947202773:2343]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.867882Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202643:2639], Recipient [1:7511667464947202777:2347]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.867957Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202645:2641], Recipient [1:7511667464947202802:2350]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.867998Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202648:2644], Recipient [1:7511667464947202774:2344]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868090Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202640:2636], Recipient [1:7511667464947202770:2340]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868092Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202636:2632], Recipient [1:7511667464947202762:2336]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868196Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202638:2634], Recipient [1:7511667464947202764:2338]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868200Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202637:2633], Recipient [1:7511667464947202763:2337]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868336Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202650:2646], Recipient 
[1:7511667464947202772:2342]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868446Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202641:2637], Recipient [1:7511667464947202775:2345]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202644:2640], Recipient [1:7511667464947202789:2349]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868572Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202647:2643], Recipient [1:7511667464947202765:2339]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868734Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202639:2635], Recipient [1:7511667464947202778:2348]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868756Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202642:2638], Recipient [1:7511667464947202776:2346]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.868902Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:7511667464947202646:2642], Recipient [1:7511667464947202840:2351]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:26:10.869914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:7511667464947202651:2647], Recipient [1:7511667464947202773:2343]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:26:10.869914Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:7511667464947202640:2636], Recipient [1:75116674649472027 ... 
075186224037897 has no attached operations 2025-06-03T10:26:15.585509Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-03T10:26:15.585516Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-03T10:26:15.585719Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435082, Sender [10:7511667483205714647:2083], Recipient [10:7511667483205713699:2339]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-03T10:26:15.585724Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-03T10:26:15.585739Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:267: [10:7511667483205714629:2397] Adding quota request to queue ShardId: 0, TxId: 281474976715680 2025-06-03T10:26:15.585749Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:629: [10:7511667483205714629:2397] Assign stream quota to Shard 0, Quota 5, TxId 281474976715680 Reserved: 5 of 25, Queued: 0 2025-06-03T10:26:15.585820Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 5 2025-06-03T10:26:15.585886Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037897, TxId: 281474976715681, Size: 54, Rows: 0, PendingAcks: 1, MessageQuota: 4 2025-06-03T10:26:15.586026Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:647: [10:7511667483205714629:2397] got stream part, size: 75, RU required: 128 rate limiter absent 2025-06-03T10:26:15.586052Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549569, Sender [10:7511667483205714630:2397], Recipient [10:7511667483205713699:2339]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715681 2025-06-03T10:26:15.586056Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-03T10:26:15.586060Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037897 txId 281474976715681 2025-06-03T10:26:15.586076Z node 10 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037897 txId 281474976715681 2025-06-03T10:26:15.586095Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287431, Sender [10:7511667483205714630:2397], Recipient [10:7511667483205713699:2339]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715681 2025-06-03T10:26:15.586098Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-03T10:26:15.586116Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [10:7511667483205714630:2397], Recipient [10:7511667483205713699:2339]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1748946375631 TxId: 281474976715680 2025-06-03T10:26:15.586171Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [10:7511667483205713699:2339], Recipient [10:7511667483205713699:2339]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:26:15.586173Z node 10 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing 
event TEvPrivate::TEvProgressTransaction 2025-06-03T10:26:15.586177Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037897 2025-06-03T10:26:15.586181Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:26:15.586187Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715681] at 72075186224037897 for ReadTableScan 2025-06-03T10:26:15.586191Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit ReadTableScan 2025-06-03T10:26:15.586205Z node 10 :TX_DATASHARD NOTICE: read_table_scan_unit.cpp:240: Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish 2025-06-03T10:26:15.586209Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-03T10:26:15.586212Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit ReadTableScan 2025-06-03T10:26:15.586214Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit FinishPropose 2025-06-03T10:26:15.586217Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-03T10:26:15.586230Z node 10 :TX_DATASHARD ERROR: finish_propose_unit.cpp:245: Prepare transaction failed. txid 281474976715681 at tablet 72075186224037897 errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish) | 2025-06-03T10:26:15.586238Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is DelayCompleteNoMoreRestarts 2025-06-03T10:26:15.586241Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit FinishPropose 2025-06-03T10:26:15.586244Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715681] at 72075186224037897 to execution unit CompletedOperations 2025-06-03T10:26:15.586246Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715681] at 72075186224037897 on unit CompletedOperations 2025-06-03T10:26:15.586258Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715681] at 72075186224037897 is Executed 2025-06-03T10:26:15.586259Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715681] at 72075186224037897 executing on unit CompletedOperations 2025-06-03T10:26:15.586261Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715681] at 72075186224037897 has finished 2025-06-03T10:26:15.586263Z node 10 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037897 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:26:15.586264Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037897 2025-06-03T10:26:15.586265Z node 10 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037897 has no attached operations 2025-06-03T10:26:15.586267Z node 10 
:TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037897 2025-06-03T10:26:15.586275Z node 10 :READ_TABLE_API DEBUG: rpc_read_table.cpp:563: [10:7511667483205714629:2397] Starting inactivity timer for 600.000000s with tag 3 2025-06-03T10:26:15.586302Z node 10 :READ_TABLE_API NOTICE: rpc_read_table.cpp:531: [10:7511667483205714629:2397] Finish grpc stream, status: 400000 2025-06-03T10:26:15.586515Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037897, TxId: 281474976715681, PendingAcks: 0 2025-06-03T10:26:15.586537Z node 10 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037897, TxId: 281474976715681, MessageQuota: 4 2025-06-03T10:26:15.588069Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7bdf6b00] received request Name# SchemeOperation ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588140Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7bd58000] received request Name# SchemeOperationStatus ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588177Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7bdf8100] received request Name# SchemeDescribe ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588216Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7a2ab600] received request Name# ChooseProxy ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588259Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7bdf6000] received request Name# PersQueueRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588404Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7b453600] received request Name# SchemeInitRoot ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588440Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7fc17180] received request Name# ResolveNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588474Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7bd7ab00] received request Name# FillNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588511Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7fc16680] received request Name# DrainNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588549Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa5b634000] received request Name# BlobStorageConfig ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588582Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa5b636680] received request Name# HiveCreateTablet ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588614Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7b482c00] received request Name# RegisterNode ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588616Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa74ff8b00] received request Name# TestShardControl ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588648Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7a2abb80] received request Name# CmsRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588690Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa7f98a680] received request Name# ConsoleRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588703Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa74ff9600] received request Name# InterconnectDebug ok# false data# peer# current inflight# 0 
2025-06-03T10:26:15.588717Z node 10 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037897 2025-06-03T10:26:15.588727Z node 10 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715681] at 72075186224037897 on unit FinishPropose 2025-06-03T10:26:15.588731Z node 10 :GRPC_SERVER DEBUG: grpc_server.cpp:283: [0x72fa79df0100] received request Name# TabletStateRequest ok# false data# peer# current inflight# 0 2025-06-03T10:26:15.588740Z node 10 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715681 at tablet 72075186224037897 send to client, exec latency: 1 ms, propose latency: 3 ms, status: ERROR 2025-06-03T10:26:15.588751Z node 10 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715681 at tablet 72075186224037897 status: ERROR errors: WRONG_SHARD_STATE (Interrupted operation [0:281474976715681] at 72075186224037897 while waiting for scan finish) | 2025-06-03T10:26:15.588793Z node 10 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037897 >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] >> TTableProfileTests::WrongTableProfile [GOOD] >> TYqlDateTimeTests::DateKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbScripting::BasicV1 [GOOD] Test command err: 2025-06-03T10:26:09.557765Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667460781567954:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:09.557825Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f5/r3tmp/tmpMea69T/pdisk_1.dat 2025-06-03T10:26:09.657358Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:09.657411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:09.657491Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:09.659881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9110, node 1 2025-06-03T10:26:09.716914Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:09.716931Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:09.716934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:09.716992Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10237 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:09.762474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:09.999886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:10.104377Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667465076537870:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.104410Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.104704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667465076537882:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.106185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:10.133761Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667465076537884:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:10.196600Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667465076537955:4048] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:10.291720Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7fhq2bc6fqwhd33vr0s3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDk5ODc1NjAtNjhiNDQ2OS0zM2Q1NjE4NS1kMTc4ODkyNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS 2025-06-03T10:26:11.004214Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667466276892829:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:11.004788Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f5/r3tmp/tmpYhYLNo/pdisk_1.dat 2025-06-03T10:26:11.036807Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9132, node 4 2025-06-03T10:26:11.070571Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.070585Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.070588Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.070652Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16905 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:26:11.104240Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:11.104270Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
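The recurring WARN/ERROR cluster around the default resource pool above is a benign bootstrap race, not a test failure: the workload service probes `.metadata/workload_manager/pools/default`, gets NOT_FOUND, schedules creation ("doublechecking"), and whichever concurrent creator loses the race gets "path exist, request accepts it" back from the scheme shard, which it treats as success. The same idempotent-create idiom in isolation; `create_default_pool` is a hypothetical stand-in for the scheme operation:

```python
class AlreadyExists(Exception):
    """Stands in for the "path exist, request accepts it" scheme error."""

def create_default_pool():
    # Hypothetical scheme call; assume a concurrent creator won the race.
    raise AlreadyExists("path exist, request accepts it")

def ensure_default_pool():
    try:
        create_default_pool()
    except AlreadyExists:
        # The desired state already holds, so "already exists" is
        # success for the race loser -- which is all the TX_PROXY
        # ERROR lines above amount to.
        pass

ensure_default_pool()
```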
2025-06-03T10:26:11.110460Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:11.114119Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:11.121988Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:11.481073Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:11.594090Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667466276895600:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.594132Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.594428Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667466276895612:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.595395Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:11.601757Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667466276895614:2437], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:11.706002Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667466276895693:4017] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:11.735497Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7h0972jtfqve8ydjevqa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YTFjMDRlZjYtZjQzNjhiMTAtYjMzNTA3NTMtODI2ZTc4YTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:11.752836Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:16905 2025-06-03T10:26:12.574726Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: f ... UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:14.195983Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:14.196022Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:14.197596Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:14.205158Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:14.566732Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667480398178430:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:14.566766Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:14.578209Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:14.622242Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667480398178593:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:14.622289Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:14.622828Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667480398178598:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:14.623956Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:14.632331Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511667480398178600:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:14.695331Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511667480398178669:2766] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:14.704999Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7kjc67vs1gtqf6p2mynn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=NzM3MzE1ZTQtZWNjNjM5NTgtYThhOWQ1NmUtZWFiODMzNjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:14.733243Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7m1p2bap2q0vvtep0p37, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=YjQ1NmYwMzUtOTNjMzk1MTUtYTE4N2ViN2EtNmJiYmVlMg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:14.736719Z node 10 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946374777, txId: 281474976715662] shutting down 2025-06-03T10:26:15.389210Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667487026559349:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.389232Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f5/r3tmp/tmpqAsmCi/pdisk_1.dat 2025-06-03T10:26:15.426082Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10988, node 13 2025-06-03T10:26:15.448099Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:15.448112Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:15.448114Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:15.448174Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29277 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:15.489800Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:15.489831Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:15.491525Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:15.501201Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:15.836019Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667487026560262:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.836050Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.845928Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:15.907990Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667487026560444:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.908021Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.908090Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667487026560449:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.909032Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:15.920656Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511667487026560451:2348], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:16.022225Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511667491321527824:2767] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:16.037584Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7mtwenpgq080snv2fbwm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NGIyMjY2ZGItZWQ4YmY4MTYtNzkwOWFhN2QtMTkxOTMxMzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.060485Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7nbb18qffw5sn70vkqak, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=NmZlZThhNzQtOTJhNDRhNTQtZWI4ZDgxNjAtZDBmZTZlODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.063565Z node 13 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946376100, txId: 281474976715662] shutting down >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10Inflight10BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests100Inflight10BlobSize1000 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionStatusMetaRequestTest::HandlesPipeDisconnection_DisconnectionComesSecond [GOOD] Test command err: Assert failed: Check response: { Status: 130 ErrorReason: "Timeout while waiting for response, may be just slow, Marker# PQ16" ErrorCode: ERROR } 2025-06-03T10:26:16.154192Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:16.156586Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:16.156693Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-03T10:26:16.156702Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:16.156708Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-03T10:26:16.156724Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:16.156735Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.156746Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-03T10:26:16.156929Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:263:2254], now have 1 active actors on pipe 2025-06-03T10:26:16.156952Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:16.159974Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 1(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:16.161331Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928037] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:16.161372Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.161548Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928037] Config applied version 1 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "rt3.dc1--topic1" Version: 1 Partitions { PartitionId: 0 } AllPartitions { PartitionId: 0 } 2025-06-03T10:26:16.161580Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:16.161677Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic1:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:16.161745Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928037, Partition: 0, State: StateInit] bootstrapping 0 [2:271:2260] 2025-06-03T10:26:16.162419Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic1:0:Initializer] Initializing completed. 
2025-06-03T10:26:16.162432Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928037, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--topic1' partition 0 generation 2 [2:271:2260] 2025-06-03T10:26:16.162442Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928037, Partition: 0, State: StateInit] SYNC INIT topic rt3.dc1--topic1 partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:16.162455Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:16.162662Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:274:2262], now have 1 active actors on pipe 2025-06-03T10:26:16.178601Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:16.179513Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:16.179609Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-03T10:26:16.179618Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:16.179623Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928139] no config, start with empty partitions and default config 2025-06-03T10:26:16.179630Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:16.179640Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.179651Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-03T10:26:16.179849Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:409:2363], now have 1 active actors on pipe 2025-06-03T10:26:16.179875Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928139] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:16.179947Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928139] Config update version 2(current 0) received from actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:16.180538Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037928139] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:16.180573Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.180728Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037928139] Config applied version 2 actor [2:100:2134] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 
MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 2 TopicName: "rt3.dc1--topic2" Version: 2 Partitions { PartitionId: 2 } AllPartitions { PartitionId: 2 } 2025-06-03T10:26:16.180758Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:16.180827Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:16.180865Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [2:417:2369] 2025-06-03T10:26:16.181533Z node 2 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-03T10:26:16.181552Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [2:417:2369] 2025-06-03T10:26:16.181562Z node 2 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:16.181571Z node 2 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. Count 0 2025-06-03T10:26:16.181782Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:420:2371], now have 1 active actors on pipe 2025-06-03T10:26:16.182140Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [2:426:2374], now have 1 active actors on pipe 2025-06-03T10:26:16.182200Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [2:428:2375], now have 1 active actors on pipe 2025-06-03T10:26:16.182254Z node 2 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-03T10:26:16.182293Z node 2 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-03T10:26:16.182370Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [2:426:2374] destroyed 2025-06-03T10:26:16.182427Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928139] server disconnected, pipe [2:428:2375] destroyed 2025-06-03T10:26:16.533915Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928037] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:16.535249Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928037] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:16.535354Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928037] doesn't have tx info 2025-06-03T10:26:16.535365Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928037] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:16.535371Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037928037] no config, start with empty partitions and default config 2025-06-03T10:26:16.535380Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928037] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:16.535390Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928037] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.535402Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928037] doesn't have tx writes info 2025-06-03T10:26:16.535587Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [3:262:2253], now have 1 active actors on pipe 2025-06-03T10:26:16.535601Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037928037] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:26:16.535664Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037928037] Config update version 3(current 0) received from actor [3:100:2134] txId 12345 config: CacheS ... 2, State: StateInit] bootstrapping 2 [3:533:2455] 2025-06-03T10:26:16.568442Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-03T10:26:16.568466Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 2 [3:533:2455] 2025-06-03T10:26:16.568477Z node 3 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:16.568494Z node 3 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-03T10:26:16.568713Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:536:2457], now have 1 active actors on pipe 2025-06-03T10:26:16.568993Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928037] server connected, pipe [3:542:2460], now have 1 active actors on pipe 2025-06-03T10:26:16.569048Z node 3 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72057594037928037, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-03T10:26:16.569074Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928138] server connected, pipe [3:543:2461], now have 1 active actors on pipe 2025-06-03T10:26:16.569126Z node 3 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72057594037928138, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-03T10:26:16.569136Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:544:2461], now have 1 active actors on pipe 2025-06-03T10:26:16.569169Z node 3 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } 2025-06-03T10:26:16.579628Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037928139] server connected, pipe [3:552:2468], now have 1 active actors on pipe 2025-06-03T10:26:16.587825Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037928139] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:26:16.588777Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037928139] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:26:16.588902Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037928139] doesn't have tx info 2025-06-03T10:26:16.588916Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037928139] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:26:16.588956Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037928139] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:26:16.589063Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037928139] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:26:16.589072Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037928139] doesn't have tx writes info 2025-06-03T10:26:16.589110Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:26:16.589201Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:26:16.589250Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037928139, Partition: 2, State: StateInit] bootstrapping 2 [3:609:2513] 2025-06-03T10:26:16.589848Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDiskStatusStep 2025-06-03T10:26:16.590225Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitMetaStep 2025-06-03T10:26:16.590290Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitInfoRangeStep 2025-06-03T10:26:16.590343Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataRangeStep 2025-06-03T10:26:16.590382Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitDataStep 2025-06-03T10:26:16.590388Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:75: [rt3.dc1--topic2:2:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-03T10:26:16.590394Z node 3 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--topic2:2:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:26:16.590399Z node 3 :PERSQUEUE DEBUG: partition_init.cpp:55: [rt3.dc1--topic2:2:Initializer] Initializing completed. 2025-06-03T10:26:16.590409Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037928139, Partition: 2, State: StateInit] init complete for topic 'rt3.dc1--topic2' partition 2 generation 3 [3:609:2513] 2025-06-03T10:26:16.590420Z node 3 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037928139, Partition: 2, State: StateInit] SYNC INIT topic rt3.dc1--topic2 partitition 2 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:26:16.590428Z node 3 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037928139, Partition: 2, State: StateIdle] Process pending events. 
Count 0 2025-06-03T10:26:16.590705Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928138] server disconnected, pipe [3:543:2461] destroyed 2025-06-03T10:26:16.590733Z node 3 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72057594037928037] server disconnected, pipe [3:542:2460] destroyed RESPONSE Status: 1 ErrorCode: OK MetaResponse { CmdGetPartitionStatusResult { TopicResult { Topic: "rt3.dc1--topic2" PartitionResult { Partition: 1 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 78 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 78 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } PartitionResult { Partition: 2 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 92 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 92 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } TopicResult { Topic: "rt3.dc1--topic1" PartitionResult { Partition: 0 Status: STATUS_OK LastInitDurationSeconds: 0 CreationTimestamp: 0 GapCount: 0 GapSize: 0 AvgWriteSpeedPerSec: 0 AvgWriteSpeedPerMin: 0 AvgWriteSpeedPerHour: 0 AvgWriteSpeedPerDay: 0 AvgReadSpeedPerSec: 0 AvgReadSpeedPerMin: 0 AvgReadSpeedPerHour: 0 AvgReadSpeedPerDay: 0 ReadBytesQuota: 0 WriteBytesQuota: 50000000 PartitionSize: 0 StartOffset: 0 EndOffset: 0 LastWriteTimestampMs: 39 WriteLagMs: 0 AvgQuotaSpeedPerSec: 0 AvgQuotaSpeedPerMin: 0 AvgQuotaSpeedPerHour: 0 AvgQuotaSpeedPerDay: 0 SourceIdCount: 0 SourceIdRetentionPeriodSec: 0 UsedReserveSize: 0 AggregatedCounters { Values: 39 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 50000000 Values: 0 Values: 9223372036854775807 
Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 1 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 Values: 0 } Generation: 2 Cookie: 1 ScaleStatus: NORMAL } ErrorCode: OK } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetPartitionLocationsMetaRequestTest::FailsOnDuplicatedPartition [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } Assert failed: Check response: { Status: 128 ErrorReason: "multiple partition 2 in TopicRequest for topic \'rt3.dc1--topic2\'" ErrorCode: BAD_REQUEST } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/server/ut/unittest >> TMessageBusServerPersQueueGetReadSessionsInfoMetaRequestTest::FailsOnBalancerDescribeResultFailureWhenTopicsAreGivenExplicitly [GOOD] Test command err: Assert failed: Check response: { Status: 128 ErrorReason: "path \'Root/PQ\' has unknown/invalid root prefix \'Root\', Marker# PQ14" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--topic2, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC } Assert failed: Check response: { Status: 128 ErrorReason: "topic \'Root/PQ\' describe error, Status# LookupError, Marker# PQ1" ErrorCode: ERROR } |61.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> ClientStatsCollector::ExternalMetricRegistryStdSharedPtr [GOOD] Test command err: 2025-06-03T10:26:10.029774Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667461610420522:2146];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.029884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e8/r3tmp/tmpnevabU/pdisk_1.dat 2025-06-03T10:26:10.150925Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.150950Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.153054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:10.153768Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7861, node 1 2025-06-03T10:26:10.178252Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:10.178266Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:10.178269Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed 
to initialize from file: (empty maybe) 2025-06-03T10:26:10.178323Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26523 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:10.241529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:10.248015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:10.688966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667461610421387:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.688998Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.689163Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667461610421399:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.689950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:26:10.696341Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667461610421401:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:26:10.762000Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667461610421468:2647] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:10.855231Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZDk1NjUzMTktMTAzMDliNjEtYmY5MGViYzctY2VlMzlmYjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root 2025-06-03T10:26:11.500383Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667467840023411:2259];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:11.500418Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e8/r3tmp/tmpwuwo0e/pdisk_1.dat 2025-06-03T10:26:11.533588Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24817, node 4 2025-06-03T10:26:11.556417Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.556432Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.556434Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.556495Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30276 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:26:11.600825Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:11.600861Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:11.602381Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:11.607053Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:11.950968Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667467840024152:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.951000Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667467840024165:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.951011Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:11.952532Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:26:11.967918Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511667467840024169:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:26:12.059808Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667472134991534:2643] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:13.019970Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511667474529451067:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:13.020003Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e8/r3tmp/tmp9LjW8t/pdisk_1.dat 2025-06-03T10:26:13.075691Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:13.120964Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:13.121001Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 12552, node 7 2025-06-03T10:26:13.127630Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:13.153579Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use fil ... s aborting because locks are not valid, code: 2001 } 2025-06-03T10:26:14.163920Z node 7 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [7:7511667478824419709:2332], SessionActorId: [7:7511667474529451856:2332], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `Root/names`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[7:7511667474529451856:2332]. isRollback=0 2025-06-03T10:26:14.163958Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=7&id=YmQxOTRhMGUtMzdmMjYzNzctZjZiYWRiNGQtYzQzYTBjMTI=, ActorId: [7:7511667474529451856:2332], ActorState: ExecuteState, TraceId: 01jwtn7kgjdm7n12ft2dhggfhm, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [7:7511667478824419739:2332] from: [7:7511667478824419709:2332] 2025-06-03T10:26:14.163973Z node 7 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [7:7511667478824419739:2332] TxId: 281474976715678. Ctx: { TraceId: 01jwtn7kgjdm7n12ft2dhggfhm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=YmQxOTRhMGUtMzdmMjYzNzctZjZiYWRiNGQtYzQzYTBjMTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `Root/names`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:26:14.164011Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=7&id=YmQxOTRhMGUtMzdmMjYzNzctZjZiYWRiNGQtYzQzYTBjMTI=, ActorId: [7:7511667474529451856:2332], ActorState: ExecuteState, TraceId: 01jwtn7kgjdm7n12ft2dhggfhm, Create QueryResponse for error on request, msg: 2025-06-03T10:26:14.828652Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667481626451814:2144];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:14.828857Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e8/r3tmp/tmpIuzOMK/pdisk_1.dat 2025-06-03T10:26:14.858010Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26205, node 10 2025-06-03T10:26:14.876716Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:14.876726Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:14.876729Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:14.876783Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9851 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:14.929357Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:14.929387Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:14.935460Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:14.935874Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
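
The ABORTED failures above with code 2001 ("Transaction locks invalidated. Table: `Root/names`." / "Operation is aborting because locks are not valid") are the optimistic-lock conflict path: a concurrent commit touched the table between this transaction's reads and its commit, so the transaction is aborted and the client is expected to retry it from the start. A minimal sketch of that client-side retry loop, with a hypothetical transaction body standing in for the real call (none of these names come from the YDB SDK):

// Generic retry loop for the retryable ABORTED/"locks invalidated" case.
// ExecuteWithRetry re-runs the whole transaction body, because the reads
// it was based on are no longer valid after a lock break.
#include <functional>
#include <iostream>

enum class EStatus { Success, Aborted /* code 2001: locks invalidated */ };

EStatus ExecuteWithRetry(const std::function<EStatus()>& tx, int maxAttempts = 5) {
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        const EStatus st = tx();
        if (st != EStatus::Aborted) {
            return st;  // success, or an error that retrying cannot fix
        }
        std::cerr << "attempt " << attempt << ": locks invalidated, retrying\n";
    }
    return EStatus::Aborted;
}

int main() {
    int calls = 0;
    // Hypothetical body: conflicts on the first run, succeeds on the second.
    const auto tx = [&calls] { return ++calls < 2 ? EStatus::Aborted : EStatus::Success; };
    return ExecuteWithRetry(tx) == EStatus::Success ? 0 : 1;
}
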
2025-06-03T10:26:15.254820Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667485921419968:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.254859Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.254989Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667485921419980:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:15.255656Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:26:15.261649Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511667485921419982:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:26:15.348711Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511667485921420051:2645] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:15.882395Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667486788186871:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.882417Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e8/r3tmp/tmpXVTQpN/pdisk_1.dat 2025-06-03T10:26:15.906491Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24119, node 13 2025-06-03T10:26:15.932032Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:15.932051Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:15.932053Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:15.932111Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14486 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:15.983116Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:15.983154Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:15.985537Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:15.997113Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
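
The warning/retry sequence above is a benign first-use race, repeated once per test node: several TPoolFetcherActor instances report NOT_FOUND for the "default" pool, a TPoolCreatorActor retries after "doublechecking" the creating transaction, and the TX_PROXY error that follows ("path exist, request accepts it") means another actor already created /Root/.metadata/workload_manager/pools/default and the duplicate create was accepted as a no-op. A sketch of that idempotent-create shape, with hypothetical names (this is not the schemeshard code):

// Idempotent ensure-exists: losing the creation race still counts as success.
#include <iostream>

enum class ECreateResult { Done, AlreadyExists, Failed };

bool EnsureDefaultPool(ECreateResult (*create)()) {
    switch (create()) {
        case ECreateResult::Done:          return true;
        case ECreateResult::AlreadyExists: return true;  // someone else won the race
        case ECreateResult::Failed:        return false;
    }
    return false;
}

int main() {
    // Hypothetical creator that finds the pool already present.
    const auto create = [] { return ECreateResult::AlreadyExists; };
    std::cout << (EnsureDefaultPool(create) ? "pool ready" : "failed") << "\n";
}
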
2025-06-03T10:26:16.341444Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667491083155106:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.341507Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.341685Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667491083155118:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.342863Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:26:16.377158Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511667491083155120:2338], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:26:16.441056Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511667491083155198:2658] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> BasicStatistics::NotFullStatisticsColumnshard >> YdbLogStore::AlterLogStore [GOOD] >> TYqlDateTimeTests::SimpleOperations [GOOD] |61.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbLogStore::AlterLogStore [GOOD] Test command err: 2025-06-03T10:26:11.731000Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667468043882418:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:11.731036Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027da/r3tmp/tmpmpNS2s/pdisk_1.dat 2025-06-03T10:26:11.844376Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:11.844409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:11.849351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:11.853218Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2089, node 1 2025-06-03T10:26:11.883473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.883489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.883492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.883555Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:11.954384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:12.186875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 SUCCESS 3 rows in 0.004896s 2025-06-03T10:26:12.276148Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667472338850708:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.276183Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667472338850720:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.276194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.277027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:12.282242Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667472338850722:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:26:12.375100Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667472338850793:2783] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:12.481593Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jwtn7hnk0w75hr9ddawm88ak, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ1NWY3MGYtZGIwMjdjNDMtYTU5MDliZjQtZDA1OTM1Y2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root SUCCESS count returned 3 rows 2025-06-03T10:26:13.483522Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667474775882547:2184];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:13.483621Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027da/r3tmp/tmpKYFQd5/pdisk_1.dat 2025-06-03T10:26:13.520603Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23533, node 4 2025-06-03T10:26:13.583660Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:13.583703Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:13.584586Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:13.584597Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:13.584600Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:13.584659Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:13.587409Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6575 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:26:13.614676Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:6575 2025-06-03T10:26:13.909071Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:13.947532Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: Root/Foo/TimestampIndex/indexImplTable, pathId: , opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:13.947572Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715659:1, propose status:StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521, at schemeshard: 72057594046644480 2025-06-03T10:26:13.948294Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715659, database: /Root, subject: , status: StatusNameConflict, reason: Check failed: path: '/Root/Foo/TimestampIndex/indexImplTable', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521, operation: ALTER TABLE, path: Root/Foo/TimestampIndex/indexImplTable 2025-06-03T10:26:13.948373Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667474775883743:2885] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/Foo/TimestampIndex/indexImplTable\', error: path is not a common path (id: [OwnerId: 72057594046644480, LocalPathId: 4], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521" severity: 1 } Error 128: Administrative access denied TClient::Ls request: /Root/Foo/TimestampIndex/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946373986 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "Timestamp" Type: "Int64" TypeId: 3 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 ... 
pping columns in index table is not supported, at schemeshard: 72057594046644480 2025-06-03T10:26:14.004202Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:151: Abort operation: IgniteOperation fail to propose a part, opId: 281474976715663:1, at schemeshard: 72057594046644480, already accepted parts: 1, propose result status: StatusInvalidParameter, with reason: Adding or dropping columns in index table is not supported, tx message: Transaction { WorkingDir: "/Root/Foo/TimestampIndex" OperationType: ESchemeOpAlterTable AlterTable { Name: "indexImplTable" DropColumns { Name: "Timestamp" } } } TxId: 281474976715663 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" 2025-06-03T10:26:14.004264Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_index.cpp:219: TAlterTableIndex AbortPropose, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:26:14.004967Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715663, database: /Root, subject: root@builtin, status: StatusInvalidParameter, reason: Adding or dropping columns in index table is not supported, operation: ALTER TABLE, path: /Root/Foo/TimestampIndex/indexImplTable 2025-06-03T10:26:14.005026Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511667479070851204:3032] txid# 281474976715663, issues: { message: "Adding or dropping columns in index table is not supported" severity: 1 } Error 128: Adding or dropping columns in index table is not supported 2025-06-03T10:26:14.629356Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511667479746687489:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:14.629386Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027da/r3tmp/tmpYefmDR/pdisk_1.dat 2025-06-03T10:26:14.670634Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7582, node 7 2025-06-03T10:26:14.697687Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:14.697703Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:14.697705Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:14.697766Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65268 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:14.730289Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:14.730326Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:14.732166Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:14.751191Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:14.766335Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:15.693635Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667485353148476:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.693751Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027da/r3tmp/tmpsT8IZ0/pdisk_1.dat 2025-06-03T10:26:15.718743Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15299, node 10 2025-06-03T10:26:15.741570Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:15.741589Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:15.741591Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:15.741646Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7001 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:15.793921Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:15.793961Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:15.795583Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:15.801533Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:15.812071Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:15.886361Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:26:15.916113Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:26:16.632154Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667490947981544:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:16.632183Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027da/r3tmp/tmpX8SAJx/pdisk_1.dat 2025-06-03T10:26:16.661635Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22944, node 13 2025-06-03T10:26:16.709488Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:16.709514Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:16.709521Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-03T10:26:16.709639Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15927 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:26:16.734109Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:16.734149Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:16.746102Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:16.749913Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
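
Both direct ALTERs of /Root/Foo/TimestampIndex/indexImplTable in the trace above are rejected by the schemeshard: first with StatusNameConflict ("path is not a common path", followed in the client output by "Error 128: Administrative access denied"), then with StatusInvalidParameter ("Adding or dropping columns in index table is not supported"). Index implementation tables are system-managed, so their schema only changes through index operations on the parent table. A toy guard with the same shape (hypothetical types, not the schemeshard implementation):

// Toy propose-check: an index implementation table is not a common path,
// and column changes on it are rejected outright.
#include <iostream>
#include <string>

enum class EPathSubType { Common, SyncIndexImplTable };

struct TProposeResult {
    bool Ok;
    std::string Reason;
};

TProposeResult ProposeAlterTable(EPathSubType subType, bool changesColumns) {
    if (subType == EPathSubType::SyncIndexImplTable) {
        if (changesColumns) {
            return {false, "Adding or dropping columns in index table is not supported"};
        }
        return {false, "path is not a common path"};
    }
    return {true, ""};
}

int main() {
    const auto r = ProposeAlterTable(EPathSubType::SyncIndexImplTable, /*changesColumns=*/true);
    std::cout << (r.Ok ? std::string("accepted") : "rejected: " + r.Reason) << "\n";
}
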
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::SimpleOperations [GOOD] Test command err: 2025-06-03T10:26:10.110419Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667462429239858:2272];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.110448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e3/r3tmp/tmphzE8Q9/pdisk_1.dat 2025-06-03T10:26:10.216867Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:10.221667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.221694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.228573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 7270, node 1 2025-06-03T10:26:10.265748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:10.265765Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:10.265768Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:10.265829Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20056 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:10.328096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:10.335912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:10.676630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:10.769606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667462429240739:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.769650Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.769797Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667462429240751:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:10.770961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:10.785482Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667462429240753:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:10.857843Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667462429240822:2766] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:10.945379Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7g6aekw0kj2b8v9s0nhz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNhNjBlMTctNDI1MWNlNzctMWZiMWJhZTAtMjUxNGYwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:10.993474Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn7gchb5ncf6yqcn80vpsz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNhNjBlMTctNDI1MWNlNzctMWZiMWJhZTAtMjUxNGYwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:11.022063Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7gdn442nfwcyw18h7869, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNhNjBlMTctNDI1MWNlNzctMWZiMWJhZTAtMjUxNGYwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:11.055937Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtn7gem8489cfhy4cncy37h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNhNjBlMTctNDI1MWNlNzctMWZiMWJhZTAtMjUxNGYwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:11.078981Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtn7gfmf3vqw4akvbesdnm1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNhNjBlMTctNDI1MWNlNzctMWZiMWJhZTAtMjUxNGYwNzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:26:11.792733Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667466192342960:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:11.792768Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e3/r3tmp/tmpMwtrt1/pdisk_1.dat 2025-06-03T10:26:11.843544Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30825, node 4 2025-06-03T10:26:11.899042Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:11.899073Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:11.901506Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.901520Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.901522Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.901595Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:11.902000Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11517 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:11.941257Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
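
The recurring "KQP_EXECUTER ERROR: ... Database not set, use /Root" lines are the query planner noting that the request context carried an empty Database field and falling back to the root database; in these single-domain test setups the fallback is expected, which is why the surrounding tests still report [GOOD]. The behaviour reduces to a one-line default (hypothetical helper, not the kqp code):

// Hypothetical helper matching the "Database not set, use /Root" fallback.
#include <iostream>
#include <string>

std::string EffectiveDatabase(const std::string& requested) {
    return requested.empty() ? "/Root" : requested;
}

int main() {
    std::cout << EffectiveDatabase("") << "\n";        // prints /Root
    std::cout << EffectiveDatabase("/Tenant") << "\n"; // prints /Tenant
}
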
2025-06-03T10:26:11.946393Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:12.246264Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:12.275206Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667470487311368:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.275242Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.275367Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511667470487311380:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:12.276582Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, subo ... rkload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:15.203934Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7mdtfpgsgbxrjd1pmgp3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=Zjg3YzFhYWEtNTYwMzg3MjYtYjJiNzNmNjAtMjY1MDQ3Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:15.224663Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn7mh71xm01rjjd4515mah, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=Zjg3YzFhYWEtNTYwMzg3MjYtYjJiNzNmNjAtMjY1MDQ3Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:15.246787Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7mhvf2ka3b3f9cbbarnp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=Zjg3YzFhYWEtNTYwMzg3MjYtYjJiNzNmNjAtMjY1MDQ3Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:15.271955Z node 10 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtn7mjja1mn87pcfna4e5bp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=10&id=Zjg3YzFhYWEtNTYwMzg3MjYtYjJiNzNmNjAtMjY1MDQ3Y2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:15.868793Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667484623414776:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.868883Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027e3/r3tmp/tmpRLHKsZ/pdisk_1.dat 2025-06-03T10:26:15.890129Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14194, node 13 2025-06-03T10:26:15.912176Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:15.912204Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:15.912207Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:15.912292Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63630 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:15.968899Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:15.968934Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:15.970807Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:15.975471Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:16.390217Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:16.507374Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:16.533678Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667488918383253:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.533723Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.533907Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667488918383265:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:16.534870Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:26:16.543759Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511667488918383267:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:26:16.643540Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511667488918383334:2850] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:16.657355Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn7ntm2y4ghj2y8qrq9ehm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.680304Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7nyt4w4dqkyvc03qs24p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.784828Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtn7nzq2v9vmh3cq9baryxj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.787675Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtn7nzq2v9vmh3cq9baryxj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.881409Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtn7p3aeqm0q0jr1zdy4qrt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.884938Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtn7p3aeqm0q0jr1zdy4qrt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.925883Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. Ctx: { TraceId: 01jwtn7p5z3jv115b8sb3zdzmx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:16.987593Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtn7p75ev4gjpjm5ahjmv9c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.023585Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. 
Ctx: { TraceId: 01jwtn7p90860cy1tsn7dmssk0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.046496Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtn7pa22f6sxsaappdt024e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.065101Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jwtn7par83d9w7yn34kbvyez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.122808Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtn7pbfaf0bw632yf1wb1cz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.124891Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jwtn7pbfaf0bw632yf1wb1cz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjAwMTA3NWItYWVmNDUyMGMtMzZiMDg5OWQtM2YyYmRmYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests100Inflight10BlobSize1000 [GOOD] >> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1000BlobSize1000 >> TYqlDateTimeTests::DateKey [GOOD] |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink >> test_sql_streaming.py::test[suites-GroupByHoppingWindowTimeExtractorUnusedColumns-default.txt] |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> TYqlDateTimeTests::DateKey [GOOD] Test command err: 2025-06-03T10:26:09.565866Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667458492660150:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:09.565891Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f9/r3tmp/tmp87kdIg/pdisk_1.dat 2025-06-03T10:26:09.703082Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24426, node 1 2025-06-03T10:26:09.737380Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:09.737396Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:09.737399Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:09.737463Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12036 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:09.782225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:12036 2025-06-03T10:26:09.826341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:09.831752Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:09.867264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:09.867301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:09.869001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:10.334778Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667461783484348:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.339380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.339417Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.342855Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:26:10.345620Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:10.346083Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12036 2025-06-03T10:26:10.412287Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:12036 TClient::Ls request: /Root/table-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-1" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1748946370493 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... 
(TRUNCATED) 2025-06-03T10:26:10.506137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:12036 TClient::Ls request: /Root/table-2 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-2" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715661 CreateStep: 1748946370605 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-03T10:26:10.658043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 TClient is connected to server localhost:12036 TClient::Ls request: /Root/table-3 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "table-3" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715662 CreateStep: 1748946370731 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table-3" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 ... (TRUNCATED) 2025-06-03T10:26:10.740211Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:26:10.740435Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:26:10.894273Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7511667461783484548:2307], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:10.894320Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:10.979531Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:7511667461783484548:2307], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f9/r3tmp/tmp0Nk2n8/pdisk_1.dat 2025-06-03T10:26:11.443923Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:11.462194Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20527, node 4 2025-06-03T10:26:11.488556Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:11.488574Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:11.488576Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:11.488617Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9102 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true Cre ... ns;error=incorrect path status: LookupError; 2025-06-03T10:26:15.640606Z node 10 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[10:7511667484238597540:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.640742Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f9/r3tmp/tmp8gD0C8/pdisk_1.dat 2025-06-03T10:26:15.667176Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27710, node 10 2025-06-03T10:26:15.678044Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:15.678062Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:15.678064Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:15.678123Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6640 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:15.741067Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:15.741102Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:15.742845Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:15.747646Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:6640 2025-06-03T10:26:15.777229Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:26:15.781666Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:26:16.302614Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7511667487955089295:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:16.308036Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/ydb_ut_tenant/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:26:16.318135Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:16.318176Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:16.325811Z node 10 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 12 Cookie 12 2025-06-03T10:26:16.331481Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6640 2025-06-03T10:26:16.471927Z node 10 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 12 2025-06-03T10:26:16.472115Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:26:17.182634Z node 13 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[13:7511667494152774276:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:17.182786Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f9/r3tmp/tmpipNr0O/pdisk_1.dat 2025-06-03T10:26:17.214947Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23243, node 13 2025-06-03T10:26:17.234642Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:17.234672Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:17.234690Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:17.234791Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1303 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:17.285821Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:17.285865Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:17.288274Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:17.288696Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:26:17.582813Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:17.646392Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667494152775198:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:17.646421Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:17.646438Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511667494152775208:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:17.647547Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:26:17.677743Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511667494152775214:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:26:17.784283Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511667494152775289:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:17.824059Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn7pxdaa1qx9np6wwxn261, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjdmYzgyZGMtZjk2MDdjMjctZTUxNjUxNC05Y2RjZjIwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.867846Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn7q3e1w9rrqwry89nsgjq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjdmYzgyZGMtZjk2MDdjMjctZTUxNjUxNC05Y2RjZjIwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.911000Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtn7q4gdbye7wfaem0yb98q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjdmYzgyZGMtZjk2MDdjMjctZTUxNjUxNC05Y2RjZjIwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:26:17.959104Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtn7q618sqn4srjam45b3ty, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjdmYzgyZGMtZjk2MDdjMjctZTUxNjUxNC05Y2RjZjIwYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> test.py::test[ypath-limit_with_key-default.txt-Results] [GOOD] >> test.py::test[ypath-multi_range-default.txt-Results] >> TTxDataShardMiniKQL::CrossShard_3_AllToOne [GOOD] >> TTxDataShardMiniKQL::CrossShard_4_OneToAll >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] >> KqpDataIntegrityTrails::Select |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> YdbOlapStore::ManyTables [GOOD] >> YdbOlapStore::LogPagingBetween |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 18072, MsgBus: 9490 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001afc/r3tmp/tmpd6A0nE/pdisk_1.dat TServer::EnableGrpc on GrpcPort 18072, node 1 TClient is connected to server localhost:9490 TClient is connected to server localhost:9490 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |61.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLockAbortedTx [GOOD] Test command err: Trying to start YDB, gRPC: 16353, MsgBus: 12913 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001af4/r3tmp/tmpdBIle5/pdisk_1.dat TServer::EnableGrpc on GrpcPort 16353, node 1 TClient is connected to server localhost:12913 TClient is connected to server localhost:12913 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
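The KQP_WORKLOAD_SERVICE lines above trace a recurring bootstrap sequence: the first query on a fresh database finds no `default` resource pool (NOT_FOUND), a TPoolCreatorActor proposes ESchemeOpCreateResourcePool, and when several sessions race, the losers hit "path exist, request accepts it" and then re-fetch the pool (the "doublechecking" retries). A minimal Python sketch of that create-if-missing-and-verify pattern follows; `create_pool`, `pool_exists`, and `AlreadyExists` are hypothetical stand-ins for the scheme-shard request and the follow-up fetch, not real YDB SDK calls.

import time

class AlreadyExists(Exception):
    """Hypothetical stand-in for the scheme error 'path exist, request accepts it'."""

def ensure_default_pool(create_pool, pool_exists, retries=5, delay=0.1):
    # create_pool() proposes the pool; pool_exists() is the follow-up fetch
    # the log calls "doublechecking". Both are hypothetical callables.
    for _ in range(retries):
        try:
            create_pool()
            return True                # this session created the pool
        except AlreadyExists:
            return True                # a concurrent session won the race
        except Exception:
            time.sleep(delay)          # "Scheduled retry for error: ..."
        if pool_exists():
            return True                # creation landed despite the error
    return False
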
>> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink |61.4%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part6/pytest >> test.py::test[bigdate-table_common_type-default.txt-Results] [GOOD] |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] >> KqpDataIntegrityTrails::Select [GOOD] >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> test_sql_streaming.py::test[suites-GroupByHoppingWindowTimeExtractorUnusedColumns-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap+useOltpSink [GOOD] Test command err: Trying to start YDB, gRPC: 15576, MsgBus: 7707 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001adf/r3tmp/tmp7GZOX0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15576, node 1 TClient is connected to server localhost:7707 TClient is connected to server localhost:7707 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Select [GOOD] Test command err: Trying to start YDB, gRPC: 6203, MsgBus: 32210 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001adb/r3tmp/tmpm3qmdD/pdisk_1.dat TServer::EnableGrpc on GrpcPort 6203, node 1 TClient is connected to server localhost:32210 TClient is connected to server localhost:32210 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] >> KqpDataIntegrityTrails::Ddl >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD] |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/tx/unittest >> KqpSinkLocks::DifferentKeyUpdateOlap [GOOD] Test command err: Trying to start YDB, gRPC: 26678, MsgBus: 12494 2025-06-03T10:25:30.207600Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:25:30.207691Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:30.207719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f73/r3tmp/tmpRtLORT/pdisk_1.dat TServer::EnableGrpc on GrpcPort 26678, node 1 2025-06-03T10:25:30.512012Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:30.514371Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:25:30.514422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:25:30.514429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:25:30.514536Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:25:30.514631Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946329291127 != 1748946329291131 TClient is connected to server localhost:12494 TClient is connected to server localhost:12494 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:25:30.613321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.613365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.614172Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:25:30.617165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:30.717509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:31.176398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:31.831531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.234828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:25:32.898253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:1718:3314], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:32.898317Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:32.904858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.149686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.444476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:25:33.692222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.028290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.341464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:25:34.731864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.074645Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2389:3809], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.074691Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.074781Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2394:3814], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:25:35.076411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:25:35.248651Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2396:3816], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:25:35.294357Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2454:3855] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:25:35.545927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:25:35.728478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:25:36.041713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:25:36.577644Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715676; 2025-06-03T10:25:36.601993Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2699: SelfId: [1:3097:4037], SessionActorId: [1:2695:4037], Got LOCKS BROKEN for table. ShardID=72075186224037911, Sink=[1:3097:4037].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:25:36.602207Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:3097:4037], SessionActorId: [1:2695:4037], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/KeyValue`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[1:2695:4037]. isRollback=0 2025-06-03T10:25:36.602382Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=ZTMwYmFhMTgtZGIwNTE3Y2ItOTZlYWYzNTUtNGFmZmMzYjI=, ActorId: [1:2695:4037], ActorState: ExecuteState, TraceId: 01jwtn6ejd7wp97jnftsz0qdks, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [1:3107:4037] from: [1:3097:4037] 2025-06-03T10:25:36.602545Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:3107:4037] TxId: 281474976715676. Ctx: { TraceId: 01jwtn6ejd7wp97jnftsz0qdks, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTMwYmFhMTgtZGIwNTE3Y2ItOTZlYWYzNTUtNGFmZmMzYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/KeyValue`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:25:36.602732Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZTMwYmFhMTgtZGIwNTE3Y2ItOTZlYWYzNTUtNGFmZmMzYjI=, ActorId: [1:2695:4037], ActorState: ExecuteState, TraceId: 01jwtn6ejd7wp97jnftsz0qdks, Create QueryResponse for error on request, msg: GRpc shutdown warning: le ... mr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=39;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271176Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=40;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037996; 2025-06-03T10:26:15.271186Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=41;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271197Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=42;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271206Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=43;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037996; 2025-06-03T10:26:15.271216Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=44;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271226Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=45;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037996; 2025-06-03T10:26:15.271234Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=46;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037996; 2025-06-03T10:26:15.271244Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=47;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271254Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=48;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897,72075186224037913,72075186224037970,72075186224037993;receive=72075186224037896; 2025-06-03T10:26:15.271337Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=51;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271349Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=52;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271360Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=53;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271371Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=54;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271381Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=55;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271391Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=56;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271402Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=57;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271412Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=58;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271421Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=59;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271430Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=60;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271440Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=61;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271449Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=62;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271458Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=63;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037897; 2025-06-03T10:26:15.271467Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=64;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913,72075186224037993;receive=72075186224037970; 2025-06-03T10:26:15.271500Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=66;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271509Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=67;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271527Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=68;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271537Z 
node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=69;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271546Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=70;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271556Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=71;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271565Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[3:7511667479927121930:2559];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037981;local_tx_no=72;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037913;receive=72075186224037993; 2025-06-03T10:26:15.271771Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037981;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:15.381819Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest |61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 23430, MsgBus: 7971 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ac5/r3tmp/tmpq1zu7F/pdisk_1.dat TServer::EnableGrpc on GrpcPort 23430, node 1 TClient is connected to server localhost:7971 TClient is connected to server localhost:7971 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert-LogEnabled-UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 16054, MsgBus: 20634
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ac8/r3tmp/tmpZHid3g/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 16054, node 1
TClient is connected to server localhost:20634
TClient is connected to server localhost:20634
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink
>> test.py::test[ypath-multi_range-default.txt-Results] [GOOD]
>> CostMetricsPutBlock4Plus2::TestPut4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD]
>> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize2000000
>> KqpDataIntegrityTrails::Ddl [GOOD]
>> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink
>> KqpDataIntegrityTrails::BrokenReadLock-UseSink
|61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
>> test_sql_streaming.py::test[suites-GroupByHoppingWithDataWatermarks-default.txt] [GOOD]
>> test_sql_streaming.py::test[suites-ReadTopic-default.txt]
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink
|61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Ddl [GOOD]
Test command err: Trying to start YDB, gRPC: 10730, MsgBus: 19321
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ab6/r3tmp/tmpDLGTNa/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 10730, node 1
TClient is connected to server localhost:19321
TClient is connected to server localhost:19321
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
|61.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink
>> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD]
>> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD]
>> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests1Inflight1BlobSize2000000 [GOOD]
>> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize2000000
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD]
>> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD]
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::Upsert+LogEnabled-UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 15097, MsgBus: 14493
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001aa7/r3tmp/tmppXE48r/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 15097, node 1
TClient is connected to server localhost:14493
TClient is connected to server localhost:14493
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
>> KqpDataIntegrityTrails::BrokenReadLock+UseSink
>> YdbOlapStore::LogPagingBetween [GOOD]
>> YdbOlapStore::LogWithUnionAllAscending
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock-UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 24287, MsgBus: 20463
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001a9f/r3tmp/tmpmTW3LY/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 24287, node 1
TClient is connected to server localhost:20463
TClient is connected to server localhost:20463
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService+isOlap-useOltpSink [GOOD]
Test command err: Trying to start YDB, gRPC: 3228, MsgBus: 8362
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001aaa/r3tmp/tmpFIQk4y/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 3228, node 1
TClient is connected to server localhost:8362
TClient is connected to server localhost:8362
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
>> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD]
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> CostMetricsGetBlock4Plus2::TestGet4Plus2BlockRequests10000Inflight1000BlobSize1000 [GOOD]
Test command err: RandomSeed# 956912719917758582
2025-06-03T10:26:00.040814Z 1 00h00m00.010512s :BS_LOCALRECOVERY CRIT: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) LocalRecovery FINISHED: {RecoveryDuration# INPROGRESS RecoveredLogStartLsn# 0 SuccessfulRecovery# false EmptyLogoBlobsDb# true EmptyBlocksDb# true EmptyBarriersDb# true EmptySyncLog# true EmptySyncer# false EmptyHuge# true LogRecLogoBlob# 0 LogRecBlock# 0 LogRecGC# 0 LogRecSyncLogIdx# 0 LogRecLogoBlobsDB# 0 LogRecBlocksDB# 0 LogRecBarriersDB# 0 LogRecCutLog# 0 LogRecLocalSyncData# 0 LogRecSyncerState# 0 LogRecHandoffDel# 0 LogRecHugeBlobAllocChunk# 0 LogRecHugeBlobFreeChunk# 0 LogRecHugeBlobEntryPoint# 0 LogRecHugeLogoBlob# 0 LogRecLogoBlobOpt# 0 LogRecPhantomBlob# 0 LogRecAnubisOsirisPut# 0 LogRecAddBulkSst# 0 LogoBlobFreshApply# 0 LogoBlobFreshSkip# 0 LogoBlobsBatchFreshApply# 0 LogoBlobsBatchFreshSkip#0 LogoBlobSyncLogApply# 0 LogoBlobSyncLogSkip# 0 HugeLogoBlobFreshApply# 0 HugeLogoBlobFreshSkip# 0 HugeLogoBlobSyncLogApply# 0 HugeLogoBlobSyncLogSkip# 0 BlockFreshApply# 0 BlockFreshSkip# 0 BlocksBatchFreshApply# 0 BlocksBatchFreshSkip# 0 BlockSyncLogApply# 0 BlockSyncLogSkip# 0 BarrierFreshApply# 0 BarrierFreshSkip# 0 BarriersBatchFreshApply# 0 BarriersBatchFreshSkip# 0 BarrierSyncLogApply# 0 BarrierSyncLogSkip# 0 GCBarrierFreshApply# 0 GCBarrierFreshSkip# 0 GCLogoBlobFreshApply# 0 GCLogoBlobFreshSkip# 0 GCSyncLogApply# 0 GCSyncLogSkip# 0 TryPutLogoBlobSyncData# 0 TryPutBlockSyncData# 0 TryPutBarrierSyncData# 0 HandoffDelFreshApply# 0 HandoffDelFreshSkip# 0 HugeBlobAllocChunkApply# 0 HugeBlobAllocChunkSkip# 0 HugeBlobFreeChunkApply# 0 HugeBlobFreeChunkSkip# 0 HugeLogoBlobToHeapApply# 0 HugeLogoBlobToHeapSkip# 0 HugeSlotsDelGenericApply# 0 HugeSlotsDelGenericSkip# 0 TryPutLogoBlobPhantom# 0 RecoveryLogDiapason# [18446744073709551615 0] StartingPoints# {[SyncerState 12][HugeBlobEntryPoint 1]} ReadLogReplies# {}} reason# Entry point for Syncer check failed, ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 6 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 6 } } status# ERROR;VDISK LOCAL RECOVERY FAILURE DUE TO LOGICAL ERROR
2025-06-03T10:26:00.043363Z 1 00h00m30.000512s :BS_PROXY_PUT ERROR: [79b674d74e8df190] Result# TEvPutResult {Id# [1:1:1:1:3:4:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 2181038080 BlobId# [1:1:1:1:3:4:0] Reported ErrorReasons# [ { OrderNumber# 0 VDiskId# [82000000:1:0:0:0] NodeId# 1 ErrorReasons# [ "BS_QUEUE: VDISK_ERROR_STATE status in response", ] } ] Part situations# [ { OrderNumber# 0 Situations# E } ] " ApproximateFreeSpaceShare# 0} GroupId# 2181038080 Marker# BPP12
2025-06-03T10:26:00.185395Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.187144Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.188768Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.190391Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.191921Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.193398Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.194892Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: ... 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.335734Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.337252Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.338591Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.339818Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.341188Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.342439Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.343714Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.344754Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
2025-06-03T10:26:00.345950Z 1 00h00m00.000000s :BS_CONTROLLER ALERT: {BSCTXM00@migrate.cpp:253} CompatibilityInfo check failed ErrorReason# Versions are not compatible neither by common rule nor by provided rule sets, Stored CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 1 Minor: 19 Hotfix: 0 } } Current CompatibilityInfo# { Application: "ydb" Version { Year: 23 Major: 3 Minor: 1 Hotfix: 0 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } CanLoadFrom { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 4 } StoresReadableBy { Application: "ydb" LowerLimit { Year: 0 Major: 0 Minor: 0 Hotfix: 0 } UpperLimit { Year: 1000 Major: 1000 Minor: 1000 Hotfix: 1000 } ComponentId: 5 } }
>> test_sql_streaming.py::test[suites-ReadTopic-default.txt] [GOOD]
>> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt]
>> AnalyzeDatashard::DropTableNavigateError [GOOD]
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap-useOltpSink [GOOD]
Test command err: Trying to start YDB, gRPC: 63256, MsgBus: 61008
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001a84/r3tmp/tmpUY3jxl/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 63256, node 1
TClient is connected to server localhost:61008
TClient is connected to server localhost:61008
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
>> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::DropTableNavigateError [GOOD]
Test command err: 2025-06-03T10:23:56.876375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-03T10:23:56.876457Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-03T10:23:56.876488Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a98/r3tmp/tmpmvAJ1a/pdisk_1.dat
2025-06-03T10:23:57.120245Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 1764, node 1
2025-06-03T10:23:57.260751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:23:57.260781Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:23:57.260786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:23:57.260905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
2025-06-03T10:23:57.261645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-06-03T10:23:57.364726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:57.364775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:57.381602Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:5325
2025-06-03T10:23:57.815077Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-06-03T10:23:58.871481Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2
2025-06-03T10:23:58.881375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:58.881413Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:58.941688Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2
2025-06-03T10:23:58.942311Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:59.130036Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130352Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130417Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130451Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130502Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130522Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130545Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130564Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.130584Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:23:59.291313Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:23:59.291361Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:23:59.308072Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:23:59.349946Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:23:59.362576Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor
2025-06-03T10:23:59.362630Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute
2025-06-03T10:23:59.370247Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete
2025-06-03T10:23:59.370552Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute
2025-06-03T10:23:59.370584Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0
2025-06-03T10:23:59.370591Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0
2025-06-03T10:23:59.370598Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0
2025-06-03T10:23:59.370605Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0
2025-06-03T10:23:59.370611Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0
2025-06-03T10:23:59.370619Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete
2025-06-03T10:23:59.370806Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes
2025-06-03T10:23:59.389502Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897
2025-06-03T10:23:59.389541Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897
2025-06-03T10:23:59.391636Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1877:2607]
2025-06-03T10:23:59.395674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2617]
2025-06-03T10:23:59.395763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2617], schemeshard id = 72075186224037897
2025-06-03T10:23:59.397983Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database
2025-06-03T10:23:59.405194Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown
2025-06-03T10:23:59.405229Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table
2025-06-03T10:23:59.405243Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics
2025-06-03T10:23:59.409004Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897
2025-06-03T10:23:59.411177Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 }
2025-06-03T10:23:59.411223Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657
2025-06-03T10:23:59.533107Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete
2025-06-03T10:23:59.754207Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking...
2025-06-03T10:23:59.826633Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing
2025-06-03T10:24:00.493758Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2219:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:24:00.493852Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:24:00.505532Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897
2025-06-03T10:24:00.837550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2523:3112], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:24:00.837634Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:24:00.838264Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2528:3116]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:24:00.838328Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ]
2025-06-03T10:24:00.838342Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2530:3118]
2025-06-03T10:24:00.838355Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2530:3118]
2025-06-03T10:24:00.838597Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2531:2984]
2025-06-03T10:24:00.838730Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2530:3118], server id = [2:2531:2984], tablet id = 72075186224037894, status = OK
2025-06-03T10:24:00.838808Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2531:2984], node id = 1, have schemeshards count = 0, need schemeshards count = 1
2025-06-03T10:24:00.838826Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1
2025-06-03T10:24:00.838896Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:24:00.838909Z node 1 :STATISTICS DEBUG: ... 86224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:25:49.669802Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:25:53.045594Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout
2025-06-03T10:25:55.671409Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:25:55.671630Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:25:59.143347Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout
2025-06-03T10:26:01.663505Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:26:01.663700Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:26:05.492813Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout
2025-06-03T10:26:08.272850Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:26:08.273060Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:26:12.215550Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout
2025-06-03T10:26:14.720033Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:26:14.720320Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:26:18.438447Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout
2025-06-03T10:26:19.729738Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480
2025-06-03T10:26:19.729779Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480
2025-06-03T10:26:19.729787Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480
2025-06-03T10:26:19.729794Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480
2025-06-03T10:26:21.177127Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1
2025-06-03T10:26:21.177341Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:26:21.218825Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037897
2025-06-03T10:26:21.218860Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 188.000000s, at schemeshard: 72075186224037897
2025-06-03T10:26:21.218958Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 25
2025-06-03T10:26:21.230322Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete
2025-06-03T10:26:22.382181Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal
2025-06-03T10:26:22.382223Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId
2025-06-03T10:26:22.382230Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests.
2025-06-03T10:26:22.382243Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table.
2025-06-03T10:26:22.382251Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:26:22.382359Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database
2025-06-03T10:26:22.383472Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:26:22.384337Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6691:4717], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:22.384357Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6702:4722], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:22.384377Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:22.388848Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897
2025-06-03T10:26:22.402049Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6705:4725], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking }
2025-06-03T10:26:22.570215Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6801:4771] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:26:22.590544Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6830:4786]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:26:22.590624Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ]
2025-06-03T10:26:22.590641Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6832:4788]
2025-06-03T10:26:22.590654Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6832:4788]
2025-06-03T10:26:22.590777Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6833:4789]
2025-06-03T10:26:22.590808Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6833:4789], node id = 2, have schemeshards count = 0, need schemeshards count = 1
2025-06-03T10:26:22.590818Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1
2025-06-03T10:26:22.590846Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6832:4788], server id = [2:6833:4789], tablet id = 72075186224037894, status = OK
2025-06-03T10:26:22.590862Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2
2025-06-03T10:26:22.590874Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6830:4786], StatRequests.size() = 1
2025-06-03T10:26:22.636009Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MWJhNzk3NzgtNDQ5Y2YzNDYtNDJhYmI2ZTktZDg4NTI4YTE=, TxId:
2025-06-03T10:26:22.636044Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MWJhNzk3NzgtNDQ5Y2YzNDYtNDJhYmI2ZTktZDg4NTI4YTE=, TxId:
2025-06-03T10:26:22.636194Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute
2025-06-03T10:26:22.647872Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:26:22.647897Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply.
2025-06-03T10:26:22.689231Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck
2025-06-03T10:26:22.689284Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0
2025-06-03T10:26:22.771772Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6832:4788], schemeshard count = 1
2025-06-03T10:26:23.735479Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze
2025-06-03T10:26:23.735513Z node 2 :STATISTICS ERROR: aggregator_impl.cpp:805: [72075186224037894] IsColumnTable. traversal path [OwnerId: 72075186224037897, LocalPathId: 4] is not known to schemeshard
2025-06-03T10:26:23.735593Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database
2025-06-03T10:26:23.736148Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:26:23.738281Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjVhNWFlMzAtODZjY2M0YTAtODU0ZmU5ZS00NTkzMTk3, TxId:
2025-06-03T10:26:23.738309Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjVhNWFlMzAtODZjY2M0YTAtODU0ZmU5ZS00NTkzMTk3, TxId:
2025-06-03T10:26:23.738451Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute
2025-06-03T10:26:23.750030Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4]
2025-06-03T10:26:23.750060Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2829:3258]
2025-06-03T10:26:23.750220Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6908:4836]], StatType[ 2 ], StatRequestsCount[ 1 ]
2025-06-03T10:26:23.750794Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ]
2025-06-03T10:26:23.750808Z node 2 :STATISTICS ERROR: service_impl.cpp:796: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] Navigate failed
2025-06-03T10:26:23.750813Z node 2 :STATISTICS DEBUG: service_impl.cpp:1304: ReplyFailed(), request id = 2
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
>> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight1BlobSize2000000 [GOOD]
>> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight1BlobSize2000000
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest
|61.5%| [TM] {default-linux-x86_64, pic, relwithdebinfo} ydb/library/yql/tests/sql/hybrid_file/part6/pytest >> test.py::test[ypath-multi_range-default.txt-Results] [GOOD]
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::BrokenReadLock+UseSink [GOOD]
Test command err: Trying to start YDB, gRPC: 30804, MsgBus: 5985
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001a67/r3tmp/tmpMC2QOq/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 30804, node 1
TClient is connected to server localhost:5985
TClient is connected to server localhost:5985
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
waiting...
waiting...
waiting...
waiting...
waiting...
|61.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> BasicStatistics::SimpleGlobalIndex |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [FAIL] |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-ReadTopicGroupWriteToSolomon-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] >> ColumnStatistics::CountMinSketchServerlessStatistics >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoDatabases >> HttpRequest::ProbeServerless >> BasicStatistics::Serverless |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_read_only_pdisk/unittest >> BSCReadOnlyPDisk::ReadOnlyOneByOne [GOOD] Test command err: RandomSeed# 8422892465987531876 |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHopWithDataWatermarks-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindow-default.txt] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/data_integrity/unittest >> KqpDataIntegrityTrails::UpsertEvWriteQueryService-isOlap+useOltpSink [FAIL] Test command err: Trying to start YDB, gRPC: 11529, MsgBus: 17337 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001a8e/r3tmp/tmpt91Ebt/pdisk_1.dat TServer::EnableGrpc on GrpcPort 11529, node 1 TClient is connected to server localhost:17337 TClient is connected to server localhost:17337 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... assertion failed at ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp:24, void NKikimr::NKqp::(anonymous namespace)::CheckRegexMatch(const TString &, const TVector> &): (expectedMatchCount == matchCount) failed: (2 != 3) 0. /-S/util/system/backtrace.cpp:284: ?? @ 0x13AB235B 1. /tmp//-S/library/cpp/testing/unittest/registar.cpp:46: RaiseError @ 0x13C697C8 2. /tmp//-S/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp:24: CheckRegexMatch @ 0x13986BB7 3. /tmp//-S/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp:167: UpsertEvWriteQueryService @ 0x1399D75F 4. /tmp//-S/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp:42: operator() @ 0x1398FAC6 5. /tmp//-S/library/cpp/testing/unittest/registar.cpp:373: Run @ 0x13C6B67D 6. /tmp//-S/ydb/core/kqp/ut/data_integrity/kqp_data_integrity_trails_ut.cpp:42: Execute @ 0x1398F320 7. /tmp//-S/library/cpp/testing/unittest/registar.cpp:494: Execute @ 0x13C6BDF2 8. /tmp//-S/library/cpp/testing/unittest/utmain.cpp:872: RunMain @ 0x13C7D99C 9. ??:0: ?? @ 0x7FD27230ED8F 10. ??:0: ?? @ 0x7FD27230EE3F 11. ??:0: ?? 
@ 0x129E4028 |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-ReadTopicWithMetadata-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataWithFilter-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHop-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] |61.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight1BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize2000000 >> BasicStatistics::Simple ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::CrossShard_4_OneToAll [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:115:2057] recipient: [1:109:2140] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:115:2057] recipient: [1:109:2140] Leader for TabletID 9437184 is [1:132:2154] sender: [1:133:2057] recipient: [1:109:2140] 2025-06-03T10:25:51.083020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:51.083052Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:51.083929Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:51.088612Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:51.088805Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:51.088889Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:51.099482Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:51.102526Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:51.102569Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:51.102746Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:51.102754Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:51.102760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:51.102817Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:51.102827Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:51.102840Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:201:2154] in generation 2 Leader for 
TabletID 9437184 is [1:132:2154] sender: [1:211:2057] recipient: [1:14:2061] 2025-06-03T10:25:51.133901Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:51.142917Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:51.143040Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:51.143070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:216:2213] 2025-06-03T10:25:51.143076Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:51.143082Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:51.143089Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.143159Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.143168Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.143276Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:51.143309Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:51.143322Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.143330Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:51.143339Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:51.143348Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:51.143351Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:51.143356Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:51.143360Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:51.143370Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:212:2210], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.143375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.143386Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:210:2209], serverId# [1:212:2210], sessionId# [0:0:0] 2025-06-03T10:25:51.143913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 
8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:51.143925Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:51.143941Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:51.143987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:51.144000Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:51.144011Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:51.144021Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:51.144026Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:51.144034Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:51.144039Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.144138Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:51.144144Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:51.144148Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:51.144152Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.144167Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:51.144171Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:51.144176Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:51.144181Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.144189Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:51.157329Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:51.157368Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:51.157378Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:51.157394Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:51.157438Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration 
request in state WaitScheme 2025-06-03T10:25:51.157598Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:222:2219], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.157610Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:51.157619Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:221:2218], serverId# [1:222:2219], sessionId# [0:0:0] 2025-06-03T10:25:51.157648Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 2 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:51.157655Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:51.157716Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [2:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:51.157728Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [2:1] at 9437184 is Executed 2025-06-03T10:25:51.157735Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [2:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:51.157742Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [2:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:51.158705Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 2 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 2 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:51.158729Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:51.158816Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.158823Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:51.158836Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:51.158844Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:51.158851Z node 1 :TX_DATASHARD TRACE: datashard_pipelin ... 
bletID: 9437186 } 2025-06-03T10:26:26.452827Z node 41 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-03T10:26:26.452844Z node 41 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [41:456:2396], Recipient [41:456:2396]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:26:26.452847Z node 41 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:26:26.452850Z node 41 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437186 2025-06-03T10:26:26.452853Z node 41 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:26:26.452856Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-03T10:26:26.452859Z node 41 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [7:6] in PlanQueue unit at 9437186 2025-06-03T10:26:26.452861Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PlanQueue 2025-06-03T10:26:26.452864Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452867Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PlanQueue 2025-06-03T10:26:26.452869Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadTxDetails 2025-06-03T10:26:26.452871Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadTxDetails 2025-06-03T10:26:26.452934Z node 41 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 9437186 loaded tx from db 7:6 keys extracted: 1 2025-06-03T10:26:26.452937Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452939Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadTxDetails 2025-06-03T10:26:26.452941Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit FinalizeDataTxPlan 2025-06-03T10:26:26.452944Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit FinalizeDataTxPlan 2025-06-03T10:26:26.452947Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452949Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit FinalizeDataTxPlan 2025-06-03T10:26:26.452951Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildAndWaitDependencies 2025-06-03T10:26:26.452953Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildAndWaitDependencies 2025-06-03T10:26:26.452958Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [7:6] is the new logically complete end at 9437186 2025-06-03T10:26:26.452961Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [7:6] is the new logically incomplete end at 9437186 2025-06-03T10:26:26.452963Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated 
operation [7:6] at 9437186 2025-06-03T10:26:26.452966Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452968Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildAndWaitDependencies 2025-06-03T10:26:26.452970Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit BuildDataTxOutRS 2025-06-03T10:26:26.452972Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit BuildDataTxOutRS 2025-06-03T10:26:26.452976Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452978Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit BuildDataTxOutRS 2025-06-03T10:26:26.452981Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit StoreAndSendOutRS 2025-06-03T10:26:26.452983Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit StoreAndSendOutRS 2025-06-03T10:26:26.452985Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452988Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit StoreAndSendOutRS 2025-06-03T10:26:26.452991Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit PrepareDataTxInRS 2025-06-03T10:26:26.452993Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit PrepareDataTxInRS 2025-06-03T10:26:26.452996Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.452998Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit PrepareDataTxInRS 2025-06-03T10:26:26.453000Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit LoadAndWaitInRS 2025-06-03T10:26:26.453002Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit LoadAndWaitInRS 2025-06-03T10:26:26.453004Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.453007Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit LoadAndWaitInRS 2025-06-03T10:26:26.453009Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit ExecuteDataTx 2025-06-03T10:26:26.453011Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit ExecuteDataTx 2025-06-03T10:26:26.453059Z node 41 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [7:6] at tablet 9437186 with status COMPLETE 2025-06-03T10:26:26.453064Z node 41 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [7:6] at 9437186: {NSelectRow: 1, NSelectRange: 0, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 1, SelectRowBytes: 10, SelectRangeRows: 0, SelectRangeBytes: 0, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:26:26.453069Z node 41 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.453071Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit ExecuteDataTx 2025-06-03T10:26:26.453074Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompleteOperation 2025-06-03T10:26:26.453076Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompleteOperation 2025-06-03T10:26:26.453100Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is DelayComplete 2025-06-03T10:26:26.453104Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompleteOperation 2025-06-03T10:26:26.453107Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [7:6] at 9437186 to execution unit CompletedOperations 2025-06-03T10:26:26.453111Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [7:6] at 9437186 on unit CompletedOperations 2025-06-03T10:26:26.453115Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [7:6] at 9437186 is Executed 2025-06-03T10:26:26.453119Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [7:6] at 9437186 executing on unit CompletedOperations 2025-06-03T10:26:26.453122Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [7:6] at 9437186 has finished 2025-06-03T10:26:26.453125Z node 41 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437186 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:26:26.453129Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437186 2025-06-03T10:26:26.453134Z node 41 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437186 has no attached operations 2025-06-03T10:26:26.453138Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 9437186 2025-06-03T10:26:26.464386Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437186 step# 7 txid# 6} 2025-06-03T10:26:26.464415Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437186 step# 7} 2025-06-03T10:26:26.464431Z node 41 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437186 2025-06-03T10:26:26.464444Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437186 on unit CompleteOperation 2025-06-03T10:26:26.464466Z node 41 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437186 at tablet 9437186 send result to client [41:100:2134], exec latency: 0 ms, propose latency: 2 ms 2025-06-03T10:26:26.464477Z node 41 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437186 2025-06-03T10:26:26.464548Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437185 step# 7 txid# 6} 2025-06-03T10:26:26.464551Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437185 step# 7} 2025-06-03T10:26:26.464558Z node 41 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437185 2025-06-03T10:26:26.464561Z node 41 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437185 on unit CompleteOperation 2025-06-03T10:26:26.464566Z node 41 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437185 at tablet 9437185 send result to client [41:100:2134], exec latency: 0 ms, propose latency: 2 ms 2025-06-03T10:26:26.464570Z node 41 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437185 2025-06-03T10:26:26.464603Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:91: Sending '{TEvPlanStepAck TabletId# 9437184 step# 7 txid# 6} 2025-06-03T10:26:26.464606Z node 41 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 9437184 step# 7} 2025-06-03T10:26:26.464612Z node 41 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:26:26.464615Z node 41 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [7:6] at 9437184 on unit CompleteOperation 2025-06-03T10:26:26.464621Z node 41 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [7 : 6] from 9437184 at tablet 9437184 send result to client [41:100:2134], exec latency: 0 ms, propose latency: 2 ms 2025-06-03T10:26:26.464624Z node 41 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 >> HttpRequest::Status |61.7%| [TA] $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... results_accumulator.log} |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::ServerlessGlobalIndex |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> HttpRequest::Analyze >> BasicStatistics::TwoTables >> test_sql_streaming.py::test[suites-GroupByHoppingWindow-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowByStringKey-default.txt] >> test_sql_streaming.py::test[suites-ReadTopicWithSchema-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataInsideFilter-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests2Inflight2BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize2000000 |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHopByStringKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] |61.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |61.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol |61.7%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/data_integrity/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest |61.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_ftol/ydb-core-blobstorage-dsproxy-ut_ftol >> HttpRequest::AnalyzeServerless >> YdbOlapStore::LogWithUnionAllAscending [GOOD] >> YdbOlapStore::LogWithUnionAllDescending |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHoppingWindowByStringKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowExprKey-default.txt] >> test_sql_streaming.py::test[suites-ReadTwoTopics-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [GOOD] |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests10Inflight10BlobSize2000000 [GOOD] >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize2000000 >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHopExprKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableHiveDistributionZeroNodes [GOOD] Test command err: 2025-06-03T10:23:57.058632Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:57.058719Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:57.058748Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a8e/r3tmp/tmpUKAmz9/pdisk_1.dat 2025-06-03T10:23:57.196666Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64939, node 1 2025-06-03T10:23:57.335073Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:57.335119Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:57.335124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:57.335239Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:57.335908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:57.441984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:57.442039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:57.456406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13995 2025-06-03T10:23:57.875309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:58.854813Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:23:58.862845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:58.862881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:58.911022Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:58.911789Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:59.105688Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.105907Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106093Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:23:59.106137Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106194Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106215Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106235Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106268Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.106289Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.273891Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:59.273955Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:59.290036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:59.338302Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:59.350429Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:23:59.350455Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:23:59.358229Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:23:59.358461Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:23:59.358487Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:23:59.358494Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:23:59.358515Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:23:59.358523Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:23:59.358529Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:23:59.358537Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:23:59.358656Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:23:59.375284Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:59.375322Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2597], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:59.376839Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1876:2608] 2025-06-03T10:23:59.377969Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2618] 
2025-06-03T10:23:59.378091Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2618], schemeshard id = 72075186224037897 2025-06-03T10:23:59.380011Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:23:59.401868Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:23:59.401895Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:23:59.401910Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:23:59.406225Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:23:59.408122Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:23:59.408164Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:23:59.527054Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:23:59.689262Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:23:59.754078Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:00.560425Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2221:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:00.560506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:00.566475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:00.621386Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:00.621512Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:00.621589Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:00.621671Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:00.621722Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:00.621773Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:00.621821Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:00.621877Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 
equest accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:26.037176Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8337:6163]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:26.037251Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:26.037269Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8339:6165] 2025-06-03T10:26:26.037282Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8339:6165] 2025-06-03T10:26:26.037434Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8340:6166] 2025-06-03T10:26:26.037496Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8339:6165], server id = [2:8340:6166], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:26.037508Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8340:6166], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:26.037525Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:26.037558Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:26.037579Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8337:6163], StatRequests.size() = 1 2025-06-03T10:26:26.055847Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjkwNWY1NGEtMjNlMDBhYzctOTBhZDYxMGYtMTMyMWE0ZDk=, TxId: 2025-06-03T10:26:26.055876Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjkwNWY1NGEtMjNlMDBhYzctOTBhZDYxMGYtMTMyMWE0ZDk=, TxId: 2025-06-03T10:26:26.056004Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:26.067499Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:26.067529Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:26.109712Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:26.109752Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:26.203368Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8339:6165], schemeshard count = 1 2025-06-03T10:26:28.390247Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:28.390285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:26:28.390296Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. 
Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:28.390304Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:28.391326Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:28.403502Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:28.403690Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:28.403713Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:28.404004Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 2 2025-06-03T10:26:28.404014Z node 2 :STATISTICS WARN: tx_response_tablet_distribution.cpp:65: [72075186224037894] TTxResponseTabletDistribution::Execute. Some tablets are probably in Hive boot queue 2025-06-03T10:26:28.404023Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:29.596877Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:29.608073Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:29.608136Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:29.608312Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8493:6241], server id = [2:8497:6245], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:29.608406Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8493:6241], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.608455Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8494:6242], server id = [2:8498:6246], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:29.608460Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8494:6242], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.608477Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8495:6243], server id = [2:8499:6247], tablet id = 72075186224037901, status = OK 2025-06-03T10:26:29.608481Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8495:6243], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.608964Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8496:6244], server id = [2:8500:6248], tablet id = 72075186224037902, status = OK 2025-06-03T10:26:29.608977Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8496:6244], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.609956Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:29.610068Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8493:6241], server id = [2:8497:6245], 
tablet id = 72075186224037899 2025-06-03T10:26:29.610072Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.610164Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:29.610237Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8494:6242], server id = [2:8498:6246], tablet id = 72075186224037900 2025-06-03T10:26:29.610240Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.610287Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-03T10:26:29.610348Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8495:6243], server id = [2:8499:6247], tablet id = 72075186224037901 2025-06-03T10:26:29.610350Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.610371Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-03T10:26:29.610376Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:29.610403Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:29.610424Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:29.610503Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:29.610923Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8496:6244], server id = [2:8500:6248], tablet id = 72075186224037902 2025-06-03T10:26:29.610929Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.611100Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:29.616176Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8529:6273]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:29.616233Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:29.616239Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8529:6273], StatRequests.size() = 1 2025-06-03T10:26:29.673944Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDA3NTM3ZTItODczODUyNTMtMjlkMzExMTUtMjg2ZTFjNGU=, TxId: 2025-06-03T10:26:29.673972Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDA3NTM3ZTItODczODUyNTMtMjlkMzExMTUtMjg2ZTFjNGU=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-03T10:26:29.674121Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8542:6279]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:29.674178Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:29.674288Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:29.674293Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:29.674977Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:29.674990Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:29.674997Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:29.675931Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-ReadTopicWithMetadataNestedDeep-default.txt] [GOOD] |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> TOlapReboots::CreateDropTable [GOOD] >> TOlapReboots::CreateDropStore ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootColumnshard [GOOD] Test command err: 2025-06-03T10:23:54.296139Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:54.296173Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:23:54.296181Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002ab0/r3tmp/tmpdk90ya/pdisk_1.dat 2025-06-03T10:23:54.431093Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9774, node 1 2025-06-03T10:23:54.545722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:54.545749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:54.545755Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:54.545824Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:54.546532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:54.641442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:54.641484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:54.659122Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3658 2025-06-03T10:23:55.066750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:55.974771Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:23:55.986607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:55.986645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:56.051991Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:56.052996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:56.225003Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225229Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225462Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
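
Note on the recurring WorkloadService warnings in these test logs ("Retry LookupError for table .metadata/workload_manager/delayed_requests", "Resource pool default not found or you don't have access permissions"): this is bootstrap noise. The workload service looks up its metadata before the schemeshard has created it, retries, and eventually auto-creates the default pool (visible further down as ESchemeOpCreateResourcePool followed by "path exist, request accepts it"). For reference, a user-defined pool is declared with the resource-pool DDL; a minimal sketch, assuming a YDB build that supports this DDL. The pool name and setting values are illustrative, and the setting names come from the public docs rather than from this log:

    CREATE RESOURCE POOL test_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10, -- queries allowed to run concurrently
        QUEUE_SIZE = 100             -- queries allowed to wait for admission
    );
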
2025-06-03T10:23:56.225508Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225562Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225585Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225605Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225625Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.225664Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.405314Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:56.405375Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:56.417735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:56.467243Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:56.499462Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:23:56.499513Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:23:56.510483Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:23:56.510554Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:23:56.510584Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:23:56.510592Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:23:56.510600Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:23:56.510608Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:23:56.510616Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:23:56.510624Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:23:56.510819Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:23:56.527404Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:56.527449Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:56.531720Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:23:56.532703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:23:56.532854Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:23:56.536260Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:23:56.541305Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:23:56.541336Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:23:56.541352Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:23:56.545836Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:23:56.547718Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:23:56.547768Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:23:56.672168Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:23:56.763823Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:23:56.811673Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:23:57.400945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:57.401036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:57.406117Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:23:57.481215Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:23:57.481315Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:23:57.481391Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:23:57.481443Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:23:57.481494Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:23:57.481532Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:23:57.481567Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:23:57.481603Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regi ... 
DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8364:6147]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:27.698784Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:27.698800Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8366:6149] 2025-06-03T10:26:27.698814Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8366:6149] 2025-06-03T10:26:27.698947Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8367:6150] 2025-06-03T10:26:27.699002Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8366:6149], server id = [2:8367:6150], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:27.699017Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8367:6150], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:27.699034Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:27.699070Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:27.699087Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8364:6147], StatRequests.size() = 1 2025-06-03T10:26:27.725591Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2YxNGRhNS1hZmIzNGQ1OC0yZDBjZmI5NS0xYTI0NWNmYw==, TxId: 2025-06-03T10:26:27.725625Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2YxNGRhNS1hZmIzNGQ1OC0yZDBjZmI5NS0xYTI0NWNmYw==, TxId: 2025-06-03T10:26:27.725789Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:27.747837Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:27.747865Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:27.789702Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:27.789755Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:27.841408Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8366:6149], schemeshard count = 1 2025-06-03T10:26:29.834164Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:29.834199Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:26:29.834209Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
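
The "Table _statistics updater. Creating table" and "Full table path:/Root/Database/.metadata/_statistics" records above show the statistics aggregator bootstrapping its backing table before the traversal writes into it. The schema itself is never printed in this log; the sketch below is a hypothetical reconstruction from nothing more than the column names visible in the UPSERT and DELETE queries elsewhere in this output. All types and the key choice are assumptions:

    -- Hypothetical: column names come from the queries in this log;
    -- the types and primary key are guesses, not the actual YDB schema.
    CREATE TABLE `.metadata/_statistics` (
        owner_id      Uint64,
        local_path_id Uint64,
        stat_type     Uint32,
        column_tag    Uint32,
        data          String,
        PRIMARY KEY (owner_id, local_path_id, stat_type, column_tag)
    );
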
2025-06-03T10:26:29.834215Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:29.835302Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:29.847268Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:29.847411Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:29.847425Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:29.847693Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:29.859017Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:29.859068Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:29.859208Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8483:6209], server id = [2:8487:6213], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:29.859293Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8483:6209], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.859613Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8484:6210], server id = [2:8488:6214], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:29.859623Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8484:6210], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.859716Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8485:6211], server id = [2:8489:6215], tablet id = 72075186224037901, status = OK 2025-06-03T10:26:29.859721Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8485:6211], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.859883Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8486:6212], server id = [2:8492:6218], tablet id = 72075186224037902, status = OK 2025-06-03T10:26:29.859893Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8486:6212], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:29.860719Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:29.860772Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8483:6209], server id = [2:8487:6213], tablet id = 72075186224037899 2025-06-03T10:26:29.860775Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.860846Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:29.860956Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8484:6210], server id = [2:8488:6214], tablet id = 72075186224037900 2025-06-03T10:26:29.860961Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.861042Z node 2 
:STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-03T10:26:29.861097Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8485:6211], server id = [2:8489:6215], tablet id = 72075186224037901 2025-06-03T10:26:29.861100Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.861174Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-03T10:26:29.861178Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:29.861198Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:29.861218Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:29.861273Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:29.861732Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8486:6212], server id = [2:8492:6218], tablet id = 72075186224037902 2025-06-03T10:26:29.861740Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:29.861851Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:29.866814Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8519:6241]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:29.866868Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:29.866873Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8519:6241], StatRequests.size() = 1 2025-06-03T10:26:29.923291Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTAzN2YxNjgtNTBkMDZiNjMtMTk5ZDNiNWQtM2U1ZTAyMDg=, TxId: 2025-06-03T10:26:29.923324Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTAzN2YxNjgtNTBkMDZiNjMtMTk5ZDNiNWQtM2U1ZTAyMDg=, TxId: ... waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-03T10:26:29.923524Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:29.923903Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];ev=NActors::IEventHandle;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:26:29.939442Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:29.939470Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
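
For readability, this is the data query embedded in the RunDataQuery records above, re-wrapped as YQL. The two List parameters print without their element types here (the angle-bracketed part appears to have been eaten by the log rendering), so they are left exactly as logged:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List; -- element type truncated in the log
    DECLARE $data AS List;        -- element type truncated in the log

    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);
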
2025-06-03T10:26:29.952978Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:8539:6251];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=18; 2025-06-03T10:26:30.111207Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8652:6347]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:30.111326Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:30.111335Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:30.112059Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:30.112080Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:30.112090Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:30.113253Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHoppingWindowExprKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowListKey-default.txt] >> test_sql_streaming.py::test[suites-ReadWriteSameTopic-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] >> Mirror3of4::Compaction [GOOD] >> MultiGet::SequentialGet >> BasicStatistics::NotFullStatisticsDatashard |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> AnalyzeDatashard::AnalyzeOneTable [GOOD] >> YdbMonitoring::SelfCheckWithNodesDying [GOOD] >> YdbOlapStore::BulkUpsert |61.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHopListKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] >> BasicStatistics::TwoServerlessTwoSharedDbs >> ColumnStatistics::CountMinSketchStatistics ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeDatashard::AnalyzeOneTable [GOOD] Test command err: 2025-06-03T10:23:54.823674Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:54.823745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:54.823774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002aaf/r3tmp/tmpZ6MeMc/pdisk_1.dat 2025-06-03T10:23:54.941543Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62942, node 1 2025-06-03T10:23:55.065383Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:55.065408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:55.065414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:55.065531Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:55.066229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:55.161982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:55.162025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:55.181171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26073 2025-06-03T10:23:55.612574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:56.583292Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:23:56.594934Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:56.594979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:56.654372Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:56.660702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:56.822948Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823141Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823311Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:23:56.823361Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823420Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823438Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823460Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823479Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.823496Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:56.978074Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:56.978129Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:56.994088Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:57.073559Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:57.101009Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:23:57.101049Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:23:57.115702Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:23:57.116035Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:23:57.116066Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:23:57.116073Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:23:57.116081Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:23:57.116088Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:23:57.116095Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:23:57.116104Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:23:57.116553Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:23:57.144564Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:57.144605Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:57.146980Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:23:57.153473Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
2025-06-03T10:23:57.153580Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:23:57.153841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:23:57.166438Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:23:57.166467Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:23:57.166480Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:23:57.170954Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:23:57.173605Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:23:57.173660Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:23:57.298546Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:23:57.423092Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:23:57.513697Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:23:58.158850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2211:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.158917Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.170130Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:23:58.466811Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2516:3106], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.466886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.467502Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2521:3110]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:23:58.468131Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:23:58.468147Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2523:3112] 2025-06-03T10:23:58.468161Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2523:3112] 2025-06-03T10:23:58.468376Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2524:2983] 2025-06-03T10:23:58.468458Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2523:3112], server id = [2:2524:2983], tablet id = 72075186224037894, status = OK 2025-06-03T10:23:58.468527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2524:2983], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:23:58.468546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:23:58.468618Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:23:58.468630Z node 1 :STATISTICS DEBU ... [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:21.147054Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:26:24.660802Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:26:25.904232Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:26:25.904274Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:26:25.904279Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:26:25.904283Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:26:27.318699Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:27.318874Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:26:27.360224Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-03T10:26:27.360255Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 182.000000s, at schemeshard: 72075186224037897 2025-06-03T10:26:27.360351Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 2025-06-03T10:26:27.371690Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:26:28.598031Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 
2025-06-03T10:26:28.598068Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:28.598073Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:26:28.598082Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-03T10:26:28.598088Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:28.598217Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:28.601659Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:26:28.602772Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6582:4647], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.602806Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6592:4652], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.602819Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.605507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:26:28.620007Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6596:4655], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:26:28.800489Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6696:4704] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:28.808951Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:6725:4719]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:28.809015Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:28.809025Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:6727:4721] 2025-06-03T10:26:28.809035Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:6727:4721] 2025-06-03T10:26:28.809125Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6728:4722] 2025-06-03T10:26:28.809153Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:6728:4722], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:28.809162Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:28.809199Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:6727:4721], server id = [2:6728:4722], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:28.809209Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:28.809222Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:6725:4719], StatRequests.size() = 1 2025-06-03T10:26:28.825911Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTA2MWI1ZDctZDRkNThmNGUtZDdiOWYxZGEtNGYxYTc4MmU=, TxId: 2025-06-03T10:26:28.825949Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTA2MWI1ZDctZDRkNThmNGUtZDdiOWYxZGEtNGYxYTc4MmU=, TxId: 2025-06-03T10:26:28.826108Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:28.837546Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:28.837569Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
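
The matching cleanup query from the TTxFinishTraversal path, which appears verbatim inside the RunDataQuery records just above and below, re-wrapped the same way:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;

    DELETE FROM `.metadata/_statistics`
    WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
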
2025-06-03T10:26:28.888962Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:28.888998Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:28.962186Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:6727:4721], schemeshard count = 1 2025-06-03T10:26:29.920546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:29.920580Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-03T10:26:29.920585Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:645: [72075186224037894] ScheduleNextAnalyze. Skip analyze for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:31.129810Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:26:31.140182Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:31.140252Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-03T10:26:31.140259Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:31.140357Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:31.140962Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:26:31.143609Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGI2ZjhiYy04OTdhNDBhMC04ZTIxMDQ5Zi00N2NiZWRiYw==, TxId: 2025-06-03T10:26:31.143627Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGI2ZjhiYy04OTdhNDBhMC04ZTIxMDQ5Zi00N2NiZWRiYw==, TxId: 2025-06-03T10:26:31.143713Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:31.154984Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:31.155006Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:2718:3222] 2025-06-03T10:26:31.155157Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:6837:4785]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:31.155768Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:31.155776Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:31.156378Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:31.156387Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:31.156396Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:31.156940Z node 2 :STATISTICS ERROR: service_impl.cpp:691: [TStatService::ReadRowsResponse] QueryId[ 1 ], RowsCount[ 0 ] 2025-06-03T10:26:31.157016Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 |61.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> test_sql_streaming.py::test[suites-GroupByHoppingWindowListKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowNoKey-default.txt] >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasureMirror3of4 >> CostMetricsPutHugeMirror3dc::TestPutMirror3dcRequests100Inflight10BlobSize2000000 [GOOD] >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 >> test_sql_streaming.py::test[suites-ReadWriteTopic-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasureMirror3of4 [GOOD] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx3 |61.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasureMirror3of4 [GOOD] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx1 >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx2 >> MultiGet::SequentialGet [GOOD] >> NodeDisconnected::BsQueueRetries >> test_sql_streaming.py::test[suites-GroupByHopNoKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] |61.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |61.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table |61.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table/ydb-core-tx-schemeshard-ut_external_table >> test_sql_streaming.py::test[suites-GroupByHoppingWindowNoKey-default.txt] [GOOD] >> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx4 >> AnalyzeColumnshard::AnalyzeStatus [GOOD] >> test_sql_streaming.py::test[suites-ReadWriteTopicWithSchema-default.txt] [GOOD] >> 
test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] >> TOlapReboots::DropTableThenStore [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeStatus [GOOD] Test command err: 2025-06-03T10:24:01.200469Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:01.200566Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:01.200599Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a7f/r3tmp/tmpmpKcaS/pdisk_1.dat 2025-06-03T10:24:01.474184Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16554, node 1 2025-06-03T10:24:01.617648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:01.617676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:01.617682Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:01.617842Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:01.618515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:01.728598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:01.728645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:01.746215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29873 2025-06-03T10:24:02.193027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:03.421166Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:03.452256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:03.452298Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:03.518783Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:03.519454Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:03.674042Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674275Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674417Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:03.674455Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674507Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674525Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674552Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674573Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.674589Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:03.829220Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:03.829275Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:03.841909Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:03.880247Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:03.892434Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:03.892464Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:03.903965Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:03.904270Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:03.904301Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:03.904309Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:03.904315Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:03.904320Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:03.904324Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:03.904330Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:03.904559Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:03.922892Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:03.922923Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1870:2601], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:03.924161Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1880:2609] 2025-06-03T10:24:03.927190Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 
2025-06-03T10:24:03.927367Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1917:2626] 2025-06-03T10:24:03.927792Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1917:2626], schemeshard id = 72075186224037897 2025-06-03T10:24:03.946088Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:03.946173Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:03.946191Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:03.958959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:03.962329Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:03.962382Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:04.085889Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:04.187098Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:04.279961Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:05.154582Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2223:3066], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.154633Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:05.158986Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:05.239489Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:05.239578Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:05.239653Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:05.239680Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:05.239703Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:05.239736Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:05.239760Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:05.239789Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2307:2840];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:26:31.350397Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7385:5400] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:31.360327Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7414:5415]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:31.360394Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:31.360405Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7416:5417] 2025-06-03T10:26:31.360413Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7416:5417] 2025-06-03T10:26:31.360503Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7417:5418] 2025-06-03T10:26:31.360528Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7417:5418], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:31.360542Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:31.360577Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7416:5417], server id = [2:7417:5418], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:31.360592Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:31.360603Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7414:5415], StatRequests.size() = 1 2025-06-03T10:26:31.380436Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjFiODBiNGUtZjRmOGY0YmYtNzNhMTY3MGYtOGUyNTMxYjE=, TxId: 2025-06-03T10:26:31.380466Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjFiODBiNGUtZjRmOGY0YmYtNzNhMTY3MGYtOGUyNTMxYjE=, TxId: 2025-06-03T10:26:31.380657Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:31.392241Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:31.392277Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:26:31.444437Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:31.444483Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:31.506879Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7416:5417], schemeshard count = 1 2025-06-03T10:26:32.399807Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:32.399841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:32.400722Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:32.412926Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:32.413079Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:32.413089Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-03T10:26:32.424734Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:32.445649Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-03T10:26:32.446390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7488:5460] 2025-06-03T10:26:32.446539Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. Status STATUS_ENQUEUED 2025-06-03T10:26:32.446830Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7490:5461]
---- StatisticsAggregator ----
Database: /Root/Database
BaseStatistics: 1
SchemeShards: 1
    72075186224037897
Nodes: 1
    2
RequestedSchemeShards: 1
    72075186224037897
FastCounter: 3
FastCheckInFlight: 0
FastSchemeShards: 0
FastNodes: 0
PropagationInFlight: 0
PropagationSchemeShards: 0
PropagationNodes: 0
LastSSIndex: 0
PendingRequests: 0
ProcessUrgentInFlight: 0
Columns: 2
DatashardRanges: 0
CountMinSketches: 0
ScheduleTraversalsByTime: 2
  oldest table: [OwnerId: 72075186224037897, LocalPathId: 4], update time: 1970-01-01T00:00:00Z
ScheduleTraversalsBySchemeShard: 1
    72075186224037897
    [OwnerId: 72075186224037897, LocalPathId: 4], [OwnerId: 72075186224037897, LocalPathId: 3]
ForceTraversals: 1
    1970-01-01T00:00:06Z
NavigateType: Analyze
NavigateAnalyzeOperationId: 
NavigatePathId: 
ForceTraversalOperationId: 
TraversalStartTime: 1970-01-01T00:00:00Z
TraversalPathId: 
TraversalIsColumnTable: 0
TraversalStartKey: 
GlobalTraversalRound: 1
TraversalRound: 0
HiveRequestRound: 0
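The state dump above follows a simple two-level layout: top-level "Key: Value" counters, with indented lines listing the members of the most recent key (scheme shard ids, node ids, path ids). Below is a minimal Python parsing sketch under that assumption; it is illustrative only and not part of the captured test output.

def parse_aggregator_dump(text):
    # Illustrative parser for the "---- StatisticsAggregator ----" dump
    # shown above (not part of the test output). Assumes two levels:
    # top-level "Key: Value" lines, plus indented lines that list the
    # members of the most recent top-level key.
    counters, members = {}, {}
    current = None
    for line in text.splitlines():
        if not line.strip() or line.startswith("----"):
            continue  # skip blanks and the "---- ... ----" banner
        if line[0].isspace() and current is not None:
            members.setdefault(current, []).append(line.strip())
        else:
            key, _, value = line.partition(":")
            current = key.strip()
            counters[current] = value.strip()
    return counters, members

# Example on a fragment of the dump above:
sample = (
    "Database: /Root/Database\n"
    "SchemeShards: 1\n"
    "    72075186224037897\n"
    "Nodes: 1\n"
    "    2\n"
)
counters, members = parse_aggregator_dump(sample)
assert counters["SchemeShards"] == "1"
assert members["SchemeShards"] == ["72075186224037897"]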
... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-03T10:26:32.447144Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:26:32.447169Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:26:32.458970Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-03T10:26:33.560235Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:33.560310Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:33.560319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:33.560589Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:33.572035Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:33.572156Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:33.572182Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:33.572473Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:33.583917Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:33.584012Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:33.584211Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7539:5489], server id = [2:7540:5490], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:33.584248Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7539:5489], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:33.585393Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:33.585416Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:33.585498Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:33.585533Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:33.585585Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7539:5489], server id = [2:7540:5490], tablet id = 72075186224037899 2025-06-03T10:26:33.585589Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:33.585631Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:33.586308Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:33.593201Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7560:5509]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:33.593268Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:33.593276Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7560:5509], StatRequests.size() = 1 2025-06-03T10:26:33.620455Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmU1NGQ0YzgtNGJmMzg0ZC1iNGZjNmJhYy02MTZkZjRmNQ==, TxId: 2025-06-03T10:26:33.620492Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmU1NGQ0YzgtNGJmMzg0ZC1iNGZjNmJhYy02MTZkZjRmNQ==, TxId: 2025-06-03T10:26:33.620666Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:33.632623Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:33.632661Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:947:2751] 2025-06-03T10:26:33.633083Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7577:5517] 2025-06-03T10:26:33.633249Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. 
Status STATUS_NO_OPERATION
>> test_sql_streaming.py::test[suites-GroupByHopPercentile-default.txt] [GOOD]
>> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt]
>> YdbOlapStore::LogWithUnionAllDescending [GOOD]
>> YdbOlapStore::LogTsRangeDescending
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::DropTableThenStore [GOOD]
Test command err:
==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:25:50.901936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:25:50.901972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.901979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:50.901985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:50.902003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:25:50.902008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:50.902020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.902042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:50.902195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:50.902311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:50.918733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true }
2025-06-03T10:25:50.918760Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.918871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.926892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:50.927093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:50.927146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:50.930679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:50.930752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:50.930942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.931026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:50.931824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.931894Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:50.932265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:50.932282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.932303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:50.932315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:50.932322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:50.932378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:25:50.936229Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.963533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:50.963627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.963703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:50.963769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:50.963783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.964640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.964681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:50.964738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.964751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:50.964758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:25:50.964765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:50.965379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.965396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:50.965402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:50.965854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.965868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.965874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.965882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:50.966747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:50.967232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:50.967281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:50.967549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.967582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:50.967593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.967682Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
4046678944, cookie: 1005 2025-06-03T10:26:34.222609Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:26:34.222617Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:26:34.222623Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:26:34.222745Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:26:34.222757Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:26:34.222762Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:26:34.222767Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:26:34.222771Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:26:34.222781Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-03T10:26:34.223602Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1005:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-03T10:26:34.223633Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 0, tablet: 72075186233409546 2025-06-03T10:26:34.223975Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-03T10:26:34.223993Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1005, tablet: 72075186233409546, partId: 0 2025-06-03T10:26:34.224015Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1005:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1005 2025-06-03T10:26:34.224026Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1005:0 129 -> 130 2025-06-03T10:26:34.224250Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:26:34.224273Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 
72057594046678944, cookie: 1005 2025-06-03T10:26:34.224706Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:26:34.224742Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:26:34.224764Z node 88 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:235: TDropOlapStore TProposedDeleteParts operationId# 1005:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:34.224783Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:26:34.224813Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:26:34.224818Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:26:34.224825Z node 88 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:26:34.224829Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:26:34.224834Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-03T10:26:34.224839Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:26:34.224845Z node 88 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:26:34.224850Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:26:34.224879Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:34.225378Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:26:34.225479Z node 88 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:26:34.225602Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:34.225812Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:34.226007Z node 88 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[88:332:2318];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; Forgetting tablet 72075186233409546 2025-06-03T10:26:34.227535Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:34.227547Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:34.227568Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:34.228710Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:26:34.228727Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:26:34.228867Z node 88 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1005 2025-06-03T10:26:34.228923Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:26:34.228930Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:26:34.228997Z node 88 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:26:34.229015Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:26:34.229019Z node 88 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [88:550:2519] TestWaitNotification: OK eventTxId 1005 2025-06-03T10:26:34.229080Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:34.229112Z node 88 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore/ColumnTable" took 44us result status StatusPathDoesNotExist 2025-06-03T10:26:34.229152Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore/ColumnTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/OlapStore/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:34.229226Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:34.229243Z node 88 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 20us result status StatusPathDoesNotExist 2025-06-03T10:26:34.229258Z node 88 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] [GOOD]
>> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx0
>> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 [GOOD]
>> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [GOOD]
>> TBsProxyFaultToleranceTest::CheckTGetWithRecoverFaultToleranceTestErasureMirror3dc
|61.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHoppingWindowPercentile-default.txt] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> CostMetricsPutMirror3dc::TestPutMirror3dcRequests10000Inflight1000BlobSize1000 [GOOD]
Test command err:
RandomSeed# 17652040601904076067 2025-06-03T10:25:37.071796Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [fd8726626f160edc] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:1:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-03T10:25:37.071843Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [c26ca601934fca5a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:2:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-03T10:25:37.071871Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:5:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:1:1000:0] PatchedBlobId# [1:1:2:10:1:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-03T10:25:37.071896Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:3:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:2:1000:0] PatchedBlobId# [1:1:2:10:4098:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-03T10:25:39.553938Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [b0fb698d93fa39a0] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1
{[1:1:1:10:3:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-03T10:25:39.553998Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:3:1000:0] PatchedBlobId# [1:1:2:10:147459:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-03T10:25:43.482743Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [7a8b75ad039035cd] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:4:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-03T10:25:43.482815Z 9 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:4:1000:0] PatchedBlobId# [1:1:2:10:24580:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-03T10:25:43.764873Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [d25823a43a3795a3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:24580:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-03T10:25:43.764926Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:24580:1000:0] PatchedBlobId# [1:1:3:10:4100:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-03T10:25:43.769166Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [1e1316bae6dcdda9] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:4100:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-03T10:25:43.769211Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:4100:1000:0] PatchedBlobId# [1:1:4:10:151556:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:2:0] Marker# BSVSP01 2025-06-03T10:25:43.770173Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [22ec32e8db4e5980] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:151556:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-03T10:25:43.770201Z 9 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:151556:1000:0] PatchedBlobId# [1:1:5:10:8196:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-03T10:25:43.776782Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [3f6981dc942a68b2] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:8196:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-03T10:25:43.776839Z 4 00h00m30.010000s 
:BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:8196:1000:0] PatchedBlobId# [1:1:6:10:12292:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-03T10:25:44.219067Z 5 00h00m30.010000s :BS_PROXY_GET ERROR: [0d5b28a73a9ca466] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:6:10:12292:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:1:0]"} Marker# BPG29 2025-06-03T10:25:44.219123Z 5 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:6:10:12292:1000:0] PatchedBlobId# [1:1:7:10:61444:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:1:0] Marker# BSVSP01 2025-06-03T10:25:44.220304Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [5c36ec635b1aebb3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:7:10:61444:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-03T10:25:44.220328Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:7:10:61444:1000:0] PatchedBlobId# [1:1:8:10:16388:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-03T10:25:44.220884Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [de061836ebec84ed] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:8:10:16388:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-03T10:25:44.220904Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:8:10:16388:1000:0] PatchedBlobId# [1:1:9:10:20484:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:2:0] Marker# BSVSP01 2025-06-03T10:25:44.245995Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [0caa2e7ae1a8f748] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:9:10:20484:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:25:44.246057Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:9:10:20484:1000:0] PatchedBlobId# [1:1:10:10:20484:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:25:44.291578Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [ff11dec54d200413] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:10:10:20484:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-03T10:25:44.291643Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:10:10:20484:1000:0] PatchedBlobId# 
[1:1:11:10:4:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-03T10:25:46.982742Z 6 00h00m30.010000s :BS_PROXY_GET ERROR: [0fcf7b0252da7774] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:1:10:5:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:2:0]"} Marker# BPG29 2025-06-03T10:25:46.982809Z 6 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:1:10:5:1000:0] PatchedBlobId# [1:1:2:10:4101:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:2:0] Marker# BSVSP01 2025-06-03T10:25:47.173908Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [7c02a817d5443a5f] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:2:10:4101:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:25:47.173964Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:2:10:4101:1000:0] PatchedBlobId# [1:1:3:10:102405:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:25:47.178406Z 3 00h00m30.010000s :BS_PROXY_GET ERROR: [0378fc03e23dcea3] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:3:10:102405:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:2:0]"} Marker# BPG29 2025-06-03T10:25:47.178464Z 3 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:3:10:102405:1000:0] PatchedBlobId# [1:1:4:10:8197:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:2:0] Marker# BSVSP01 2025-06-03T10:25:47.182796Z 9 00h00m30.010000s :BS_PROXY_GET ERROR: [b62a920b62e2cd1d] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:4:10:8197:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:2:0]"} Marker# BPG29 2025-06-03T10:25:47.182845Z 9 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:4:10:8197:1000:0] PatchedBlobId# [1:1:5:10:12293:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:2:0] Marker# BSVSP01 2025-06-03T10:25:47.491672Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [49f240bcbfe05113] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:5:10:12293:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-03T10:25:47.491762Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:5:10:12293:1000:0] PatchedBlobId# [1:1:6:10:12293:1000:0 ... 
0:0] Marker# BSVSP01 2025-06-03T10:26:02.850010Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:8222:1000:0] PatchedBlobId# [1:1:103:10:12318:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-03T10:26:02.850316Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [8dce5cad723d2d2b] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:77912:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-03T10:26:02.850375Z 7 00h00m30.010000s :BS_PROXY_GET ERROR: [a289cd6b7032e650] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:99:10:4191:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:0:0]"} Marker# BPG29 2025-06-03T10:26:02.850941Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:77912:1000:0] PatchedBlobId# [1:1:102:10:8280:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-03T10:26:02.850970Z 7 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:4191:1000:0] PatchedBlobId# [1:1:100:10:102495:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:0:0] Marker# BSVSP01 2025-06-03T10:26:02.852209Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [c30b7cb66098a619] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:99:10:4221:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-03T10:26:02.852276Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [6f5090733181ff38] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:102461:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# BPG29 2025-06-03T10:26:02.852871Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:4221:1000:0] PatchedBlobId# [1:1:100:10:127101:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-03T10:26:02.852928Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:102461:1000:0] PatchedBlobId# [1:1:102:10:8253:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-03T10:26:02.853811Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [083b0065e89a1b6a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:97:10:20563:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.853879Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [a77b9d19f2567bda] Response# TEvGetResult {Status# 
DEADLINE ResponseSz# 1 {[1:1:105:10:16462:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.854500Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:97:10:20563:1000:0] PatchedBlobId# [1:1:98:10:83:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.854526Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:105:10:16462:1000:0] PatchedBlobId# [1:1:106:10:114766:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.856739Z 2 00h00m30.010000s :BS_PROXY_GET ERROR: [927accfef51cd06d] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:8257:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-03T10:26:02.856807Z 2 00h00m30.010000s :BS_PROXY_GET ERROR: [f263fb505b92406f] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:104:10:16463:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:1:0]"} Marker# BPG29 2025-06-03T10:26:02.857065Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [115a7fc66b547b20] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:43:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-03T10:26:02.857338Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:8257:1000:0] PatchedBlobId# [1:1:102:10:12353:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-03T10:26:02.857373Z 2 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:104:10:16463:1000:0] PatchedBlobId# [1:1:105:10:65615:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:1:0] Marker# BSVSP01 2025-06-03T10:26:02.857622Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:43:1000:0] PatchedBlobId# [1:1:99:10:122923:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-03T10:26:02.858373Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [35e4ed765eef2ea8] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:98:10:63:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-03T10:26:02.859090Z 1 00h00m30.010000s :BS_PROXY_GET ERROR: [a2c019e5f7acbef9] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:127067:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:0:0:0]"} Marker# 
BPG29 2025-06-03T10:26:02.859219Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:98:10:63:1000:0] PatchedBlobId# [1:1:99:10:147519:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-03T10:26:02.859723Z 1 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:127067:1000:0] PatchedBlobId# [1:1:102:10:8283:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:0:0:0] Marker# BSVSP01 2025-06-03T10:26:02.861311Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [8d2c3e092928785a] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:99:10:4185:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.861369Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [4a09dff14208eeb6] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:96:10:114792:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.861681Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:99:10:4185:1000:0] PatchedBlobId# [1:1:100:10:28761:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.861738Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:96:10:114792:1000:0] PatchedBlobId# [1:1:97:10:20584:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.863290Z 4 00h00m30.010000s :BS_PROXY_GET ERROR: [be058b1a5131fc32] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:100:10:4167:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:1:0:0]"} Marker# BPG29 2025-06-03T10:26:02.863586Z 4 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:1:0:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:100:10:4167:1000:0] PatchedBlobId# [1:1:101:10:102471:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:1:0:0] Marker# BSVSP01 2025-06-03T10:26:02.865760Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [0a3be9b0220e3391] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:102:10:12340:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.865808Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [ddf2e8e25f6c9eac] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:101:10:8269:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.865956Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed 
on VGet; OriginalBlobId# [1:1:102:10:12340:1000:0] PatchedBlobId# [1:1:103:10:12340:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.865977Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:101:10:8269:1000:0] PatchedBlobId# [1:1:102:10:8269:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 2025-06-03T10:26:02.868187Z 8 00h00m30.010000s :BS_PROXY_GET ERROR: [e244bc87e556ca4d] Response# TEvGetResult {Status# DEADLINE ResponseSz# 1 {[1:1:102:10:131111:1000:0] DEADLINE Size# 0 RequestedSize# 1000} ErrorReason# "status# DEADLINE from# [82000000:1:2:1:0]"} Marker# BPG29 2025-06-03T10:26:02.868461Z 8 00h00m30.010000s :BS_VDISK_PATCH ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) TEvVMovedPatch: failed on VGet; OriginalBlobId# [1:1:102:10:131111:1000:0] PatchedBlobId# [1:1:103:10:12327:1000:0] ErrorReason# Couldn't get the original blob; GetStatus# DEADLINE GetResponseStatus# DEADLINE GetErrorReason# status# DEADLINE from# [82000000:1:2:1:0] Marker# BSVSP01 >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [GOOD] >> TFlatTableLongTxLarge::LargeDeltaChain [GOOD] |61.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-WriteTwoTopics-default.txt] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeDeadline [GOOD] Test command err: 2025-06-03T10:23:59.854862Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:59.854909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:23:59.854920Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a81/r3tmp/tmp8F1hVA/pdisk_1.dat 2025-06-03T10:24:00.019558Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13692, node 1 2025-06-03T10:24:00.152947Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:00.152978Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:00.152984Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:00.153126Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:00.153806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:00.244558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:00.244604Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:00.262098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11890 2025-06-03T10:24:00.689533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:02.283630Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:02.295510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:02.295549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:02.370019Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:02.377742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:02.630730Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.630913Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631058Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:02.631094Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631139Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631156Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631538Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631561Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.631579Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:02.821105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:02.821151Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:02.834899Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:02.902797Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:02.954077Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:02.954118Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:02.969465Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:02.969539Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:02.969568Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:02.969576Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:02.969583Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:02.969590Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:02.969596Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:02.969604Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:02.969801Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:02.988037Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:02.988071Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:02.989873Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:02.991074Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:02.991223Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:03.000303Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:03.015899Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:03.015929Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:03.015943Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:03.024708Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:03.031910Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:03.031978Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:03.191792Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:03.298841Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:03.360270Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:03.956077Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:03.956165Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:03.960715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:04.028370Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:04.028455Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:04.028522Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:04.028550Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:04.028584Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:04.028612Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:04.028639Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:04.028673Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... fault, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.798778Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:26:28.814941Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7291:5336], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:26:28.826534Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:26:29.026824Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7377:5383] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:29.039311Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7399:5397]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:29.039393Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:29.039408Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7401:5399] 2025-06-03T10:26:29.039421Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7401:5399] 2025-06-03T10:26:29.039524Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7402:5400] 2025-06-03T10:26:29.039562Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7402:5400], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:29.039580Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:29.039624Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7401:5399], server id = [2:7402:5400], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:29.039649Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:29.039666Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7399:5397], StatRequests.size() = 1 2025-06-03T10:26:29.060669Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGVjM2I5NzktY2U1OGU5MTUtYWJiYTcyODUtZGE4N2E4M2Y=, TxId: 2025-06-03T10:26:29.060692Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGVjM2I5NzktY2U1OGU5MTUtYWJiYTcyODUtZGE4N2E4M2Y=, TxId: 2025-06-03T10:26:29.060789Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:29.072208Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:29.072233Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:26:29.145154Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:29.145186Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:29.207481Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7401:5399], schemeshard count = 1 2025-06-03T10:26:30.155195Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:30.155223Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:30.155985Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:30.167849Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:30.167987Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:30.167996Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-03T10:26:30.179213Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:31.230900Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:31.230936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:31.230941Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:26:31.230950Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:31.230956Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:31.231200Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:31.242718Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-03T10:26:31.242759Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:31.242886Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:31.242904Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) 2025-06-03T10:26:31.243306Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:31.609899Z node 2 :STATISTICS ERROR: tx_analyze_deadline.cpp:28: [72075186224037894] Delete long analyze operation, OperationId=operationId 2025-06-03T10:26:31.925501Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:31.925612Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:33.802689Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:26:33.802726Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:26:33.802730Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:26:33.802735Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:26:36.032567Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-03T10:26:36.032603Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 205.000000s, at schemeshard: 72075186224037897 2025-06-03T10:26:36.032704Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-03T10:26:36.138325Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:36.138383Z node 2 :STATISTICS DEBUG: tx_analyze_deadline.cpp:46: [72075186224037894] TTxAnalyzeDeadline::Complete. 
Send TEvAnalyzeResponse for deleted operation, OperationId=operationId, ActorId=[1:3129:3338] 2025-06-03T10:26:36.138395Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:26:36.138435Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:36.138606Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7596:5491], server id = [2:7597:5492], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:36.138636Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7596:5491], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:36.139450Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:36.139465Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:36.139538Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:36.139562Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:36.139591Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7596:5491], server id = [2:7597:5492], tablet id = 72075186224037899 2025-06-03T10:26:36.139595Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:36.139635Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:36.140461Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:36.150228Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7631:5513]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:36.150288Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:36.150295Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7631:5513], StatRequests.size() = 1 FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx0 >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx5 |61.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/streaming_optimize/py3test >> test_sql_streaming.py::test[suites-GroupByHopTimeExtractorUnusedColumns-default.txt] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeSave [GOOD] Test command err: 
2025-06-03T10:24:02.942338Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:02.942421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:02.942453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a77/r3tmp/tmp4ej7uR/pdisk_1.dat 2025-06-03T10:24:03.131819Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10263, node 1 2025-06-03T10:24:03.342275Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:03.342304Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:03.342310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:03.342442Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:03.343171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:03.446005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:03.446055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:03.470058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19230 2025-06-03T10:24:03.872285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:05.276099Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:05.296599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:05.296639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:05.370427Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:05.377770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:05.656298Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656546Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656599Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:05.656628Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656677Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656696Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656714Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656734Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.656750Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:05.839719Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:05.839760Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:05.858120Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:05.911592Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:05.929956Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:05.929989Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:05.950005Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:05.950281Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:05.950306Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:05.950326Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:05.950332Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:05.950338Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:05.950344Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:05.950351Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:05.950561Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:05.975233Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:05.975263Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:05.988777Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1877:2607] 2025-06-03T10:24:06.000600Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2617] 
2025-06-03T10:24:06.000667Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2617], schemeshard id = 72075186224037897 2025-06-03T10:24:06.007010Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:06.020514Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:06.020535Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:06.020546Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:06.027104Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:06.033325Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:06.033364Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:06.233674Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:06.453772Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:06.499906Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:07.596777Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2219:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:07.596822Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:07.621934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:07.833372Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:07.834034Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:07.834133Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:07.834162Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:07.834380Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:07.834417Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:07.834625Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:07.834666Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2303:2839];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... ICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-03T10:26:34.312388Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-03T10:26:34.312398Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:34.312438Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:34.312816Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:34.312832Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:34.312941Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:34.312965Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:34.313225Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:34.313238Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:34.313923Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:34.370675Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:34.370758Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:34.370945Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7591:5525], server id = [2:7592:5526], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:34.370972Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7591:5525], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:34.371955Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:34.371979Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:34.372032Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:34.372062Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:34.372136Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:34.372726Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7591:5525], server id = [2:7592:5526], tablet id = 72075186224037899 2025-06-03T10:26:34.372738Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:34.372903Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:34.380457Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7610:5544]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:34.380537Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:34.380546Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7610:5544], StatRequests.size() = 1 2025-06-03T10:26:34.414216Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Y2Y0YWVhNzYtYWFlNTcwMjMtNDEzZGFkOS1mMWU2Y2M4NA==, TxId: 2025-06-03T10:26:34.414249Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Y2Y0YWVhNzYtYWFlNTcwMjMtNDEzZGFkOS1mMWU2Y2M4NA==, TxId: 2025-06-03T10:26:34.414435Z node 2 :STATISTICS DEBUG: 
tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:34.427542Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7625:5550] 2025-06-03T10:26:34.427617Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7544:5494], server id = [2:7626:5551], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:34.427626Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7626:5551] 2025-06-03T10:26:34.427648Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7625:5550], schemeshard id = 72075186224037897 2025-06-03T10:26:34.427670Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7626:5551], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-03T10:26:34.441313Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:34.441345Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:34.517761Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7630:5554] 2025-06-03T10:26:34.518027Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3132:3339] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-03T10:26:34.518040Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3132:3339] 2025-06-03T10:26:34.518058Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-03T10:26:34.971306Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:26:34.971339Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:34.981794Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-03T10:26:34.981835Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:35.657061Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:35.657092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:35.657099Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:26:36.796489Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:36.796549Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-03T10:26:36.796556Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:36.796777Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:36.808276Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:36.808417Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:36.808434Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:36.808595Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:36.820185Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:36.820292Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-03T10:26:36.820497Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7709:5597], server id = [2:7710:5598], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:36.820524Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7709:5597], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:36.820886Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:36.820902Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:36.820934Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:36.820962Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:36.821023Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:36.821651Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7709:5597], server id = [2:7710:5598], tablet id = 72075186224037899 2025-06-03T10:26:36.821663Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:36.821846Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:36.835839Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjVkZGEwMDAtNzRlYzcxMWUtZjZiMTZlNDItZDU0NjA2YWI=, TxId: 2025-06-03T10:26:36.835866Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjVkZGEwMDAtNzRlYzcxMWUtZjZiMTZlNDItZDU0NjA2YWI=, TxId: 2025-06-03T10:26:36.836036Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:36.847908Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:36.847942Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3132:3339] >> HullReplWriteSst::Basic [GOOD] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx3 >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx1 >> YdbOlapStore::LogTsRangeDescending [GOOD] |61.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |61.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |61.8%| [LD] {RESULT} $(B)/ydb/core/kqp/runtime/ut/ydb-core-kqp-runtime-ut |61.8%| [TA] $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/repl/ut/unittest >> HullReplWriteSst::Basic [GOOD] Test command err: commit chunk# 1 {ChunkIdx: 1 Offset: 101220352 Size: 32994288} 749868 commit chunk# 2 {ChunkIdx: 2 Offset: 101224448 Size: 32990724} 749787 commit chunk# 3 {ChunkIdx: 3 Offset: 101224448 Size: 32991384} 749802 commit chunk# 4 {ChunkIdx: 4 Offset: 101216256 Size: 32997544} 749942 commit chunk# 5 {ChunkIdx: 5 Offset: 101216256 Size: 33001460} 750031 commit chunk# 6 {ChunkIdx: 6 Offset: 101212160 Size: 33005068} 750113 commit chunk# 7 {ChunkIdx: 7 Offset: 101232640 Size: 32985048} 749658 commit chunk# 8 {ChunkIdx: 8 Offset: 101228544 Size: 32988832} 749744 commit chunk# 9 {ChunkIdx: 9 Offset: 101228544 Size: 32986940} 749701 commit chunk# 10 {ChunkIdx: 10 Offset: 101228544 Size: 32987776} 749720 commit chunk# 11 {ChunkIdx: 11 Offset: 101232640 Size: 32984740} 749651 commit chunk# 12 {ChunkIdx: 12 Offset: 101240832 Size: 32975544} 749442 commit chunk# 13 {ChunkIdx: 13 Offset: 101216256 Size: 32999392} 749984 commit chunk# 14 {ChunkIdx: 14 Offset: 101224448 Size: 32992572} 749829 commit chunk# 15 {ChunkIdx: 15 Offset: 101195776 Size: 33018312} 750414 commit chunk# 16 {ChunkIdx: 16 Offset: 101232640 Size: 32985048} 749658 commit chunk# 17 {ChunkIdx: 17 Offset: 101224448 Size: 32991296} 749800 commit chunk# 18 {ChunkIdx: 18 Offset: 101216256 Size: 32999788} 749993 commit chunk# 19 {ChunkIdx: 19 Offset: 101220352 Size: 32997368} 749938 commit chunk# 20 {ChunkIdx: 20 Offset: 101232640 Size: 32983200} 749616 commit chunk# 21 {ChunkIdx: 21 Offset: 101244928 Size: 32972640} 749376 commit chunk# 22 {ChunkIdx: 22 Offset: 101203968 Size: 33013736} 750310 commit chunk# 23 {ChunkIdx: 23 Offset: 101224448 Size: 32989316} 749755 commit chunk# 24 {ChunkIdx: 24 Offset: 101216256 Size: 33001460} 750031 commit chunk# 25 {ChunkIdx: 25 Offset: 101212160 Size: 33002384} 750052 commit chunk# 26 {ChunkIdx: 26 Offset: 101212160 Size: 33001592} 750034 commit chunk# 27 {ChunkIdx: 27 Offset: 101212160 Size: 33004188} 750093 commit chunk# 28 {ChunkIdx: 28 Offset: 101232640 Size: 32982760} 749606 commit chunk# 29 {ChunkIdx: 29 Offset: 101203968 Size: 33012152} 750274 commit chunk# 30 {ChunkIdx: 30 Offset: 101224448 Size: 32992044} 749817 commit chunk# 31 {ChunkIdx: 31 Offset: 101203968 Size: 33010612} 750239 commit chunk# 32 {ChunkIdx: 32 Offset: 101224448 Size: 32989756} 749765 commit chunk# 33 {ChunkIdx: 33 Offset: 101208064 Size: 33009644} 750217 commit chunk# 34 {ChunkIdx: 34 Offset: 101224448 Size: 32993276} 749845 commit chunk# 35 {ChunkIdx: 35 Offset: 101224448 Size: 32993276} 749845 commit chunk# 36 {ChunkIdx: 36 Offset: 101203968 Size: 33012724} 750287 commit chunk# 37 {ChunkIdx: 37 Offset: 101208064 Size: 33009644} 750217 commit chunk# 38 {ChunkIdx: 38 Offset: 101220352 Size: 32993276} 749845 commit chunk# 39 {ChunkIdx: 39 Offset: 101187584 Size: 33026628} 750603 commit chunk# 40 {ChunkIdx: 40 Offset: 101224448 Size: 32990988} 749793 commit chunk# 41 {ChunkIdx: 41 Offset: 101203968 Size: 33010480} 750236 commit chunk# 42 {ChunkIdx: 42 Offset: 101224448 Size: 32992660} 749831 commit chunk# 43 {ChunkIdx: 43 Offset: 101216256 Size: 33000096} 750000 commit chunk# 44 {ChunkIdx: 44 Offset: 101208064 Size: 33006784} 750152 commit chunk# 45 {ChunkIdx: 45 Offset: 101203968 Size: 33009688} 750218 commit chunk# 46 {ChunkIdx: 46 Offset: 101220352 Size: 32997368} 749938 commit chunk# 47 {ChunkIdx: 47 Offset: 101232640 Size: 32982936} 749610 commit chunk# 
48 {ChunkIdx: 48 Offset: 101195776 Size: 33020204} 750457 commit chunk# 49 {ChunkIdx: 49 Offset: 101249024 Size: 32968152} 749274 commit chunk# 50 {ChunkIdx: 50 Offset: 101228544 Size: 32985268} 749663 commit chunk# 51 {ChunkIdx: 51 Offset: 101216256 Size: 33001460} 750031 commit chunk# 52 {ChunkIdx: 52 Offset: 101240832 Size: 32976864} 749472 commit chunk# 53 {ChunkIdx: 53 Offset: 101224448 Size: 32993188} 749843 commit chunk# 54 {ChunkIdx: 54 Offset: 101240832 Size: 32975236} 749435 commit chunk# 55 {ChunkIdx: 55 Offset: 101199872 Size: 33017300} 750391 commit chunk# 56 {ChunkIdx: 56 Offset: 101236736 Size: 32979548} 749533 commit chunk# 57 {ChunkIdx: 57 Offset: 101208064 Size: 33006344} 750142 commit chunk# 58 {ChunkIdx: 58 Offset: 101208064 Size: 33009644} 750217 commit chunk# 59 {ChunkIdx: 59 Offset: 101224448 Size: 32993276} 749845 commit chunk# 60 {ChunkIdx: 60 Offset: 101216256 Size: 32999524} 749987 commit chunk# 61 {ChunkIdx: 61 Offset: 101208064 Size: 33006828} 750153 commit chunk# 62 {ChunkIdx: 62 Offset: 101228544 Size: 32988656} 749740 commit chunk# 63 {ChunkIdx: 63 Offset: 101240832 Size: 32974576} 749420 commit chunk# 64 {ChunkIdx: 64 Offset: 101236736 Size: 32979944} 749542 commit chunk# 65 {ChunkIdx: 65 Offset: 101228544 Size: 32986808} 749698 commit chunk# 66 {ChunkIdx: 66 Offset: 101212160 Size: 33005464} 750122 commit chunk# 67 {ChunkIdx: 67 Offset: 101208064 Size: 33005816} 750130 commit chunk# 68 {ChunkIdx: 68 Offset: 101220352 Size: 32997368} 749938 commit chunk# 69 {ChunkIdx: 69 Offset: 101240832 Size: 32973080} 749386 commit chunk# 70 {ChunkIdx: 70 Offset: 101249024 Size: 32968680} 749286 commit chunk# 71 {ChunkIdx: 71 Offset: 101203968 Size: 33010744} 750242 commit chunk# 72 {ChunkIdx: 72 Offset: 101203968 Size: 33013736} 750310 commit chunk# 73 {ChunkIdx: 73 Offset: 101203968 Size: 33011184} 750252 commit chunk# 74 {ChunkIdx: 74 Offset: 101236736 Size: 32979020} 749521 commit chunk# 75 {ChunkIdx: 75 Offset: 101224448 Size: 32990768} 749788 commit chunk# 76 {ChunkIdx: 76 Offset: 101228544 Size: 32987072} 749704 commit chunk# 77 {ChunkIdx: 77 Offset: 101212160 Size: 33001768} 750038 commit chunk# 78 {ChunkIdx: 78 Offset: 101191680 Size: 33025660} 750581 commit chunk# 79 {ChunkIdx: 79 Offset: 101228544 Size: 32986588} 749693 commit chunk# 80 {ChunkIdx: 80 Offset: 101220352 Size: 32997368} 749938 commit chunk# 81 {ChunkIdx: 81 Offset: 101236736 Size: 32978492} 749509 commit chunk# 82 {ChunkIdx: 82 Offset: 101240832 Size: 32976776} 749470 commit chunk# 83 {ChunkIdx: 83 Offset: 101224448 Size: 32990196} 749775 commit chunk# 84 {ChunkIdx: 84 Offset: 101224448 Size: 32990944} 749792 commit chunk# 85 {ChunkIdx: 85 Offset: 101232640 Size: 32985048} 749658 commit chunk# 86 {ChunkIdx: 86 Offset: 101212160 Size: 33001504} 750032 commit chunk# 87 {ChunkIdx: 87 Offset: 101240832 Size: 32973784} 749402 commit chunk# 88 {ChunkIdx: 88 Offset: 101191680 Size: 33025836} 750585 commit chunk# 89 {ChunkIdx: 89 Offset: 101261312 Size: 32956404} 749007 commit chunk# 90 {ChunkIdx: 90 Offset: 101228544 Size: 32987512} 749714 commit chunk# 91 {ChunkIdx: 91 Offset: 101236736 Size: 32978668} 749513 commit chunk# 92 {ChunkIdx: 92 Offset: 101216256 Size: 32999304} 749982 commit chunk# 93 {ChunkIdx: 93 Offset: 101203968 Size: 33009776} 750220 commit chunk# 94 {ChunkIdx: 94 Offset: 101208064 Size: 33007004} 750157 commit chunk# 95 {ChunkIdx: 95 Offset: 101224448 Size: 32990152} 749774 commit chunk# 96 {ChunkIdx: 96 Offset: 101191680 Size: 33022096} 750500 commit chunk# 97 {ChunkIdx: 97 Offset: 
101240832 Size: 32975280} 749436 commit chunk# 98 {ChunkIdx: 98 Offset: 101208064 Size: 33009644} 750217 commit chunk# 99 {ChunkIdx: 99 Offset: 101216256 Size: 33000008} 749998 commit chunk# 100 {ChunkIdx: 100 Offset: 101253120 Size: 32964588} 749193 commit chunk# 101 {ChunkIdx: 101 Offset: 101216256 Size: 32999436} 749985 commit chunk# 102 {ChunkIdx: 102 Offset: 101220352 Size: 32997368} 749938 commit chunk# 103 {ChunkIdx: 103 Offset: 101224448 Size: 32989888} 749768 commit chunk# 104 {ChunkIdx: 104 Offset: 101228544 Size: 32986720} 749696 commit chunk# 105 {ChunkIdx: 105 Offset: 101232640 Size: 32985048} 749658 commit chunk# 106 {ChunkIdx: 106 Offset: 101232640 Size: 32984652} 749649 commit chunk# 107 {ChunkIdx: 107 Offset: 101232640 Size: 32982672} 749604 commit chunk# 108 {ChunkIdx: 108 Offset: 101216256 Size: 32999568} 749988 commit chunk# 109 {ChunkIdx: 109 Offset: 101232640 Size: 32984520} 749646 commit chunk# 110 {ChunkIdx: 110 Offset: 101203968 Size: 33012680} 750286 commit chunk# 111 {ChunkIdx: 111 Offset: 101212160 Size: 33005552} 750124 commit chunk# 112 {ChunkIdx: 112 Offset: 101232640 Size: 32984212} 749639 commit chunk# 113 {ChunkIdx: 113 Offset: 101212160 Size: 33005068} 750113 commit chunk# 114 {ChunkIdx: 114 Offset: 101220352 Size: 32995256} 749890 commit chunk# 115 {ChunkIdx: 115 Offset: 101236736 Size: 32978800} 749516 commit chunk# 116 {ChunkIdx: 116 Offset: 101212160 Size: 33003748} 750083 commit chunk# 117 {ChunkIdx: 117 Offset: 101236736 Size: 32980956} 749565 commit chunk# 118 {ChunkIdx: 118 Offset: 101249024 Size: 32966568} 749238 commit chunk# 119 {ChunkIdx: 119 Offset: 101208064 Size: 33005552} 750124 commit chunk# 120 {ChunkIdx: 120 Offset: 101249024 Size: 32965820} 749221 commit chunk# 121 {ChunkIdx: 121 Offset: 101224448 Size: 32991560} 749806 commit chunk# 122 {ChunkIdx: 122 Offset: 101216256 Size: 32997896} 749950 commit chunk# 123 {ChunkIdx: 123 Offset: 101220352 Size: 32997368} 749938 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut_large/unittest >> TFlatTableLongTxLarge::LargeDeltaChain [GOOD] Test command err: DataBytes = 1073746235 DataPages = 150237 FlatIndexBytes = 3155010 BTreeIndexBytes = 6810652 DataBytes = 1073742150 DataPages = 151523 FlatIndexBytes = 22252537 BTreeIndexBytes = 25927778 DataBytes = 1073753117 DataPages = 148879 FlatIndexBytes = 1072403884 BTreeIndexBytes = 1077128646 DataBytes = 1073744451 DataPages = 150676 FlatIndexBytes = 6479123 BTreeIndexBytes = 7437771 DataBytes = 1073743502 DataPages = 47351 FlatIndexBytes = 1643820 BTreeIndexBytes = 2065359 DataBytes = 1073744719 DataPages = 70000 FlatIndexBytes = 3454718 BTreeIndexBytes = 3553208 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:25:12.553943Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 ...compacting ...waiting until compacted ...compacting ...waiting until compacted 00084.362 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00084.371 NN| TABLET_SAUSAGECACHE: Poison cache serviced 265 reqs hit {1 102b} miss {266 17592354026b} 00084.371 II| FAKE_ENV: Shut order, stopping 4 BS groups 00084.371 II| FAKE_ENV: DS.0 gone, left {15399b, 2}, put {222293b, 1558} 00084.371 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 
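[editor's note] Two of the numeric dumps above invite a quick consistency check. In the HullReplWriteSst commit log, the bare integer after each `{ChunkIdx ... Offset ... Size ...}` record satisfies `Size == 96 + 44 * counter` for every entry sampled, which suggests fixed 44-byte records behind a 96-byte header; that reading is inferred purely from the numbers, not from the YDB sources. The TFlatTableLongTxLarge figures likewise let one compare flat and B-tree index overhead against DataBytes. A minimal sketch over values copied verbatim from the log:

```python
# Sanity arithmetic over numbers copied from the two test logs above.
# The 44-byte record / 96-byte header split is an assumption inferred
# from the data itself.

# (ChunkIdx, Size, trailing counter) from the HullReplWriteSst commit log.
commits = [
    (1, 32994288, 749868),
    (5, 33001460, 750031),
    (49, 32968152, 749274),
    (89, 32956404, 749007),
    (123, 32997368, 749938),
]
for idx, size, n in commits:
    assert size == 96 + 44 * n, (idx, size, n)
print("every sampled commit satisfies Size == 96 + 44 * counter")

# (DataBytes, FlatIndexBytes, BTreeIndexBytes) from TFlatTableLongTxLarge.
runs = [
    (1073746235, 3155010, 6810652),
    (1073742150, 22252537, 25927778),
    (1073753117, 1072403884, 1077128646),  # delta-chain case: index ~= data
]
for data, flat, btree in runs:
    print(f"flat index {flat / data:.2%} of data, b-tree index {btree / data:.2%}")
```

Note how the LargeDeltaChain run is the outlier: both index flavors balloon to roughly the size of the data itself, which is presumably what the test is exercising.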
00084.371 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00084.371 II| FAKE_ENV: DS.1 gone, left {4398138657b, 535}, put {8813752974b, 2851} 00084.372 II| FAKE_ENV: All BS storage groups are stopped 00084.372 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00084.378 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped |61.8%| [TA] {RESULT} $(B)/ydb/tests/fq/streaming_optimize/test-results/py3test/{meta.json ... results_accumulator.log} |61.8%| [TM] {RESULT} ydb/core/tablet_flat/ut_large/unittest >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasure4Plus2Block >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx5 >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasure4Plus2Block [GOOD] |61.9%| [TA] $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> HttpRequest::Status [GOOD] |61.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckTDiscoverFaultToleranceTestErasure4Plus2Block [GOOD] >> TBsProxyFaultToleranceTest::CheckTRangeFaultToleranceTestErasureMirror3dc >> HttpRequest::Analyze [GOOD] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx4 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> HttpRequest::Status [GOOD] Test command err: 2025-06-03T10:26:28.202128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:28.202168Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:28.202177Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bdd/r3tmp/tmpe0hnm6/pdisk_1.dat 2025-06-03T10:26:28.308261Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12990, node 1 2025-06-03T10:26:28.411658Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:28.411678Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:28.411683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:28.411726Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:28.412243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:28.488708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.488750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.500837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22305 2025-06-03T10:26:28.846111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.582032Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:29.589681Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.589721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.643262Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:29.643801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.800204Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800363Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800498Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
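[editor's note] Before the HttpRequest::Status output continues, it is worth collecting the force-traversal sequence that the aggregator/service log at the top of this excerpt walks through one transaction at a time. The stage names below are taken verbatim from those records; the one-line glosses are interpretation, not text from the sources:

```python
# Readable restatement of the force-traversal pipeline visible in the
# STATISTICS DEBUG records earlier in this excerpt.
FORCE_TRAVERSAL_STAGES = [
    ("Start force traversal navigate", "aggregator picks the target path"),
    ("TTxNavigate",                    "look the path up in the scheme cache"),
    ("TTxResolve",                     "resolve the table to its shards"),
    ("TTxResponseTabletDistribution",  "learn which nodes host those tablets"),
    ("TEvAggregateStatistics",         "fan the round out to the service"),
    ("TEvStatisticsRequest",           "ask each tablet for column stats"),
    ("TTxAggregateStatisticsResponse", "merge the per-tablet answers"),
    ("TQueryBase RunDataQuery",        "UPSERT results into .metadata/_statistics"),
    ("TTxFinishTraversal",             "persist completion, send TEvAnalyzeResponse"),
]

for i, (name, gloss) in enumerate(FORCE_TRAVERSAL_STAGES, 1):
    print(f"{i}. {name}: {gloss}")
```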
2025-06-03T10:26:29.800536Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800576Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800596Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800613Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800632Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.800648Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.953073Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.953113Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.964226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.000984Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:30.013668Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:30.013705Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:30.023521Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:30.023591Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:30.023621Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:30.023629Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:30.023636Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:30.023644Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:30.023651Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:30.023660Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:30.023843Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:30.037906Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.037940Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.039568Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:30.040479Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
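[editor's note] The `.metadata/_statistics` writer seen earlier (the RunDataQuery record in the traversal log) packs its whole query onto one line. Reformatted it reads as below; the log truncates the parameter types to bare `List`, so the element types shown are assumptions:

```python
# The data query from the RunDataQuery record, reformatted for readability.
STATISTICS_UPSERT = """
DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;
DECLARE $stat_type AS Uint32;
DECLARE $column_tags AS List<Uint32>;  -- element type assumed, truncated in log
DECLARE $data AS List<String>;         -- element type assumed, truncated in log

UPSERT INTO `.metadata/_statistics`
    (owner_id, local_path_id, stat_type, column_tag, data)
VALUES
    ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
    ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);
"""
print(STATISTICS_UPSERT)
```

The two VALUES tuples match the two column statistics being persisted in that traversal; one row per column tag, keyed by owner, path, and stat type.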
2025-06-03T10:26:30.040602Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:30.042455Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:30.046421Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:30.046440Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:30.046452Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:30.050643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:30.052470Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:30.052508Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:30.162501Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.238424Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.311612Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:30.815580Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.815636Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.821231Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:30.902183Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:30.902272Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:30.902347Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:26:30.902374Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:26:30.902398Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:26:30.902424Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:26:30.902448Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:26:30.902480Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2372:2880];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 
d: 72075186224037897 2025-06-03T10:26:32.331322Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.332304Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.332584Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.332679Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.332838Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.333122Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.333206Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.333602Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.333696Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.334183Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:33.115871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3796:3217], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.116034Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.117045Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897 2025-06-03T10:26:33.141394Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.141554Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142009Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142095Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142189Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142386Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142456Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142626Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.142942Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.143056Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.933285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3962:3265], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.933521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.935028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897 2025-06-03T10:26:33.959944Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960062Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960454Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960564Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960645Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960723Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.960802Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.961093Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.961209Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:33.961474Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000014s 2025-06-03T10:26:39.119869Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5850:5438] 2025-06-03T10:26:39.120732Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:478: [72075186224037894] Send TEvStatistics::TEvAnalyzeStatusResponse. 
Status STATUS_NO_OPERATION Answer: 'No analyze operation' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogTsRangeDescending [GOOD] Test command err: 2025-06-03T10:26:09.720330Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667459638703955:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:09.720355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027f3/r3tmp/tmptjZs3y/pdisk_1.dat 2025-06-03T10:26:09.788235Z node 1 :IMPORT WARN: 
schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27967, node 1 2025-06-03T10:26:09.812284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:09.812297Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:09.812299Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:09.812358Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:09.820365Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:09.820398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:09.822038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10435 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:09.859074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
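[editor's note] Interleaved with these records, the runner keeps emitting status markers of the form `>> TestName [GOOD]` and percent-prefixed progress lines. A small, illustrative helper (not part of ya; ya writes its authoritative results elsewhere) for tallying the verdicts out of a raw log like this one:

```python
import re
from collections import Counter

# Matches runner status markers such as ">> HullReplWriteSst::Basic [GOOD]".
STATUS = re.compile(r">>\s+(\S+)\s+\[(\w+)\]")

def tally(log_text: str) -> Counter:
    """Count test verdicts ([GOOD], [FAIL], ...) found in a ya log."""
    return Counter(verdict for _, verdict in STATUS.findall(log_text))

sample = ">> HullReplWriteSst::Basic [GOOD] >> HttpRequest::Status [GOOD]"
print(tally(sample))  # Counter({'GOOD': 2})
```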
TClient is connected to server localhost:10435 2025-06-03T10:26:09.914202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateColumnStore CreateColumnStore { Name: "OlapStore" ColumnShardCount: 4 SchemaPresets { Name: "default" Schema { Columns { Name: "message" Type: "Utf8" } Columns { Name: "json_payload" Type: "JsonDocument" } Columns { Name: "resource_id" Type: "Utf8" NotNull: true } Columns { Name: "uid" Type: "Utf8" NotNull: true } Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } Columns { Name: "resource_type" Type: "Utf8" NotNull: true } Columns { Name: "level" Type: "Int32" } Columns { Name: "ingested_at" Type: "Timestamp" } Columns { Name: "saved_at" Type: "Timestamp" } Columns { Name: "request_id" Type: "Utf8" } KeyColumnNames: "timestamp" KeyColumnNames: "resource_type" KeyColumnNames: "resource_id" KeyColumnNames: "uid" } } } } TxId: 281474976715658 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:26:09.914300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_store.cpp:331: TCreateOlapStore Propose, path: /Root/OlapStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:09.914450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: OlapStore, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:26:09.914470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-03T10:26:09.914479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-03T10:26:09.914487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-03T10:26:09.914492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-03T10:26:09.914497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 4 2025-06-03T10:26:09.914566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 5 2025-06-03T10:26:09.914830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715658:0 1 -> 2 2025-06-03T10:26:09.914915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:26:09.914929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:09.914960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for 
pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:26:09.914969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 6 2025-06-03T10:26:09.915991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-03T10:26:09.916036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: , status: StatusAccepted, operation: CREATE COLUMN STORE, path: /Root/OlapStore 2025-06-03T10:26:09.916107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:26:09.916114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:26:09.916153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-03T10:26:09.916189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:26:09.916196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7511667459638704516:2372], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-03T10:26:09.916204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7511667459638704516:2372], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-03T10:26:09.916215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:26:09.916224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976715658:0 ProgressState, operation type: TxCreateOlapStore, at tablet# 72057594046644480 2025-06-03T10:26:09.916364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 281474976715658:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046644480 OwnerIdx: 1 TabletType: ColumnShard ObjectDomain { SchemeShard: 72057594046644480 PathId: 1 } ObjectId: 2 BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } 
BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { StoragePoolName: "hdd2" } BindedChannels { Storag ... nput channelId: 36, seqNo: [1] 2025-06-03T10:26:38.221652Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 37, seqNo: [1] 2025-06-03T10:26:38.221655Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 38, seqNo: [1] 2025-06-03T10:26:38.221657Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 39, seqNo: [1] 2025-06-03T10:26:38.221660Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 40, seqNo: [1] 2025-06-03T10:26:38.221661Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 41, seqNo: [1] 2025-06-03T10:26:38.221664Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 42, seqNo: [1] 2025-06-03T10:26:38.221666Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 43, seqNo: [1] 2025-06-03T10:26:38.221668Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 44, seqNo: [1] 2025-06-03T10:26:38.221670Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 45, seqNo: [1] 2025-06-03T10:26:38.221672Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 46, seqNo: [1] 2025-06-03T10:26:38.221678Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 47, seqNo: [1] 2025-06-03T10:26:38.221681Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 48, seqNo: [1] 2025-06-03T10:26:38.221683Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 49, seqNo: [1] 2025-06-03T10:26:38.221685Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 50, seqNo: [1] 2025-06-03T10:26:38.221688Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 51, seqNo: [1] 2025-06-03T10:26:38.221690Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 52, seqNo: [1] 2025-06-03T10:26:38.221692Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 53, seqNo: [1] 2025-06-03T10:26:38.221694Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 54, seqNo: [1] 2025-06-03T10:26:38.221696Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 55, seqNo: [1] 2025-06-03T10:26:38.221699Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 56, seqNo: [1] 2025-06-03T10:26:38.221701Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 57, seqNo: [1] 2025-06-03T10:26:38.221703Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 58, seqNo: [1] 2025-06-03T10:26:38.221707Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 59, seqNo: [1] 2025-06-03T10:26:38.221709Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 60, seqNo: [1] 2025-06-03T10:26:38.221711Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. 
Tasks execution finished, don't wait for ack delivery in input channelId: 61, seqNo: [1] 2025-06-03T10:26:38.221714Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 62, seqNo: [1] 2025-06-03T10:26:38.221716Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 63, seqNo: [1] 2025-06-03T10:26:38.221718Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715670, task: 65. Tasks execution finished, don't wait for ack delivery in input channelId: 64, seqNo: [1] 2025-06-03T10:26:38.221720Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715670, task: 65. Tasks execution finished 2025-06-03T10:26:38.221722Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [28:7511667585589852689:3148], TxId: 281474976715670, task: 65. Ctx: { CustomerSuppliedId : . TraceId : 01jwtn8axn437kkjvg00y0b2zt. SessionId : ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-03T10:26:38.221759Z node 28 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715670, task: 65. pass away 2025-06-03T10:26:38.221790Z node 28 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715670;task_id=65;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:26:38.221790Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [28:7511667585589852606:3076] TxId: 281474976715670. Ctx: { TraceId: 01jwtn8axn437kkjvg00y0b2zt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [28:7511667585589852689:3148], task: 65, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1520 Tasks { TaskId: 65 StageId: 1 CpuTimeUs: 113 FinishTimeMs: 1748946398221 ComputeCpuTimeUs: 34 BuildCpuTimeUs: 79 HostName: "ghrun-pyvh3niaay" NodeId: 28 CreateTimeMs: 1748946398206 UpdateTimeMs: 1748946398221 } MaxMemoryUsage: 1048576 } 2025-06-03T10:26:38.221800Z node 28 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715670. Ctx: { TraceId: 01jwtn8axn437kkjvg00y0b2zt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [28:7511667585589852689:3148] 2025-06-03T10:26:38.221851Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [28:7511667585589852606:3076] TxId: 281474976715670. Ctx: { TraceId: 01jwtn8axn437kkjvg00y0b2zt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:26:38.221863Z node 28 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [28:7511667585589852606:3076] TxId: 281474976715670. Ctx: { TraceId: 01jwtn8axn437kkjvg00y0b2zt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.019479s ReadRows: 0 ReadBytes: 0 ru: 12 rate limiter was not found force flag: 1 2025-06-03T10:26:38.221882Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-03T10:26:38.221987Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 30.176 QueriesCount: 1 2025-06-03T10:26:38.222002Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:26:38.222035Z node 28 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:26:38.222039Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, EndCleanup, isFinal: 1 2025-06-03T10:26:38.222055Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: ExecuteState, TraceId: 01jwtn8axn437kkjvg00y0b2zt, Sent query response back to proxy, proxyRequestId: 5, proxyId: [28:7511667576999914852:2280] 2025-06-03T10:26:38.222058Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: unknown state, TraceId: 01jwtn8axn437kkjvg00y0b2zt, Cleanup temp tables: 0 2025-06-03T10:26:38.223444Z node 28 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946398178, txId: 18446744073709551615] shutting down 2025-06-03T10:26:38.223494Z node 28 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=28&id=YmQ4YjdjNjYtYjUyNDYwMTAtY2NhODViZjYtZmQyOGJmYmI=, ActorId: [28:7511667585589852581:3076], ActorState: unknown state, TraceId: 01jwtn8axn437kkjvg00y0b2zt, Session actor destroyed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> HttpRequest::Analyze [GOOD] Test command err: 2025-06-03T10:26:28.699008Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:28.699043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:28.699050Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bd2/r3tmp/tmpjMabO4/pdisk_1.dat 2025-06-03T10:26:28.798487Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3893, node 1 2025-06-03T10:26:28.905559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:28.905582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:28.905588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:28.905647Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:28.906328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:28.984148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.984183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.996182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20166 2025-06-03T10:26:29.344181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:30.089374Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:30.098801Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.098841Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.152473Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:30.153168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.311341Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311532Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311690Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:30.311731Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311781Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311807Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311824Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311843Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.311865Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.464975Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.465007Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.476152Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.508795Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:30.525044Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:30.525081Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:30.533514Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:30.533560Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:30.533579Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:30.533584Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:30.533588Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:30.533593Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:30.533597Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:30.533601Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:30.533716Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:30.546877Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.546908Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.548020Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:30.548716Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:30.548798Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:30.550608Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:30.553877Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:30.553894Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:30.553903Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:30.557656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:30.559126Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:30.559164Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:30.663387Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.735299Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.787451Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:31.316772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2213:3058], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.316823Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.322174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:31.396721Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:31.396799Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:31.396867Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:26:31.396893Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:26:31.396918Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:26:31.396947Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:26:31.396975Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:26:31.397001Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2368:2878];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_reg ... 
: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.798834Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.798916Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.799049Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.799104Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.799206Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.799392Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:32.799496Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:26:33.600429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3800:3218], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.600521Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.605419Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037897 2025-06-03T10:26:33.631559Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.631969Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.632342Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.632488Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.632603Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.633080Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.633452Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.633592Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.633729Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:33.634286Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:26:34.300142Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:3961:3264], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.300199Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.304755Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72075186224037897 2025-06-03T10:26:34.339894Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037900;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.340053Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037901;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.340631Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.340750Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037902;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.340850Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037904;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.340950Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.341047Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.341652Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.341779Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:34.341901Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; waiting actualization: 0/0.000015s 2025-06-03T10:26:39.800206Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:5869:5445] 2025-06-03T10:26:39.801558Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:5866:3869] , Record { OperationId: "\000\000\000\000\034*^\004\342\247\323\266Q\324\027\035" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } } 2025-06-03T10:26:39.801588Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:47: [72075186224037894] TTxAnalyze::Execute. Create new force traversal operation, OperationId= *^ӶQ 2025-06-03T10:26:39.801598Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:65: [72075186224037894] TTxAnalyze::Execute. 
Create new force traversal table, OperationId= *^ӶQ , PathId [OwnerId: 72075186224037897, LocalPathId: 4] Answer: 'Analyze sent. OperationId: 000000071abr2e59ykps8x85rx' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; >> TExternalTableTest::SchemeErrors |61.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |61.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut |61.9%| [TA] {RESULT} $(B)/ydb/core/blobstorage/vdisk/repl/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |61.9%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/ut/ydb-core-kesus-tablet-ut >> TExternalTableTest::SchemeErrors [GOOD] >> TExternalTableTest::DropExternalTable >> TExternalTableTest::DropTableTwice |61.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |61.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut |61.9%| [LD] {RESULT} $(B)/ydb/core/health_check/ut/ydb-core-health_check-ut >> TExternalTableTest::ReplaceExternalTableIfNotExists ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::SchemeErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:42.508400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:42.508429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:42.508436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:42.508442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:42.508448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:42.508452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:42.508462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:42.508476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:42.508594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:42.508670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:42.551039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:42.551063Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:42.551164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:42.559551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-03T10:26:42.559689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:42.559723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:42.570720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:42.570920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:42.571057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:42.571136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:42.571822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:42.571867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:42.572157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:42.572167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:42.572183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:42.572194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:42.572200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:42.572245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.573565Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:42.623106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.623191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.623258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:42.623305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose 
status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:42.623319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.624134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:42.624162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:42.624227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.624238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:42.624245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:42.624250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:42.624681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.624692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:42.624698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:42.625011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.625019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:42.625027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:42.625034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:42.625889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:42.627418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:42.627470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: 
advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:42.627674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:42.627706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.627718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:42.627781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:42.627789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:42.627831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:42.627845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:42.628293Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:42.628302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 
eration.cpp:130: IgniteOperation, opId: 126:1, propose status:StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, at schemeshard: 72057594046678944 2025-06-03T10:26:42.665003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 126, response: Status: StatusSchemeError Reason: "Type \'BlaBlaType\' specified for column \'RowId\' is not supported by storage" TxId: 126 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.665039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 126, database: /MyRoot, subject: , status: StatusSchemeError, reason: Type 'BlaBlaType' specified for column 'RowId' is not supported by storage, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 126, wait until txId: 126 TestModificationResults wait txId: 127 2025-06-03T10:26:42.665736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } } TxId: 127 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.665782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 127:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "" Type: "Uint64" } } 2025-06-03T10:26:42.665793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 127:0, path# /MyRoot/DirA/Table2 2025-06-03T10:26:42.665824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 127:1, propose status:StatusSchemeError, reason: Columns cannot have an empty name, at schemeshard: 72057594046678944 2025-06-03T10:26:42.666240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 127, response: Status: StatusSchemeError Reason: "Columns cannot have an empty name" TxId: 127 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.666263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 127, database: /MyRoot, subject: , status: StatusSchemeError, reason: Columns cannot have an empty name, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 127, wait until txId: 127 TestModificationResults wait txId: 128 2025-06-03T10:26:42.666860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.666899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: 
[72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" TypeId: 27 } } 2025-06-03T10:26:42.666909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/DirA/Table2 2025-06-03T10:26:42.666924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 128:1, propose status:StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, at schemeshard: 72057594046678944 2025-06-03T10:26:42.672463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 128, response: Status: StatusSchemeError Reason: "Cannot set TypeId for column \'RowId\', use Type" TxId: 128 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.672515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusSchemeError, reason: Cannot set TypeId for column 'RowId', use Type, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 128, wait until txId: 128 TestModificationResults wait txId: 129 2025-06-03T10:26:42.673322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } } TxId: 129 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.673375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 129:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" } } 2025-06-03T10:26:42.673388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 129:0, path# /MyRoot/DirA/Table2 2025-06-03T10:26:42.673410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 129:1, propose status:StatusSchemeError, reason: Missing Type for column 'RowId', at schemeshard: 72057594046678944 2025-06-03T10:26:42.674097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 129, response: Status: StatusSchemeError Reason: "Missing Type for column \'RowId\'" TxId: 129 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.674124Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 129, database: /MyRoot, subject: , status: StatusSchemeError, reason: Missing Type for column 'RowId', operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 129, wait until txId: 129 TestModificationResults wait txId: 130 2025-06-03T10:26:42.674806Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } } TxId: 130 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.674854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 130:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" Id: 2 } Columns { Name: "RowId2" Type: "Uint64" Id: 2 } } 2025-06-03T10:26:42.674866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 130:0, path# /MyRoot/DirA/Table2 2025-06-03T10:26:42.674899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 130:1, propose status:StatusSchemeError, reason: Duplicate column id: 2, at schemeshard: 72057594046678944 2025-06-03T10:26:42.675404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 130, response: Status: StatusSchemeError Reason: "Duplicate column id: 2" TxId: 130 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.675427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 130, database: /MyRoot, subject: , status: StatusSchemeError, reason: Duplicate column id: 2, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 130, wait until txId: 130 TestModificationResults wait txId: 131 2025-06-03T10:26:42.676048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } } TxId: 131 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:42.676109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 131:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "Table2" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource1" Location: "/" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } } 2025-06-03T10:26:42.676122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 131:0, path# /MyRoot/DirA/Table2 2025-06-03T10:26:42.676147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 131:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path 
hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:166, at schemeshard: 72057594046678944 2025-06-03T10:26:42.676681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 131, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:166" TxId: 131 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:42.676707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 131, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/ExternalDataSource1', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:166, operation: CREATE EXTERNAL TABLE, path: /MyRoot/DirA/Table2 TestModificationResult got TxId: 131, wait until txId: 131 >> HttpRequest::AnalyzeServerless [GOOD] >> TExternalTableTest::DropExternalTable [GOOD] >> TExternalTableTest::Decimal >> TExternalTableTest::DropTableTwice [GOOD] >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] >> TExternalTableTest::Decimal [GOOD] |61.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |61.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login |61.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login/ydb-core-tx-schemeshard-ut_login ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> HttpRequest::AnalyzeServerless [GOOD] Test command err: 2025-06-03T10:26:29.620781Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:29.620815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:29.620823Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bc8/r3tmp/tmpx9C8Fu/pdisk_1.dat 2025-06-03T10:26:29.723192Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10358, node 1 2025-06-03T10:26:29.827192Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:29.827214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:29.827217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:29.827259Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:29.827757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.903959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.903992Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.916068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62398 2025-06-03T10:26:30.258806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:30.990272Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:31.000566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:31.000611Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:31.054443Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:31.055054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:31.212655Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.212821Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.212932Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:31.212961Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.212996Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.213010Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.213022Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.213036Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.213052Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:31.363286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:31.363327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:31.374771Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:31.412683Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:31.431945Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:31.431988Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:31.442176Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:31.442234Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:31.442267Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:31.442274Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:31.442281Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:31.442289Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:31.442296Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:31.442304Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:31.442468Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:31.455972Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:31.456006Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:31.457250Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:31.458126Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:31.458238Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:31.460302Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:26:31.463938Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:31.463955Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:31.463974Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:26:31.467963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:31.469731Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:31.469762Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:31.575538Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:31.649135Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:31.691820Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:32.214848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:32.654945Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:32.737383Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:26:32.737405Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:32.737429Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2567:2933], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:32.737817Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2569:2935] 2025-06-03T10:26:32.737973Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2569:2935], schemeshard id = 72075186224037899 2025-06-03T10:26:33.492509Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2691:3233], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.492563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:33.497722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037899 2025-06-03T10:26:33.579277Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:33.579361Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:33.579424Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register ... D_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057140Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057258Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057454Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057539Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057613Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057685Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:35.057763Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:26:36.210300Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4312:3402], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:36.210354Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:36.214646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715663:0, at schemeshard: 72075186224037899 2025-06-03T10:26:36.238693Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.238850Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.239060Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.239400Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.239526Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.239808Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.239966Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.240141Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.240256Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.240346Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:26:36.842246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:4471:3444], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:36.842480Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:36.843872Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72075186224037899 2025-06-03T10:26:36.876946Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.877087Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037906;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.878281Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037912;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.878626Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.878735Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.878835Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037907;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.878920Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037914;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.879329Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.879424Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037910;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:26:36.879505Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; waiting actualization: 0/0.000015s 2025-06-03T10:26:42.724691Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:6360:5652] 2025-06-03T10:26:42.725775Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:6356:4035] , Record { OperationId: "\000\000\000\000\"\n;\362\263\246k\301\325\350\250\030" Tables { PathId { OwnerId: 72057594046644480 LocalId: 2 } } } 2025-06-03T10:26:42.725822Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:47: [72075186224037894] TTxAnalyze::Execute. Create new force traversal operation, OperationId=" ;k 2025-06-03T10:26:42.725830Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:65: [72075186224037894] TTxAnalyze::Execute. 
Create new force traversal table, OperationId=" ;k , PathId [OwnerId: 72057594046644480, LocalPathId: 2] Answer: 'Analyze sent. OperationId: 00000008ga7fsb79kbr7ayha0r' FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=TTxWriteIndex destructor withno CompleteReady flag;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=manager.cpp:51;message=aborted data locks manager; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::DropTableTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:43.565397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# 
no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:43.565426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.565432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:43.565438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:43.565444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:43.565449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:43.565459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.565474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:43.565585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.565667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:43.613558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:43.613584Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:43.613693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.618260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:43.618369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:43.618401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:43.621809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:43.622034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:43.622172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.622257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:43.623076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.623126Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:43.623427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.623440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.623459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:43.623468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.623475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:43.623518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.625186Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:43.646875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:43.646966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.647023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:43.647061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:43.647073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.648046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.648083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:43.648169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.648184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:43.648194Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:43.648200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:43.648915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.648938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:43.648944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:43.649510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.649531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.649541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.649553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:43.650403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:43.651012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:43.651073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:43.652011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.652047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:43.652066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.652139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:43.652151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:26:43.652203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:43.652222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:43.652876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.652891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... rd.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.683582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.683619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:43.683637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:43.683662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.683667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 103, path id: 1 2025-06-03T10:26:43.683673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-03T10:26:43.683678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:26:43.683748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.683757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:26:43.683770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:43.683775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:43.683780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:26:43.683784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:43.683789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-03T10:26:43.683795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:26:43.683802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:26:43.683808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:26:43.683826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:43.683832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:26:43.683840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 3, subscribers: 0 2025-06-03T10:26:43.683846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:26:43.683852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-03T10:26:43.683861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:26:43.683978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.683999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.684006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:43.684014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:26:43.684021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:43.684145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:43.684155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:43.684169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 
2025-06-03T10:26:43.684239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.684250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.684255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:43.684260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:26:43.684265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:43.684330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.684339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:26:43.684344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:26:43.684351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-03T10:26:43.684356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:43.684365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-03T10:26:43.684956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:43.685351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:43.685377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:43.685392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:26:43.685454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- 
TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:26:43.685461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:26:43.685537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:26:43.685557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:26:43.685563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:366:2356] TestWaitNotification: OK eventTxId 103 2025-06-03T10:26:43.685641Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:43.685672Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 42us result status StatusPathDoesNotExist 2025-06-03T10:26:43.685715Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::Decimal [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:43.509013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:43.509038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.509045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:43.509051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 
2025-06-03T10:26:43.509057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:43.509062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:43.509072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.509086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:43.509193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.509265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:43.528187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:43.528212Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:43.528324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.535259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:43.535364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:43.535391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:43.538105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:43.539065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:43.539198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.539276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:43.540107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.540145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:43.540408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.540418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.540433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:43.540440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.540447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:43.540486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.541920Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:43.580243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:43.580331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.580393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:43.580436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:43.580448Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.581222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.581247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:43.581324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.581338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:43.581344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:43.581350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:43.581862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.581877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:43.581885Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:43.587397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.587424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.587433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.587443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:43.588290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:43.588894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:43.588937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:43.589146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.589175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:43.589183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.589243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:43.589253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.589313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:43.589329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:43.589910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:26:43.589921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... xId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.829751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:43.829770Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:43.829780Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:43.829819Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.829825Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:26:43.829832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-03T10:26:43.829837Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-03T10:26:43.829841Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:26:43.829886Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.829896Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:26:43.829911Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:26:43.829918Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:26:43.829923Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:26:43.829927Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:26:43.829932Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:26:43.829939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:26:43.829945Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:26:43.829950Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:26:43.829965Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:26:43.829973Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:26:43.829980Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 3, subscribers: 0 2025-06-03T10:26:43.829985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:26:43.829989Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-03T10:26:43.829993Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:26:43.830248Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.830265Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.830271Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:43.830277Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:26:43.830282Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:26:43.830579Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.830597Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.830603Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:43.830608Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:26:43.830613Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, 
LocalPathId: 3] was 2 2025-06-03T10:26:43.830997Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.831018Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:43.831023Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:43.831031Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-03T10:26:43.831036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:43.831050Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:26:43.831423Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:26:43.831511Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:26:43.831799Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:26:43.831865Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:26:43.831875Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:26:43.831963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:26:43.831985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:26:43.831992Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:339:2329] TestWaitNotification: OK eventTxId 101 2025-06-03T10:26:43.832078Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:43.832123Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 59us result status StatusSuccess 2025-06-03T10:26:43.832244Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Decimal(35,9)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 35 DecimalScale: 9 } } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TraverseColumnShard::TraverseColumnTable [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:43.858436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:43.858462Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.858468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:43.858473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:43.858479Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:43.858483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:43.858493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:43.858505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:43.858619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.858707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:43.875652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:43.875676Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:43.875785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:43.878803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:43.878914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:43.878941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:43.882629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:43.882820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:43.882946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.883023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:43.883652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.883695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:43.883952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.883967Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.883989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:43.884000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.884005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:43.884047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.885353Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] 
started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:43.912195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:43.912283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.912350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:43.912391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:43.912403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.913286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.913334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:43.913401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.913412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:43.913419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:43.913426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:43.914071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.914085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:43.914092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:43.914894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.914908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.914916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, 
operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.914924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:43.915747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:43.916214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:43.916260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:43.916460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.916489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:43.916496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.916555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:43.916562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:43.916603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:43.916616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:43.917059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.917069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 
005 2025-06-03T10:26:43.947288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:43.947330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:43.947339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_external_table.cpp:58: [72057594046678944] TAlterExternalTable TPropose, operationId: 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-03T10:26:43.947363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 128 -> 240 2025-06-03T10:26:43.947389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:43.947400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:43.947539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:26:43.947564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:26:43.947952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:43.947963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:43.947993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:43.948005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:43.948019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:43.948025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-03T10:26:43.948030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-03T10:26:43.948034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 104, path id: 3 FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:26:43.948100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: 
TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:43.948107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:26:43.948122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:26:43.948127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:43.948132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:26:43.948136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:43.948141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-03T10:26:43.948146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:43.948152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:26:43.948156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:26:43.948167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:26:43.948171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:43.948177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-03T10:26:43.948181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:26:43.948184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-03T10:26:43.948301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:43.948312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:43.948317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:26:43.948322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:26:43.948326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:26:43.948584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:43.948599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:43.948604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:26:43.948610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-03T10:26:43.948614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:43.948626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-03T10:26:43.949144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:26:43.949339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-03T10:26:43.949395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-03T10:26:43.949402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-03T10:26:43.949472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-03T10:26:43.949489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:26:43.949495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:392:2382] TestWaitNotification: OK eventTxId 104 2025-06-03T10:26:43.949569Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:43.949596Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 34us result status StatusSuccess 2025-06-03T10:26:43.949666Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 
72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 3 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 3 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/other_location" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTable [GOOD] Test command err: 2025-06-03T10:23:56.831232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:56.831360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:23:56.831395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a99/r3tmp/tmpf4m05Y/pdisk_1.dat 2025-06-03T10:23:56.962954Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8987, node 1 2025-06-03T10:23:57.088043Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:57.088075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:57.088081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:57.088211Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:57.088920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:57.189261Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:57.189332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:57.206129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23371 2025-06-03T10:23:57.644957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:58.767307Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:23:58.785236Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:58.785281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:58.854266Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:58.856015Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:59.018093Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018288Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018473Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:23:59.018519Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018578Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018594Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018613Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018630Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.018647Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:59.176671Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:59.176727Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:59.188651Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:59.226225Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:59.235353Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:23:59.235383Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:23:59.242294Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:23:59.242525Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:23:59.242557Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:23:59.242565Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:23:59.242571Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:23:59.242577Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:23:59.242581Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:23:59.242587Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:23:59.242951Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:23:59.259849Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:59.259885Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:59.261111Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:23:59.263611Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
2025-06-03T10:23:59.263687Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:23:59.263938Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:23:59.269742Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:23:59.269767Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:23:59.269781Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:23:59.273546Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:23:59.275577Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:23:59.275627Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:23:59.389999Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:23:59.550626Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:23:59.613649Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:00.325896Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2211:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:00.325951Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:00.331195Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:00.382031Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:00.382122Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:00.382191Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:00.382220Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:00.382247Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:00.382281Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:00.382315Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:00.382345Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2317:2851];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_reg ... 
4976720658 completed, doublechecking } 2025-06-03T10:26:41.171964Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:8296:6135] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:41.224248Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8318:6149]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:41.224324Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:41.224339Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8320:6151] 2025-06-03T10:26:41.224352Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8320:6151] 2025-06-03T10:26:41.224478Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8321:6152] 2025-06-03T10:26:41.224531Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8320:6151], server id = [2:8321:6152], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:41.224546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8321:6152], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:41.224569Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:41.224600Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:41.224616Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8318:6149], StatRequests.size() = 1 2025-06-03T10:26:41.273179Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OWQ4ZWVhYjktYWEwN2RiYjktYWU5NzA4OGYtMTJiYWU0MQ==, TxId: 2025-06-03T10:26:41.273213Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OWQ4ZWVhYjktYWEwN2RiYjktYWU5NzA4OGYtMTJiYWU0MQ==, TxId: 2025-06-03T10:26:41.287308Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:41.307450Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:41.307484Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:26:41.369994Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:41.370036Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:41.480747Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8320:6151], schemeshard count = 1 2025-06-03T10:26:43.994764Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:43.994808Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:26:43.994820Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:43.994825Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:43.998419Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:44.022025Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:44.022242Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:44.022265Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:44.022731Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:44.050124Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:44.050209Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:44.050457Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8437:6211], server id = [2:8441:6215], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:44.050677Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8437:6211], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:44.050999Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8438:6212], server id = [2:8442:6216], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:44.051020Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8438:6212], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:44.051706Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8439:6213], server id = [2:8443:6217], tablet id = 72075186224037901, status = OK 2025-06-03T10:26:44.051728Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8439:6213], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:44.051874Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8440:6214], server id = [2:8444:6218], tablet id = 72075186224037902, status = OK 2025-06-03T10:26:44.051885Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8440:6214], path = { OwnerId: 72075186224037897 LocalId: 4 } 
2025-06-03T10:26:44.053560Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:44.054133Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8437:6211], server id = [2:8441:6215], tablet id = 72075186224037899 2025-06-03T10:26:44.054146Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:44.054374Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:44.054496Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8438:6212], server id = [2:8442:6216], tablet id = 72075186224037900 2025-06-03T10:26:44.054503Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:44.054599Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-03T10:26:44.054642Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8439:6213], server id = [2:8443:6217], tablet id = 72075186224037901 2025-06-03T10:26:44.054646Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:44.054799Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-03T10:26:44.054809Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:44.054848Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:44.054881Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:44.054986Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:44.055528Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8440:6214], server id = [2:8444:6218], tablet id = 72075186224037902 2025-06-03T10:26:44.055538Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:44.055734Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:44.068957Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8473:6243]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:44.069048Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:44.069057Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8473:6243], StatRequests.size() = 1 2025-06-03T10:26:44.148120Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:03.000000Z, event interval end# 2025-06-03T10:26:42.000000Z 2025-06-03T10:26:44.148388Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YmVhYzgwODYtNTMxMjFhODUtNmQ2ZTRkMGEtNWViZTc0MjE=, TxId: 2025-06-03T10:26:44.148400Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YmVhYzgwODYtNTMxMjFhODUtNmQ2ZTRkMGEtNWViZTc0MjE=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-03T10:26:44.148594Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8486:6249]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:44.148671Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:44.148827Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:44.148834Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:44.151721Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:44.151756Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:44.151769Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:44.152963Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] >> TExternalTableTest::ParallelCreateSameExternalTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index/unittest >> TAsyncIndexTests::MergeMainWithReboots[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:26:16.552426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:16.552457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:16.552464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:16.552470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: 
OperationsProcessing config: using default configuration 2025-06-03T10:26:16.552483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:16.552488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:16.552498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:16.552514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:16.552645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:16.552731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:16.569544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:16.569581Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:16.569681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:26:16.572961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:16.573096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:16.573143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:16.575245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:16.575331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:16.575458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:16.575535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:16.576025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:16.576081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:16.576342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:16.576354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:16.576373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:16.576382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:16.576388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:16.576454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:26:16.578036Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:26:16.600994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:16.601078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.601154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:16.601207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:16.601220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.610176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:16.610223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:16.610306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.610331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:16.610339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:16.610346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 
1:0 2 -> 3 2025-06-03T10:26:16.617728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.617767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:16.617782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:16.621385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.621416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:16.621425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:16.621437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:16.622160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:16.622816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:16.622860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:16.623056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:16.623086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:16.623093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:16.623150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Ch ... 
eBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableIndexes { Name: "UserDefinedIndex" LocalPathId: 4 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "indexed" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 
RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:45.213798Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:837:2681] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-03T10:26:45.213848Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:791:2681] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-03T10:26:45.213898Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:837:2681] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 1748946405188403 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 2 Group: 1748946405188403 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 },{ Order: 3 Group: 1748946405188403 Step: 5000003 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046678944, LocalPathId: 4] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046678944, LocalPathId: 3] SchemaVersion: 1 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:26:45.225379Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:200: [TableChangeSenderShard][72075186233409549:2][72075186233409546][30:837:2681] Handle NKikimrChangeExchange.TEvStatus Status: STATUS_OK RecordStatuses { Order: 1 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 2 Status: STATUS_OK Reason: REASON_NONE } RecordStatuses { Order: 3 Status: STATUS_OK Reason: REASON_NONE } LastRecordOrder: 3 2025-06-03T10:26:45.225445Z node 30 :CHANGE_EXCHANGE DEBUG: change_sender_async_index.cpp:239: [AsyncIndexChangeSenderMain][72075186233409549:2][30:791:2681] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186233409546 } 2025-06-03T10:26:45.412516Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true 
}, at schemeshard: 72057594046678944 2025-06-03T10:26:45.412681Z node 30 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndex/indexImplTable" took 175us result status StatusSuccess 2025-06-03T10:26:45.412997Z node 30 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/UserDefinedIndex/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "indexed" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "indexed" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } 
TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TExternalTableTest::ReadOnlyMode
>> TExternalTableTest::ParallelCreateExternalTable
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReplaceExternalTableIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:45.859397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:45.859421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:45.859427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:45.859432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:45.859438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:45.859441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:45.859451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:45.859464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval#
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:45.859566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:45.859647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:45.875312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:45.875337Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:45.875449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:45.878677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:45.878794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:45.878825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:45.881982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:45.882187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:45.882325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:45.882406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:45.883092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:45.883139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:45.883442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:45.883454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:45.883470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:45.883478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:45.883485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:45.883528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.884946Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 
2025-06-03T10:26:45.927304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:45.927395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.927465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:45.927517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:45.927529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.933690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:45.933734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:45.933823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.933835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:45.933842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:45.933848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:45.934435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.934448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:45.934454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:45.934773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.934782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:45.934788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:45.934795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:45.935535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:45.935917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:45.935954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:45.936139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:45.936163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:45.936171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:45.936241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:45.936252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:45.936284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:45.936295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:45.936688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:45.936696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 
e 2025-06-03T10:26:45.952199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:26:45.952204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:26:45.952208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:26:45.952224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:26:45.952230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:26:45.952234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:26:45.952238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-03T10:26:45.952383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:45.952393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:45.952399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:45.952404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:26:45.952408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:45.952488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:45.952497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:45.952501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:45.952505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-03T10:26:45.952509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 
2025-06-03T10:26:45.952519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:26:45.953165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:26:45.953204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:26:45.953252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:26:45.953259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:26:45.953339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:26:45.953358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:26:45.953363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:304:2294] TestWaitNotification: OK eventTxId 101 2025-06-03T10:26:45.953441Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:45.953472Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 42us result status StatusSuccess 2025-06-03T10:26:45.953564Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { 
None { } } Properties { } References { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 102 2025-06-03T10:26:45.954334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:45.954398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "ExternalTable" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } ReplaceIfExists: true } 2025-06-03T10:26:45.954409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-03T10:26:45.954415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-03T10:26:45.961873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:45.961921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalTable request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL TABLE, path: /MyRoot/ExternalTable TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:26:45.961998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:26:45.962005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:26:45.962080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:26:45.962105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:26:45.962110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:312:2302] TestWaitNotification: OK eventTxId 102 2025-06-03T10:26:45.962194Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:45.962220Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 32us result status StatusPathDoesNotExist 2025-06-03T10:26:45.962256Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TComputeScheduler::QueryLimits [GOOD]
|61.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
|62.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
|62.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_auditsettings/ydb-core-tx-schemeshard-ut_auditsettings
>> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD]
>> TExternalTableTest::ParallelCreateSameExternalTable [GOOD]
>> TKqpScanData::EmptyColumns
>> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD]
>> TExternalTableTest::ReadOnlyMode [GOOD]
>> TKqpScanData::EmptyColumns [GOOD]
>> TExternalTableTest::ParallelCreateExternalTable [GOOD]
|62.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache
|62.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache
|62.0%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_cache/ydb-core-tx-scheme_board-ut_cache
|62.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/engine/ut/ydb-core-engine-ut
|62.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/engine/ut/ydb-core-engine-ut
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TComputeScheduler::QueryLimits [GOOD]
Test command err:
800 800 800 800
|62.0%| [LD] {RESULT} $(B)/ydb/core/engine/ut/ydb-core-engine-ut
|62.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless
|62.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless
|62.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_serverless/ydb-core-tx-schemeshard-ut_serverless
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateExternalTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID
72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:47.207737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:47.207767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:47.207774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:47.207779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:47.207786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:47.207792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:47.207801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:47.207814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:47.207948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:47.208047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:47.230664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:47.230692Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:47.230811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:47.234305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:47.234429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:47.234461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:47.238217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:47.238428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:47.238580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:47.238664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:47.239416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:47.239478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:47.239767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:47.239781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:47.239804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:47.239812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:47.239820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:47.239863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.241410Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:47.292545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:47.292642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.292710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:47.292758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:47.292768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.297867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:47.297915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:47.297999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.298013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:47.298021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:47.298028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:47.302743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.302771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:47.302781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:47.303772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.303791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.303799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:47.303809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:47.304727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:47.305809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:47.305863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:47.306081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:47.306121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.306130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:47.306203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:47.306215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:47.306262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:47.306277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:47.313896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:47.313918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... rd: 72057594046678944 2025-06-03T10:26:47.371836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-06-03T10:26:47.371840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:374:2364] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-06-03T10:26:47.371932Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:47.371982Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 64us result status StatusSuccess 2025-06-03T10:26:47.372070Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { 
Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372185Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372205Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 22us result status StatusSuccess 2025-06-03T10:26:47.372251Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372344Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372360Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA" took 17us result status StatusSuccess 2025-06-03T10:26:47.372449Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 3 SchemeshardId: 
72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372522Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372540Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable1" took 20us result status StatusSuccess 2025-06-03T10:26:47.372583Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable1" PathDescription { Self { Name: "ExternalTable1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 126 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable1" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" 
Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Content: "" } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372636Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/ExternalTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:47.372650Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/ExternalTable2" took 15us result status StatusSuccess 2025-06-03T10:26:47.372692Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/ExternalTable2" PathDescription { Self { Name: "ExternalTable2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 127 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable2" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false } Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableRebootSaTabletBeforeSave [GOOD] Test command err: 2025-06-03T10:24:05.638644Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:05.638728Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:05.638760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a6e/r3tmp/tmp6tijZE/pdisk_1.dat 2025-06-03T10:24:05.871043Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8773, node 1 2025-06-03T10:24:06.050077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:06.050102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:06.050108Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:06.050215Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:06.051202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:06.153060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:06.153099Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:06.169574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8563 2025-06-03T10:24:06.899366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:11.326138Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:11.562079Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:11.562740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:11.634868Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:11.649750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:12.258518Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.258707Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.258858Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:12.258906Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.258948Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.258966Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.258982Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.259022Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.259038Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:12.457951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:12.458227Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:12.471085Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:12.900111Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:13.042994Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:13.043026Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:13.130098Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:13.131448Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:13.131481Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:13.131488Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:13.131495Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:13.131501Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:13.131507Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:13.131517Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:13.131954Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:13.173589Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:13.173624Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2597], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:13.185955Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1876:2608] 2025-06-03T10:24:13.195416Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2618] 
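(For orientation: the records above bootstrap a tenant database /Root/Database with its own Hive 72075186224037888, schemeshard 72075186224037897 and statistics aggregator tablet 72075186224037894. The traversal target is created a few records below via ESchemeOpCreateColumnTable; in YQL terms that corresponds roughly to the following — a minimal sketch with assumed table and column names, since the test builds the schema programmatically:

    CREATE TABLE `/Root/Database/Table1` (   -- name assumed for illustration
        id Uint64 NOT NULL,                  -- column-store tables require a NOT NULL key
        value Utf8,
        PRIMARY KEY (id)
    ) WITH (STORE = COLUMN);
)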
2025-06-03T10:24:13.196007Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2618], schemeshard id = 72075186224037897 2025-06-03T10:24:13.208863Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:13.261832Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:13.261895Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:13.261909Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:13.293628Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:13.338112Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:13.338163Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:13.631961Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:13.930501Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:13.993619Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:15.441354Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2221:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:15.441575Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:15.470345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:15.767133Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:15.767227Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:15.768386Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:15.768414Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:15.768693Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:15.768969Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:15.769225Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:15.781725Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regi ... 
, at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:46.155077Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8347:6163], server id = [2:8348:6164], tablet id = 72075186224037894 2025-06-03T10:26:46.155087Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8485:6240] 2025-06-03T10:26:46.155095Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8485:6240] 2025-06-03T10:26:46.235254Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:46.235298Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:46.235382Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:46.235623Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:46.235683Z node 2 :STATISTICS DEBUG: tx_init.cpp:55: [72075186224037894] Loaded database: /Root/Database 2025-06-03T10:26:46.235691Z node 2 :STATISTICS DEBUG: tx_init.cpp:59: [72075186224037894] Loaded traversal start key 2025-06-03T10:26:46.235698Z node 2 :STATISTICS DEBUG: tx_init.cpp:64: [72075186224037894] Loaded traversal table owner id: 72075186224037897 2025-06-03T10:26:46.235704Z node 2 :STATISTICS DEBUG: tx_init.cpp:69: [72075186224037894] Loaded traversal table local path id: 4 2025-06-03T10:26:46.235709Z node 2 :STATISTICS DEBUG: tx_init.cpp:74: [72075186224037894] Loaded traversal start time: 1748946406104528 2025-06-03T10:26:46.235715Z node 2 :STATISTICS DEBUG: tx_init.cpp:79: [72075186224037894] Loaded traversal IsColumnTable: 1 2025-06-03T10:26:46.235720Z node 2 :STATISTICS DEBUG: tx_init.cpp:84: [72075186224037894] Loaded global traversal round: 2 2025-06-03T10:26:46.235732Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 1 2025-06-03T10:26:46.235739Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:46.235753Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-03T10:26:46.235761Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:46.235767Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:46.235776Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:46.235807Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:46.236106Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:46.236241Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:46.236251Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:46.236269Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:46.236607Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:46.236622Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:46.236838Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:46.283305Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:46.283382Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:46.283631Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8532:6270], server id = [2:8536:6274], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:46.283661Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8532:6270], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:46.283706Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8533:6271], server id = [2:8537:6275], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:46.283717Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8533:6271], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:46.284056Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8534:6272], server id = [2:8539:6277], tablet id = 72075186224037901, status = OK 2025-06-03T10:26:46.284070Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8534:6272], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:46.284129Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8535:6273], server id = [2:8538:6276], tablet id = 72075186224037902, status = OK 2025-06-03T10:26:46.284137Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8535:6273], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:46.284370Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:46.284428Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:46.284453Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8532:6270], server id = [2:8536:6274], tablet id = 72075186224037899 2025-06-03T10:26:46.284458Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:46.284536Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8533:6271], server id = [2:8537:6275], tablet id = 72075186224037900 2025-06-03T10:26:46.284541Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:46.284563Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-03T10:26:46.284599Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-03T10:26:46.284606Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:46.284662Z 
node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:46.284691Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:46.284745Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:46.285344Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8534:6272], server id = [2:8539:6277], tablet id = 72075186224037901 2025-06-03T10:26:46.285358Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:46.285542Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8535:6273], server id = [2:8538:6276], tablet id = 72075186224037902 2025-06-03T10:26:46.285549Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:46.285614Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:46.311434Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8560:6298]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:46.311524Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:46.311534Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8560:6298], StatRequests.size() = 1 2025-06-03T10:26:46.371261Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NmUyZWY0MTUtMTk5MDU3ODYtM2U2M2JlMjQtYjg5YjFkNzc=, TxId: 2025-06-03T10:26:46.371294Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NmUyZWY0MTUtMTk5MDU3ODYtM2U2M2JlMjQtYjg5YjFkNzc=, TxId: 2025-06-03T10:26:46.371498Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:46.384123Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:46.384160Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
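(The RunDataQuery record just above is the aggregator persisting the collected column statistics into .metadata/_statistics. In the captured text the element types of the two List declarations appear to have been stripped, likely as angle-bracketed text during extraction; a reconstruction of the query, assuming Uint32 column tags and String-serialized sketches, is:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- element type assumed
    DECLARE $data AS List<String>;         -- element type assumed

    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);

One row is upserted per traversed column, keyed by (owner_id, local_path_id, stat_type, column_tag); the two VALUES tuples here correspond to the two columns of the traversed table.)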
2025-06-03T10:26:46.395609Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8577:6306] 2025-06-03T10:26:46.395700Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:8577:6306], schemeshard id = 72075186224037897 2025-06-03T10:26:46.395722Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8578:6307] 2025-06-03T10:26:46.395738Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8485:6240], server id = [2:8578:6307], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:46.395763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8578:6307], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-03T10:26:46.469767Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8579:6308]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:46.469894Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:46.469901Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:46.470684Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:46.470704Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:46.470715Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:46.471929Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:46.787747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:46.787775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:46.787781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:46.787787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:46.787792Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:46.787797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:46.787806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:46.787820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:46.787932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:46.788011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:46.820546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:46.820571Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:46.820675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:46.835142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:46.835273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:46.835307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:46.861689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:46.865377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:46.865510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.865592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:46.873565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:46.873631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:46.873977Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:46.873990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:46.874008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:46.874017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:46.874024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:46.874073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.889626Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:46.940047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:46.940121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.940179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:46.940221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:46.940234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.949689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.949730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:46.949811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.949822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:46.949828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:46.949834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:46.950415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.950426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:46.950432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: 
Change state for txid 1:0 3 -> 128 2025-06-03T10:26:46.950731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.950740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.950746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.950753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:46.951484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:46.951824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:46.951860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:46.952031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.952058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:46.952064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.952115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:46.952122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.952149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:46.952159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:46.954103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:46.954117Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... ogressState, at schemeshard: 72057594046678944 2025-06-03T10:26:47.169370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 129 ready parts: 1/1 2025-06-03T10:26:47.169399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 129 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:47.169531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.169545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.169549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-03T10:26:47.169555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-03T10:26:47.169561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-03T10:26:47.169697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.169709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.169713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 129 2025-06-03T10:26:47.169717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-03T10:26:47.169722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:26:47.169732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 129, ready parts: 0/1, is published: true 2025-06-03T10:26:47.172388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 129:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:129 msg type: 269090816 2025-06-03T10:26:47.172441Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 129, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 129 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 129 at step: 5000005 2025-06-03T10:26:47.175479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-03T10:26:47.175524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-03T10:26:47.175594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:47.175632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 129 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:47.175644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 129:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000005, at schemeshard: 72057594046678944 2025-06-03T10:26:47.175696Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 129:0 128 -> 240 2025-06-03T10:26:47.175735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:26:47.175752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 FAKE_COORDINATOR: Erasing txId 129 2025-06-03T10:26:47.183168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:47.183189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:47.183240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 129, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:26:47.183260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:47.183266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:490:2446], at schemeshard: 72057594046678944, txId: 129, path id: 1 2025-06-03T10:26:47.183272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:490:2446], at schemeshard: 72057594046678944, txId: 129, path id: 5 2025-06-03T10:26:47.183364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 129:0, at schemeshard: 72057594046678944 2025-06-03T10:26:47.183373Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 129:0 ProgressState 2025-06-03T10:26:47.183390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#129:0 progress is 1/1 2025-06-03T10:26:47.183396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-03T10:26:47.183402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#129:0 progress is 1/1 2025-06-03T10:26:47.183406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-03T10:26:47.183412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 129, ready parts: 1/1, is published: false 2025-06-03T10:26:47.183419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 129 ready parts: 1/1 2025-06-03T10:26:47.183425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 129:0 2025-06-03T10:26:47.183430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 129:0 2025-06-03T10:26:47.183452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:26:47.183459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 129, publications: 2, subscribers: 0 2025-06-03T10:26:47.183464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:26:47.183468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 129, [OwnerId: 72057594046678944, LocalPathId: 5], 3 2025-06-03T10:26:47.183629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.183642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.183651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 129 2025-06-03T10:26:47.183657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:26:47.183662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-03T10:26:47.183810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.183821Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 5 Version: 3 PathOwnerId: 72057594046678944, cookie: 129 2025-06-03T10:26:47.183825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 129 2025-06-03T10:26:47.183830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 129, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 3 2025-06-03T10:26:47.183834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:26:47.183843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 129, subscribers: 0 2025-06-03T10:26:47.194360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 2025-06-03T10:26:47.194408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 129 TestModificationResult got TxId: 129, wait until txId: 129 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table/unittest >> TExternalTableTest::ParallelCreateSameExternalTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:26:46.881854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:46.881884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:46.881891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:46.881896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:46.881903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:46.881908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:46.881917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:46.881932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 
600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:46.882076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:46.882157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:46.919746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:26:46.919776Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:46.919893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:46.927464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:46.927606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:46.927638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:46.936739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:46.936940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:46.937065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.937148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:46.937967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:46.938019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:46.938308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:46.938324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:46.938347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:46.938356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:46.938363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:46.938408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.939861Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: 
[1:240:2058] recipient: [1:15:2062] 2025-06-03T10:26:46.963269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:46.963337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.963392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:46.963433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:46.963445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.964123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.964150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:46.964208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.964218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:46.964224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:46.964229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:46.964675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.964689Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:46.964694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:46.965065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.965081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:46.965087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.965094Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:46.965881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:46.966365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:46.966401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:46.966598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:46.966625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:46.966634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.966695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:46.966703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:46.966737Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:46.966750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:46.967217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:46.967228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 
alse ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:46.986983Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 50us result status StatusSuccess 2025-06-03T10:26:46.987074Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987160Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987181Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 22us result status StatusSuccess 2025-06-03T10:26:46.987230Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 
ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 125 2025-06-03T10:26:46.987277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 125: send EvNotifyTxCompletion 2025-06-03T10:26:46.987285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 125 TestWaitNotification wait txId: 126 2025-06-03T10:26:46.987303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 126: send EvNotifyTxCompletion 2025-06-03T10:26:46.987307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 126 TestWaitNotification wait txId: 127 2025-06-03T10:26:46.987318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 127: send EvNotifyTxCompletion 2025-06-03T10:26:46.987322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 127 2025-06-03T10:26:46.987409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 125, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 125: got EvNotifyTxCompletionResult 2025-06-03T10:26:46.987434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 125: satisfy waiter [1:342:2332] 2025-06-03T10:26:46.987483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 126, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 126: got EvNotifyTxCompletionResult 2025-06-03T10:26:46.987500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 126: satisfy waiter [1:342:2332] 2025-06-03T10:26:46.987510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 127, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 127: got EvNotifyTxCompletionResult 2025-06-03T10:26:46.987530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 127: satisfy waiter [1:342:2332] TestWaitNotification: OK eventTxId 125 TestWaitNotification: OK eventTxId 126 TestWaitNotification: OK eventTxId 127 2025-06-03T10:26:46.987614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NilNoviSubLuna" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: 
false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:46.987637Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NilNoviSubLuna" took 29us result status StatusSuccess 2025-06-03T10:26:46.987697Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NilNoviSubLuna" PathDescription { Self { Name: "NilNoviSubLuna" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 125 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "NilNoviSubLuna" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 128 2025-06-03T10:26:46.988576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } } TxId: 128 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:46.988641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_table.cpp:427: [72057594046678944] CreateNewExternalTable, opId 128:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalTable FailOnExist: false CreateExternalTable { Name: "NilNoviSubLuna" SourceType: "General" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Uint64" } } 2025-06-03T10:26:46.988657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_external_table.cpp:300: [72057594046678944] TCreateExternalTable Propose: opId# 128:0, path# /MyRoot/NilNoviSubLuna 2025-06-03T10:26:46.988681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 128:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: 
EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132, at schemeshard: 72057594046678944 2025-06-03T10:26:46.989322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 128, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/NilNoviSubLuna\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132" TxId: 128 SchemeshardId: 72057594046678944 PathId: 3 PathCreateTxId: 125, at schemeshard: 72057594046678944 2025-06-03T10:26:46.989359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 128, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/NilNoviSubLuna', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132, operation: CREATE EXTERNAL TABLE, path: /MyRoot/NilNoviSubLuna TestModificationResult got TxId: 128, wait until txId: 128 |62.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::EmptyColumns [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_base_tenant/unittest >> TSubDomainTest::DatashardNotRunAtAllWhenSubDomainNodesIsStopped [GOOD] Test command err: 2025-06-03T10:25:29.812884Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667289377821237:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:29.812907Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:25:29.817807Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667285711662184:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:25:29.817856Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001147/r3tmp/tmpLA1zz0/pdisk_1.dat 2025-06-03T10:25:29.933586Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:25:29.993513Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:30.014275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.014306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.014364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.014384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, 
(0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:25:30.016060Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:25:30.016080Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:25:30.016564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:30.016659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:30.121941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:25:30.121990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:63239 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:25:30.152341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:25:30.154790Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667289377821458:2143] Handle TEvNavigate describe path dc-1 2025-06-03T10:25:30.156617Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667293672789206:2462] HANDLE EvNavigateScheme dc-1 2025-06-03T10:25:30.156667Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:7511667289377821560:2196], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: true SyncVersion: true Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:25:30.156682Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:7511667289377821560:2196], path# /dc-1, domainOwnerId# 72057594046644480 2025-06-03T10:25:30.156748Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList 2025-06-03T10:25:30.157260Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667289377821092:2053] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667293672789211:2463] 2025-06-03T10:25:30.157283Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667289377821095:2056] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667293672789212:2463] 2025-06-03T10:25:30.157286Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667289377821092:2053] Subscribe: subscriber# [1:7511667293672789211:2463], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:30.157318Z node 1 :SCHEME_BOARD_REPLICA INFO: replica.cpp:646: [1:7511667289377821095:2056] Subscribe: subscriber# [1:7511667293672789212:2463], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:30.157319Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1061: [1:7511667289377821098:2059] Handle NKikimrSchemeBoard.TEvSubscribe { Path: /dc-1 DomainOwnerId: 72057594046644480 }: sender# [1:7511667293672789213:2463] 2025-06-03T10:25:30.157323Z node 1 :SCHEME_BOARD_REPLICA 
INFO: replica.cpp:646: [1:7511667289377821098:2059] Subscribe: subscriber# [1:7511667293672789213:2463], path# /dc-1, domainOwnerId# 72057594046644480, capabilities# AckNotifications: true 2025-06-03T10:25:30.157337Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667293672789211:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667289377821092:2053] 2025-06-03T10:25:30.157343Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667293672789212:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667289377821095:2056] 2025-06-03T10:25:30.157345Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667289377821092:2053] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667293672789211:2463] 2025-06-03T10:25:30.157348Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:7511667293672789213:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667289377821098:2059] 2025-06-03T10:25:30.157351Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667289377821095:2056] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667293672789212:2463] 2025-06-03T10:25:30.157354Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667293672789208:2463] 2025-06-03T10:25:30.157355Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1089: [1:7511667289377821098:2059] Handle NKikimrSchemeBoard.TEvNotifyAck { Version: 2 }: sender# [1:7511667293672789213:2463] 2025-06-03T10:25:30.157362Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667293672789209:2463] 2025-06-03T10:25:30.157375Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:7511667293672789207:2463][/dc-1] Set up state: owner# [1:7511667289377821560:2196], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:30.157418Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimrSchemeBoard.TEvNotify { Path: /dc-1 PathId: [OwnerId: 72057594046644480, LocalPathId: 1] Version: 2 }: sender# [1:7511667293672789210:2463] 2025-06-03T10:25:30.157425Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:7511667293672789207:2463][/dc-1] Path was already updated: owner# [1:7511667289377821560:2196], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 72057594046644480, LocalPathId: 1], Version: 2) DomainId: [OwnerId: 72057594046644480, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:25:30.157433Z node 1 
:SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667293672789211:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789208:2463], cookie# 1 2025-06-03T10:25:30.157437Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667293672789212:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789209:2463], cookie# 1 2025-06-03T10:25:30.157442Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:7511667293672789213:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789210:2463], cookie# 1 2025-06-03T10:25:30.157448Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667289377821092:2053] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789211:2463], cookie# 1 2025-06-03T10:25:30.157462Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667289377821095:2056] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789212:2463], cookie# 1 2025-06-03T10:25:30.157469Z node 1 :SCHEME_BOARD_REPLICA DEBUG: replica.cpp:1128: [1:7511667289377821098:2059] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: /dc-1 }: sender# [1:7511667293672789213:2463], cookie# 1 2025-06-03T10:25:30.157476Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667293672789211:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667289377821092:2053], cookie# 1 2025-06-03T10:25:30.157481Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667293672789212:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667289377821095:2056], cookie# 1 2025-06-03T10:25:30.157484Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:7511667293672789213:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667289377821098:2059], cookie# 1 2025-06-03T10:25:30.157489Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667293672789208:2463], cookie# 1 2025-06-03T10:25:30.157496Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:7511667293672789207:2463][/dc-1] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:25:30.157500Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:7511667293672789207:2463][/dc-1] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:7511667293672789209:2463], cookie# 1 2025-06-03T10:25:30.157504Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:7511667293672789207:2463][/dc-1] Sync is done: cookie# 1, size# 3 ... 
source_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:45.272377Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7511667613301906602:2553], recipient# [8:7511667613301906601:2533], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.660373Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7511667318269040897:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.660419Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7511667318269040897:2127], cacheItem# { Subscriber: { Subscriber: [7:7511667318269041520:2552] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:45.660436Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7511667614621786165:3134], recipient# [7:7511667614621786164:2451], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.807821Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7511667316949162330:2103], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.807875Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7511667316949162330:2103], cacheItem# { Subscriber: { Subscriber: [8:7511667321244130065:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 
0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:45.807899Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7511667613301906604:2554], recipient# [8:7511667613301906603:2534], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.829431Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7511667316949162330:2103], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:45.829482Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7511667316949162330:2103], cacheItem# { Subscriber: { Subscriber: [8:7511667321244130065:2354] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:45.829506Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7511667613301906606:2555], recipient# [8:7511667613301906605:2535], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.245698Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7511667318269040897:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.245765Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7511667318269040897:2127], cacheItem# { Subscriber: { Subscriber: [7:7511667322564008908:2615] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 
0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:46.245799Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7511667618916753474:3142], recipient# [7:7511667618916753473:2452], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/initialization/migrations TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.275228Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [8:7511667316949162330:2103], request# { ErrorCount: 0 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.275292Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [8:7511667316949162330:2103], cacheItem# { Subscriber: { Subscriber: [8:7511667321244129831:2222] DomainOwnerId: 72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:46.275323Z node 8 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [8:7511667617596873905:2556], recipient# [8:7511667617596873904:2536], result# { ErrorCount: 1 DatabaseName: /dc-1/USER_0 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/USER_0/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.659494Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [7:7511667318269040897:2127], request# { ErrorCount: 0 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:26:46.659541Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [7:7511667318269040897:2127], cacheItem# { Subscriber: { Subscriber: [7:7511667318269041520:2552] DomainOwnerId: 
72057594046644480 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusPathDoesNotExist Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:26:46.659563Z node 7 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [7:7511667618916753479:3143], recipient# [7:7511667618916753478:2453], result# { ErrorCount: 1 DatabaseName: /dc-1 DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1/.metadata/workload_manager/classifiers/resource_pool_classifiers TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo }] } >> TKqpScanData::ArrowToUnboxedValueConverter >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] |62.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::DifferentNumberOfInputAndResultColumns [GOOD] |62.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |62.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |62.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |62.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |62.1%| [LD] {RESULT} $(B)/ydb/core/statistics/database/ut/ydb-core-statistics-database-ut |62.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/sysview/ydb-core-kqp-ut-sysview |62.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::ArrowToUnboxedValueConverter [GOOD] >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> TSchemeShardLoginTest::UserLogin >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false |62.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] >> TSchemeShardLoginTest::UserLogin [GOOD] >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword |62.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |62.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |62.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/ydb-core-tx-schemeshard-ut_stats |62.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/runtime/ut/unittest >> TKqpScanData::FailOnUnsupportedPgType [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveGroup-StrictAclCheck-true [GOOD] >> 
TSchemeShardLoginTest::DisableBuiltinAuthMechanism |62.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::TestExternalLogin >> TSchemeShardLoginTest::UserStayLockedOutIfEnterValidPassword [GOOD] >> TWebLoginService::AuditLogAdminLoginSuccess >> TSchemeShardLoginTest::AddAccess_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false |62.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer |62.1%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/ydb-core-tx-replication-service-ut_table_writer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseServerlessColumnTable [GOOD] Test command err: 2025-06-03T10:23:58.637604Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:58.637667Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:23:58.637679Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a87/r3tmp/tmpoTed9t/pdisk_1.dat 2025-06-03T10:23:58.855536Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1755, node 1 2025-06-03T10:23:58.966005Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:58.966032Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:58.966037Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:58.966093Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:58.966773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:59.066153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:59.066196Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:59.079354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16222 2025-06-03T10:23:59.444101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:00.527731Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:00.566014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:00.566059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:00.640377Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:00.645722Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:00.870120Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870329Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870512Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:00.870571Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870630Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870652Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870673Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870718Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:00.870755Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:01.057935Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:01.057987Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:01.071662Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:01.150932Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:01.191842Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:01.191900Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:01.210503Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:01.210581Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:01.210610Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:01.210617Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:01.210624Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:01.210632Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:01.210638Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:01.210646Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:01.210825Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:01.228256Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:01.228295Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:01.243008Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:01.245348Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:01.249755Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:01.260421Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:24:01.269068Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:01.269098Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:01.269111Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:24:01.284714Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:01.294785Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:01.294852Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:01.479313Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:01.641994Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:01.689689Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:02.351059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:24:02.937746Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:03.050815Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:24:03.050844Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:03.050859Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2567:2933], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:03.051733Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2569:2935] 2025-06-03T10:24:03.051936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2569:2935], schemeshard id = 72075186224037899 2025-06-03T10:24:03.968369Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2691:3233], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:03.968429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:03.973361Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037899 2025-06-03T10:24:04.043550Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2803:3047];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:04.043670Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2803:3047];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:04.043756Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2803:3047];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register; ... Id[ 1 ], ReplyToActorId[ [2:9389:6917]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:46.962942Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:46.962960Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:9391:6919] 2025-06-03T10:26:46.962973Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:9391:6919] 2025-06-03T10:26:46.963089Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:9392:6920] 2025-06-03T10:26:46.963143Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:9392:6920], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:46.963163Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:46.963233Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9391:6919], server id = [2:9392:6920], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:46.963257Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:46.963273Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:9389:6917], StatRequests.size() = 1 2025-06-03T10:26:47.030080Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzI4ZmViZWItMzUwNWYyMDMtMTQzODZkMDEtNjU0OTBiNjI=, TxId: 2025-06-03T10:26:47.030118Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzI4ZmViZWItMzUwNWYyMDMtMTQzODZkMDEtNjU0OTBiNjI=, TxId: 2025-06-03T10:26:47.030302Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:47.052396Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:47.052427Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:26:47.105456Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:47.105499Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:47.187792Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:9391:6919], schemeshard count = 1 2025-06-03T10:26:47.512566Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-03T10:26:47.512603Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 221.000000s, at schemeshard: 72075186224037899 2025-06-03T10:26:47.512667Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-03T10:26:47.534268Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:26:49.945700Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:49.945737Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:26:49.945748Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-03T10:26:49.945769Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:26:49.946773Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:49.967109Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:49.967309Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:49.967331Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:49.967693Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:49.984595Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:49.984682Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:49.984920Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9512:6991], server id = [2:9516:6995], tablet id = 72075186224037905, status = OK 2025-06-03T10:26:49.985022Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9512:6991], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:26:49.985439Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9513:6992], server id = [2:9517:6996], tablet id = 72075186224037906, status = OK 2025-06-03T10:26:49.985462Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9513:6992], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:26:49.985791Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9514:6993], server id = [2:9519:6998], tablet id = 72075186224037907, status = OK 2025-06-03T10:26:49.985811Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9514:6993], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:26:49.985863Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:9515:6994], server id = [2:9518:6997], tablet id = 72075186224037908, status = OK 2025-06-03T10:26:49.985871Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:9515:6994], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:26:49.987580Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-03T10:26:49.987794Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9512:6991], server id = [2:9516:6995], tablet id = 72075186224037905 2025-06-03T10:26:49.987806Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:49.987919Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-03T10:26:49.988025Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9513:6992], server id = [2:9517:6996], tablet id = 72075186224037906 2025-06-03T10:26:49.988033Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:49.988137Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-03T10:26:49.988223Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9515:6994], server id = [2:9518:6997], tablet id = 72075186224037908 2025-06-03T10:26:49.988228Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:49.988419Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-03T10:26:49.988429Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:49.988472Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 
2025-06-03T10:26:49.990061Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:49.990223Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:9514:6993], server id = [2:9519:6998], tablet id = 72075186224037907 2025-06-03T10:26:49.990231Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:49.990283Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared 2025-06-03T10:26:49.991137Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:49.999011Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:9548:7023]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:49.999113Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:49.999122Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:9548:7023], StatRequests.size() = 1 2025-06-03T10:26:50.064151Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-03T10:26:48.000000Z 2025-06-03T10:26:50.064431Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTg1ZTI1My05N2FmZWRkOC05NmM5OGMyNS00NzBjYjUzMg==, TxId: 2025-06-03T10:26:50.064446Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTg1ZTI1My05N2FmZWRkOC05NmM5OGMyNS00NzBjYjUzMg==, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-03T10:26:50.064601Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:9561:7029]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:50.064688Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:50.064808Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:50.064815Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:50.065730Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:50.065771Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:26:50.065781Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:50.067095Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 >>> failedEstimatesCount = 0 >> TSchemeShardLoginTest::BanUnbanUser >> TSchemeShardLoginTest::RemoveGroup_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true >> TSchemeShardLoginTest::TestExternalLogin [GOOD] >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain >> TSchemeShardLoginTest::DisableBuiltinAuthMechanism [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup >> TSchemeShardLoginTest::RemoveGroup_Owner-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false >> TWebLoginService::AuditLogAdminLoginSuccess [GOOD] >> TWebLoginService::AuditLogCreateModifyUser >> TSchemeShardLoginTest::AddAccess_NonYdb-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock >> TSchemeShardLoginTest::BanUnbanUser [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true >> TSchemeShardLoginTest::TestExternalLoginWithIncorrectLdapDomain [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCount >> TWebLoginService::AuditLogCreateModifyUser [GOOD] >> TSchemeShardLoginTest::FailedLoginUserUnderNameOfGroup [GOOD] >> TSchemeShardLoginTest::FailedLoginWithInvalidUser >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true >> THealthCheckTest::RedGroupIssueWhenDisintegratedGroupStatus [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_login/unittest >> TWebLoginService::AuditLogCreateModifyUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:50.418143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:50.418174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.418182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:50.418188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:50.418207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:50.418213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:50.418232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.418249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:50.418372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:50.418457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:50.465517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:50.465548Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:50.470172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:50.470308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:50.470347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:50.477739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:50.477857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:50.478033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.478113Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:50.479077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.479143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:50.479481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.479496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.479508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:50.479518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.479526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:50.479572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.481834Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:50.543744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:50.543838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.543927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:50.543991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:50.544003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.544819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.544850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:50.544907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.544920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:50.544927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:50.544933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:50.545455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.545475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:50.545482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:50.545873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.545886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.545892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.545899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:50.546652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:50.547636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:50.547684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:50.547889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.547918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:50.547927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.548000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:50.548008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.548052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:50.548067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:50.548536Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.548547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.548593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... peration: MODIFY USER, path: /MyRoot 2025-06-03T10:26:52.568789Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.568796Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.568829Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.568835Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2209], at schemeshard: 72057594046678944, txId: 105, path id: 1 2025-06-03T10:26:52.568927Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:52.568937Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:52.568942Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:26:52.568947Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:26:52.568952Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:52.568968Z node 4 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-03T10:26:52.569365Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 AUDIT LOG buffer(6): 2025-06-03T10:26:52.527681Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-03T10:26:52.549828Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-03T10:26:52.556412Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-03T10:26:52.562489Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-06-03T10:26:52.564624Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-03T10:26:52.567894Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] AUDIT LOG checked line: 2025-06-03T10:26:52.567894Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-03T10:26:52.570089Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { ModifyUser { User: "user1" Password: "password1" CanLogin: false } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:52.572444Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:52.572486Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:52.572494Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.572501Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:52.572505Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: 
TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.572519Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:52.572533Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-03T10:26:52.572539Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.572544Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-06-03T10:26:52.572551Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-03T10:26:52.572556Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:26:52.577674Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:52.577716Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: MODIFY USER, path: /MyRoot 2025-06-03T10:26:52.577781Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.577789Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.577822Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.577828Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:208:2209], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-03T10:26:52.577955Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:52.577970Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:52.577975Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-03T10:26:52.577982Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:26:52.577988Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-06-03T10:26:52.578011Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-03T10:26:52.578776Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 AUDIT LOG buffer(7): 2025-06-03T10:26:52.527681Z: component=schemeshard, tx_id=1, remote_address={none}, subject={none}, sanitized_token={none}, database={none}, operation=ALTER DATABASE, paths=[//MyRoot], status=SUCCESS, detailed_status=StatusAccepted 2025-06-03T10:26:52.549828Z: component=schemeshard, tx_id=101, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=CREATE USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1 2025-06-03T10:26:52.556412Z: component=schemeshard, tx_id=102, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-03T10:26:52.562489Z: component=schemeshard, tx_id=103, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[blocking] 2025-06-03T10:26:52.564624Z: component=schemeshard, tx_id=104, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[unblocking] 2025-06-03T10:26:52.567894Z: component=schemeshard, tx_id=105, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password] 2025-06-03T10:26:52.572408Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] AUDIT LOG checked line: 2025-06-03T10:26:52.572408Z: component=schemeshard, tx_id=106, remote_address={none}, subject={none}, sanitized_token={none}, database=/MyRoot, operation=MODIFY USER, paths=[/MyRoot], status=SUCCESS, detailed_status=StatusSuccess, login_user_level=admin, login_user=user1, login_user_change=[password, blocking] >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> TraverseColumnShard::TraverseColumnTableAggrStatNonLocalTablet [GOOD] Test command err: 2025-06-03T10:24:03.295475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:03.295557Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:24:03.295591Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a75/r3tmp/tmpN0fmbc/pdisk_1.dat 2025-06-03T10:24:03.453620Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15555, node 1 2025-06-03T10:24:03.564989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:03.565015Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:03.565020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:03.565126Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:03.565826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:03.656470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:03.656529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:03.668890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26882 2025-06-03T10:24:04.073198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:05.638449Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:05.655669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:05.655704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:05.702074Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:05.704444Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:06.161620Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.162223Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.164113Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:06.164172Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.164239Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.164257Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.164277Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.165126Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.165159Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:06.375044Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:06.375089Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:06.398343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:06.672438Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:06.778304Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:06.778343Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:06.874181Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:06.876019Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:06.876047Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:06.876053Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:06.876059Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:06.876065Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:06.876071Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:06.876080Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:06.876582Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:06.953623Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:06.953657Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1863:2597], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:06.962130Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1876:2608] 2025-06-03T10:24:06.971476Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1896:2618] 
2025-06-03T10:24:06.971965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1896:2618], schemeshard id = 72075186224037897 2025-06-03T10:24:06.984757Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:07.039046Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:07.039073Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:07.039086Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:07.060013Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:07.070911Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:07.070952Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:07.372654Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:07.685732Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:07.781598Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:09.136376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2221:3063], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:09.136565Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:09.166243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:09.382882Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:09.383163Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:09.383730Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:09.383974Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:09.384000Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:09.384022Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:09.384043Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:09.384065Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2326:2854];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:45.451437Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:45.514217Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:45.514258Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:45.576565Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8333:6154], schemeshard count = 1 2025-06-03T10:26:48.076087Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:48.076124Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:26:48.076136Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-03T10:26:48.076142Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:48.077159Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:48.101994Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:48.102234Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:48.102258Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:48.102569Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:48.114885Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:48.114969Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:48.115211Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8450:6214], server id = [2:8454:6218], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:48.115341Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8450:6214], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:48.115419Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8451:6215], server id = [2:8455:6219], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:48.115442Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8451:6215], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:48.115796Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8452:6216], server id = [2:8456:6220], tablet id = 72075186224037901, status = OK 2025-06-03T10:26:48.115808Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8452:6216], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:48.116187Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8453:6217], server id = [2:8457:6221], tablet id = 72075186224037902, status = OK 2025-06-03T10:26:48.116199Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8453:6217], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:48.117614Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:48.117748Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8450:6214], server id = [2:8454:6218], tablet id = 72075186224037899 2025-06-03T10:26:48.117754Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:48.117919Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:48.118031Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8451:6215], server id = [2:8455:6219], tablet id = 72075186224037900 2025-06-03T10:26:48.118036Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:48.118205Z node 2 
:STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037901 2025-06-03T10:26:48.118259Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8452:6216], server id = [2:8456:6220], tablet id = 72075186224037901 2025-06-03T10:26:48.118262Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:48.118309Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037902 2025-06-03T10:26:48.118315Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:48.118351Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:48.118466Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8453:6217], server id = [2:8457:6221], tablet id = 72075186224037902 2025-06-03T10:26:48.118472Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:48.163049Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:48.163146Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:48.677483Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 3 2025-06-03T10:26:48.677521Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:51.770196Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:51.770296Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:51.997591Z node 2 :STATISTICS INFO: service_impl.cpp:416: Node 3 is unavailable 2025-06-03T10:26:51.997631Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:51.997671Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-03T10:26:51.997675Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:51.997708Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:51.997728Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:51.997914Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:52.018207Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:52.018305Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-03T10:26:52.018506Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8585:6287], server id = [2:8586:6288], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:52.018540Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8585:6287], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:52.018934Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:52.018947Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:52.018979Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:52.019012Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:52.019100Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:52.019754Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8585:6287], server id = [2:8586:6288], tablet id = 72075186224037900 2025-06-03T10:26:52.019763Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:52.020004Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:52.043723Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8604:6306]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:52.043791Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:52.043799Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8604:6306], StatRequests.size() = 1 2025-06-03T10:26:52.170662Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGY1MjBkNTQtZTEyNjNiNWEtYTRkMjhiZDAtNmY4MDM2Y2Q=, TxId: 2025-06-03T10:26:52.170699Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGY1MjBkNTQtZTEyNjNiNWEtYTRkMjhiZDAtNmY4MDM2Y2Q=, TxId: ... 
waiting for NKikimr::NStat::TEvStatistics::TEvSaveStatisticsQueryResponse (done) 2025-06-03T10:26:52.170888Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:8619:6312]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:52.170961Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:52.171162Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:52.171169Z node 2 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:26:52.171928Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:26:52.171943Z node 2 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 3 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:26:52.171953Z node 2 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:26:52.173141Z node 2 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 3 probe = 4 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeAnalyzeOneColumnTableSpecificColumns [GOOD] Test command err: 2025-06-03T10:24:20.854323Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:20.854377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:20.854394Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a3d/r3tmp/tmpERihb8/pdisk_1.dat 2025-06-03T10:24:21.580228Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64966, node 1 2025-06-03T10:24:21.828422Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:21.828445Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:21.828450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:21.828559Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:21.829167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:21.950467Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:21.950499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:21.977831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21818 2025-06-03T10:24:22.452032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:23.396219Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:23.442468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:23.442515Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:23.514395Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:23.515218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:23.697994Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698215Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698397Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed
2025-06-03T10:24:23.698439Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698499Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698520Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698539Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698570Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.698613Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.856328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:23.856369Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:23.882036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:23.945717Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:23.964202Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:23.964242Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:23.983914Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:23.983991Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:23.984028Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:23.984036Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:23.984043Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:23.984051Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:23.984058Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:23.984068Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:23.984250Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:24.000986Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.001034Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.005920Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:24.006886Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:24.007023Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:24.010044Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:24.014771Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:24.014795Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:24.014807Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:24.020155Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:24.022065Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:24.022102Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:24.138903Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:24.235857Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:24.278644Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:24.818698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:24.818760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:24.822707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:24.877365Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:24.877442Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:24.877497Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:24.877530Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:24.877553Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:24.877585Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:24.877612Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:24.877636Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... tistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:46.620817Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7391:5396], schemeshard count = 1 2025-06-03T10:26:47.874421Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:47.874461Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:47.875626Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:47.893082Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:47.893270Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:47.893284Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze.
Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-03T10:26:47.905334Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:49.169834Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:49.169864Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:49.169870Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:26:49.169879Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:49.169885Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:49.170114Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:49.190503Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-03T10:26:49.190550Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:49.190674Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:49.190692Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:49.190946Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:26:49.190972Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:26:49.191049Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:49.208164Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 
2025-06-03T10:26:49.208199Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:49.208263Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:49.208443Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7507:5464], server id = [2:7508:5465], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:49.208469Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7507:5464], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:49.209740Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:49.209777Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:49.209866Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7507:5464], server id = [2:7508:5465], tablet id = 72075186224037899 2025-06-03T10:26:49.209872Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:49.209895Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:49.209927Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:49.210008Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:49.210749Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:49.222830Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7528:5484]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:49.222895Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:49.222904Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7528:5484], StatRequests.size() = 1 2025-06-03T10:26:49.264791Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTdlN2Q0MjctMWRiMGViYjUtOWE5YzVjYzYtYTRhNmU3ODA=, TxId: 2025-06-03T10:26:49.264819Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTdlN2Q0MjctMWRiMGViYjUtOWE5YzVjYzYtYTRhNmU3ODA=, TxId: 2025-06-03T10:26:49.264958Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:49.292770Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:49.292798Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:26:49.825800Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:26:49.825838Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:50.575415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:50.575448Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:50.575453Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:26:51.967990Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:51.968051Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:51.968057Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:51.968271Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:51.989806Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:51.989950Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:51.989966Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:51.990059Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:52.012359Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:52.012421Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:52.012601Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7624:5532], server id = [2:7625:5533], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:52.012625Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7624:5532], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:52.012937Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:52.012947Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:52.012998Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:52.013029Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:52.013059Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7624:5532], server id = [2:7625:5533], tablet id = 72075186224037899 2025-06-03T10:26:52.013064Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:52.013114Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:52.014004Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:52.057731Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTIwOGJiNmQtN2ZmM2MyYy02YzkwNWVjNS00Mzc0ZmZmZQ==, TxId: 2025-06-03T10:26:52.057775Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTIwOGJiNmQtN2ZmM2MyYy02YzkwNWVjNS00Mzc0ZmZmZQ==, TxId: 2025-06-03T10:26:52.057928Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:52.069538Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:52.069566Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3128:3338] >> TMiniKQLEngineFlatTest::TestEmptyProgram [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRow [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNullKey >> TSchemeShardLoginTest::RemoveUser_NonExisting-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false >> TMiniKQLEngineFlatTest::TestEraseRowNullKey [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowManyShards [GOOD] >> TMiniKQLEngineFlatTest::TestCASBoth2Success [GOOD] >> TMiniKQLEngineFlatTest::TestEraseRowNoShards [GOOD] >> TMiniKQLEngineFlatTest::TestDiagnostics [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestCombineByKeyNoPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestLengthPushdown >> TMiniKQLEngineFlatTest::TestLengthPushdown [GOOD] >> TMiniKQLEngineFlatTest::TestInternalResult [GOOD] >> TMiniKQLEngineFlatTest::TestIndependentSelects [GOOD] >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveGroup_Acl-StrictAclCheck-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:51.112953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:51.112988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:51.112995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, 
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:51.113001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:51.113017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:51.113023Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:51.113033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:51.113048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:51.113172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:51.113247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:51.145996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:51.146023Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:51.154882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:51.155024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:51.155060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:51.170827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:51.170909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:51.171046Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:51.171111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:51.171823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:51.171886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:51.172203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:51.172214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:51.172224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:51.172232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:51.172238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:51.172259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.173670Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:51.232089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:51.232158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.232220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:51.232265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:51.232275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.234379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:51.234415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:51.234463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.234474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:51.234481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:51.234487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:51.239410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.239435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:51.239444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: 
Change state for txid 1:0 3 -> 128 2025-06-03T10:26:51.239974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.239988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:51.239995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:51.240002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:51.240761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:51.241257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:51.241315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:51.241516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:51.241546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:51.241554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:51.241638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:51.242012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:51.242055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:51.242072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:51.249665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:51.249682Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:51.249746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 7: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:26:52.812581Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:52.812591Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:52.812595Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:26:52.812600Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:26:52.812604Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:52.812614Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-03T10:26:52.813177Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:26:52.813262Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 2025-06-03T10:26:52.813371Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:52.813397Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1" took 34us result status StatusSuccess 2025-06-03T10:26:52.813463Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1" PathDescription { Self { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 2 EffectiveACLVersion: 2 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 106 2025-06-03T10:26:52.814235Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveGroup { Group: "group1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:52.814281Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5223: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 1] name: MyRoot type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.814287Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5239: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.814293Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5223: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: Dir1 type: EPathTypeDir state: EPathStateNoChanges stepDropped: 0 droppedTxId: 0 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.814297Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5239: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:52.814345Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:52.814362Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:52.814368Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.814374Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:52.814378Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.814388Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:52.814398Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-03T10:26:52.814403Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:52.814409Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-06-03T10:26:52.814415Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, 
subscribers: 0 2025-06-03T10:26:52.814423Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:26:52.814948Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:52.814970Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE GROUP, path: /MyRoot 2025-06-03T10:26:52.815001Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.815008Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.815032Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.815037Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-03T10:26:52.815138Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:52.815151Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:52.815157Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-03T10:26:52.815162Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:26:52.815168Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:52.815187Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-03T10:26:52.815538Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-03T10:26:52.815637Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:52.815664Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 
72057594046678944 describe path "/MyRoot" took 34us result status StatusSuccess 2025-06-03T10:26:52.815774Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: true } Children { Name: "Dir1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::FailedLoginWithInvalidUser [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:50.709365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:50.709398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.709404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:50.709411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:50.709429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:50.709434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:50.709445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.709460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:50.709589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:50.709671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:50.744573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:50.744600Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:50.759888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:50.760013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:50.760047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:50.761918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:50.761975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:50.762092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.762146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:50.767238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.767336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:50.767714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.767730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.767741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:50.767751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.767758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:50.767785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.769769Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:50.821371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:50.821468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.821556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:50.821615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:50.821630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.825801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.825841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:50.825895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.825907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:50.825913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:50.825919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:50.826497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.826514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:50.826524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:50.829569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.829592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.829599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 
2025-06-03T10:26:50.829607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:50.830450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:50.831286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:50.831346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:50.831587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.831631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:50.831641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.831745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:50.831757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.831813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:50.831829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:50.835942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.835962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.836021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:53.018713Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:26:53.018725Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:26:53.018730Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:53.018736Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:26:53.018739Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:53.018745Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:26:53.018750Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:26:53.018755Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:26:53.018760Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:26:53.018775Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:53.018781Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:26:53.018785Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:26:53.018867Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:26:53.018877Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:26:53.018882Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:26:53.018887Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:26:53.018891Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:53.018904Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:26:53.019433Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard 
Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:26:53.019678Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:53.019764Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [5:268:2258] Bootstrap 2025-06-03T10:26:53.021584Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [5:268:2258] Become StateWork (SchemeCache [5:273:2263]) 2025-06-03T10:26:53.021709Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:53.021772Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 74us result status StatusSuccess 2025-06-03T10:26:53.021865Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:53.021930Z node 5 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [5:268:2258] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:26:53.022542Z node 5 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944 2025-06-03T10:26:53.022706Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:53.022714Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-03T10:26:53.165025Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 2025-06-03T10:26:53.165078Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:53.165087Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:53.165146Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:53.165151Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-03T10:26:53.165288Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 0 2025-06-03T10:26:53.165381Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:53.165406Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 30us result status StatusSuccess 2025-06-03T10:26:53.165488Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" 
Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwr/CCd/8bzr11oT2Zm/a\nO9+HLqdXvq+Vkq7MZcKS0uHh3RKf+jDGSy9fhD04C2t6d+x0Ql5PzJZHmn6V8d4i\nCDjmYreMDOGh6ckcovyyczrddlsiY9jSqhXtQjXR9L8DFLG7M9oShzg/UiuWGEo6\ngHv8TN6xV8M1cxqeSU6VAR1L2wq01NgLDauPb5wR7GLDP6+YRQzVlyM674s0Pdkc\nIsckbVeI6oB5zE2kFQ6o4r8f9E7WwUEeTqq4ol2wxbBoHqpvdahuzIw4M9vn2b/X\nrXkp8YAEqnDGffCRevg3ZxW8YHPvncPS/oZTZ6Saw2GpkCONfgyrvcJp3JfOn+IX\n1wIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032813158 } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-false [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true >> TOlapReboots::CreateMultipleStandaloneTables [GOOD] >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/engine/ut/unittest >> TMiniKQLEngineFlatTest::TestCrossTableRs [GOOD] Test command err: SetProgram (370): ydb/core/engine/mkql_engine_flat.cpp:183: ExtractResultType(): requirement !label.StartsWith(TxInternalResultPrefix) failed. Label can't be used in SetResult as it's reserved for internal purposes: __cantuse PrepareShardPrograms (491): too many shard readsets (1 > 0), src tables: [200:301:0], dst tables: [200:302:0] Type { Kind: Struct } >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] >> TSchemeShardLoginTest::RemoveUser_Groups-StrictAclCheck-true [GOOD] >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeTwoColumnTables [GOOD] Test command err: 2025-06-03T10:24:05.544891Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:05.544928Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:05.544937Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a66/r3tmp/tmp69tcD9/pdisk_1.dat 2025-06-03T10:24:05.779132Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10417, node 1 2025-06-03T10:24:05.902883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:05.902902Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:05.902905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:05.902946Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:05.903554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:05.993555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:05.993591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:06.013093Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29516 2025-06-03T10:24:06.473645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:09.232621Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:09.247768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:09.247809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:09.322925Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:09.324052Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:09.711289Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.711449Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.711617Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:09.711656Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.711699Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.711717Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.712371Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.712817Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.713041Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:09.941818Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:09.941856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:09.965932Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:10.138932Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:10.274098Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:10.274134Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:10.377881Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:10.377938Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:10.377961Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:10.377968Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:10.377973Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:10.377979Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:10.377985Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:10.377991Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:10.378548Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:10.401426Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:10.401454Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:10.407949Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:10.410965Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:10.411519Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:10.421618Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:10.434922Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:10.434947Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:10.434959Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:10.443677Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:10.445356Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:10.445386Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:10.698259Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:10.923663Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:10.973714Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:12.191743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:12.191790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:12.200684Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:12.375444Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:12.376136Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:12.376425Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:12.376447Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:12.376471Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:12.376493Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:12.376719Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:12.376984Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 6-03T10:26:48.324921Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:48.343248Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:48.343282Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:47: [72075186224037894] TTxFinishTraversal::Complete. Don't send TEvAnalyzeResponse. There are pending operations, OperationId operationId , ActorId=[1:3895:3607] 2025-06-03T10:26:48.938882Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:26:48.938916Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:49.625728Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:49.625782Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 
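For context, the ScheduleNextAnalyze / IsColumnTable sequence above, followed by the TTxNavigate, TTxResolve, and TTxAnalyzeTableRequest transactions below, is the statistics aggregator's force-traversal path for a column table. This is the same path a forced statistics collection takes, such as one triggered by YQL's ANALYZE statement. A minimal sketch with a placeholder table path (not taken from this log):

    ANALYZE `/Root/Database/ColumnTable`;

Once every analyzed shard has responded, the aggregator completes the operation and replies with TEvAnalyzeResponse, which is what the AnalyzeColumnshard test waits for.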
2025-06-03T10:26:49.626882Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:49.644431Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:49.644587Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:49.644595Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 5], AnalyzedShards 1 2025-06-03T10:26:49.671493Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:49.696096Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-03T10:26:49.696485Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:26:49.696519Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:26:49.717953Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-03T10:26:51.069777Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:51.069815Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-03T10:26:51.069823Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:26:51.070031Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:51.082093Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:51.082264Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:51.082284Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:51.082420Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:51.111543Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:51.111645Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:51.111849Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8275:5933], server id = [2:8276:5934], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:51.111881Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8275:5933], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-03T10:26:51.112963Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:51.112987Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:51.113020Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:51.113051Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:51.113119Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:51.113731Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8275:5933], server id = [2:8276:5934], tablet id = 72075186224037900 2025-06-03T10:26:51.113744Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:51.113979Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:51.144063Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGU1NmUwMzAtMjhiZTY2ODktMmEzYmZiNDktZWFjNDFmZDM=, TxId: 2025-06-03T10:26:51.144094Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGU1NmUwMzAtMjhiZTY2ODktMmEzYmZiNDktZWFjNDFmZDM=, TxId: 2025-06-03T10:26:51.144276Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:51.166106Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:26:51.166128Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
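The RunDataQuery statement logged above is easier to read reformatted. Note that the List type arguments were evidently stripped by the log pipeline (angle brackets parsed as markup), so the element types below are assumptions for illustration, not recovered from the log:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- assumed element type
    DECLARE $data AS List<String>;         -- assumed element type
    UPSERT INTO `.metadata/_statistics`
        (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES
        ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
        ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);

The two VALUES rows follow from the query itself: one statistics blob is upserted per column tag, matching the two analyzed columns in this test.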
2025-06-03T10:26:51.761133Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-03T10:26:51.761165Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:52.502393Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:52.502473Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:52.539058Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:52.539105Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:52.539112Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:26:53.861265Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:53.861359Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is column table. 2025-06-03T10:26:53.861367Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:26:53.861540Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:53.873443Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:53.873608Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:53.873630Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:53.873798Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:53.886592Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:53.886683Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-03T10:26:53.886878Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8378:5991], server id = [2:8379:5992], tablet id = 72075186224037900, status = OK 2025-06-03T10:26:53.886910Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8378:5991], path = { OwnerId: 72075186224037897 LocalId: 5 } 2025-06-03T10:26:53.887301Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037900 2025-06-03T10:26:53.887322Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:53.887356Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:53.887388Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:53.887480Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:53.888076Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8378:5991], server id = [2:8379:5992], tablet id = 72075186224037900 2025-06-03T10:26:53.888088Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:53.888332Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:53.913452Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODlkMDZhNjktOTJmYmI3NzMtNWE0MWMwYWYtY2E2NWE4ZWI=, TxId: 2025-06-03T10:26:53.913491Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODlkMDZhNjktOTJmYmI3NzMtNWE0MWMwYWYtY2E2NWE4ZWI=, TxId: 2025-06-03T10:26:53.913674Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:53.925766Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:26:53.925803Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3895:3607] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:53.989372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:53.989410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:53.989417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:53.989423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:53.989440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:53.989445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:53.989455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:53.989470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:53.989590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:53.989671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:54.014366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:54.014399Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:54.020277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:54.020442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:54.020483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:54.028845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:54.028942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:54.029108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:54.029182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:54.030251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:54.030339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:54.030778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:54.030798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:54.030810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:54.030825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:54.030833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:54.030863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.037908Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:54.089415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:54.089517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.089613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:54.089672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:54.089686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.097920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:54.097980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:54.098067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.098084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:54.098091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:54.098099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:54.102054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.102092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:54.102103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:54.102918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.102934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.102943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 
2025-06-03T10:26:54.102953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:54.103878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:54.105213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:54.105311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:54.105593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:54.105643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:54.105655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:54.105754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:54.105766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:54.105816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:54.105832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:54.106524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:54.106537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:54.106607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
43: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-03T10:26:54.512706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:26:54.512932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:54.513892Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409551 2025-06-03T10:26:54.514017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 Forgetting tablet 72075186234409549 2025-06-03T10:26:54.514398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-03T10:26:54.514459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:26:54.514723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:26:54.521856Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409550 Forgetting tablet 72075186234409551 Forgetting tablet 72075186234409550 2025-06-03T10:26:54.522551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-03T10:26:54.522674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:54.522846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:54.522854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:54.522877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:54.522989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:54.522995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:54.523008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 
1] was 2 2025-06-03T10:26:54.525765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:26:54.525796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186234409549 2025-06-03T10:26:54.525976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:26:54.525987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409551 2025-06-03T10:26:54.529530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:26:54.529563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409550 2025-06-03T10:26:54.529653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:54.529668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-03T10:26:54.529779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-03T10:26:54.529790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-03T10:26:54.529906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-03T10:26:54.529948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:26:54.529955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:945:2802] TestWaitNotification: OK eventTxId 106 2025-06-03T10:26:54.530078Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:54.530152Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 99us result status StatusPathDoesNotExist 2025-06-03T10:26:54.530212Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { 
Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:54.530283Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:54.530301Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 20us result status StatusPathDoesNotExist 2025-06-03T10:26:54.530323Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:54.530377Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:54.530414Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 38us result status StatusSuccess 2025-06-03T10:26:54.530512Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { 
Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186234409549 is deleted wait until 72075186234409550 is deleted wait until 72075186234409551 is deleted wait until 72075186234409552 is deleted 2025-06-03T10:26:54.530615Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409549 2025-06-03T10:26:54.530645Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409550 2025-06-03T10:26:54.530655Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409551 2025-06-03T10:26:54.530664Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186234409552 Deleted tabletId 72075186234409549 Deleted tabletId 72075186234409550 Deleted tabletId 72075186234409551 Deleted tabletId 72075186234409552 >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateMultipleStandaloneTables [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:25:50.858973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:25:50.859004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.859010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:50.859015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:50.859030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-03T10:25:50.859035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:50.859045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.859061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:50.859195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:50.859277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:50.877598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:25:50.877631Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.877753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.881945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:50.882126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:50.882171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:50.884790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:50.884863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:50.884996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.885054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:50.885679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.885746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:50.886143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:50.886162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.886182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:50.886194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:50.886202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:50.886258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:25:50.888301Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.917001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:50.917106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.917191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:50.917259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:50.917274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.918628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.918673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:50.918745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.918759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:50.918765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:25:50.918773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:50.920926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.920958Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:50.920968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:50.921721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.921742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.921752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.921762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:50.922751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:50.923971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:50.924046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:50.924336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.924376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:50.924385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.924470Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
o_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:26:54.201345Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:26:54.201357Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:26:54.201746Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:26:54.201769Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:26:54.201775Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:26:54.201782Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-03T10:26:54.201793Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:26:54.201812Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 0/1, is published: true 2025-06-03T10:26:54.201961Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-06-03T10:26:54.202936Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:26:54.203059Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestWaitNotification: OK eventTxId 1003 2025-06-03T10:26:54.217129Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1002 2025-06-03T10:26:54.217160Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409547, partId: 0 2025-06-03T10:26:54.217185Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1002 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:26:54.217638Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:26:54.217675Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at 
schemeshard: 72057594046678944 2025-06-03T10:26:54.217683Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-03T10:26:54.217702Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:26:54.217707Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:26:54.217713Z node 120 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:26:54.217717Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:26:54.217722Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: true 2025-06-03T10:26:54.217751Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [120:433:2398] message: TxId: 1002 2025-06-03T10:26:54.217757Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:26:54.217764Z node 120 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:26:54.217770Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:26:54.217811Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:26:54.218225Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:26:54.218234Z node 120 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [120:434:2399] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:26:54.218351Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:54.218420Z node 120 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable1" took 78us result status StatusSuccess 2025-06-03T10:26:54.218577Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ColumnTable1" PathDescription { Self { Name: "ColumnTable1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 
0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnTableDescription { Name: "ColumnTable1" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409546 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } StorageConfig { DataChannelCount: 64 } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:54.218713Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:54.218736Z node 120 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable2" took 24us result status StatusSuccess 2025-06-03T10:26:54.218804Z node 120 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ColumnTable2" PathDescription { Self { Name: "ColumnTable2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeColumnTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 ColumnTableVersion: 1 ColumnTableSchemaVersion: 1 } ChildrenExist: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ColumnTableDescription { Name: "ColumnTable2" Schema { Columns { Id: 1 Name: "timestamp" Type: "Timestamp" TypeId: 50 NotNull: true StorageId: "" DefaultValue { } ColumnFamilyId: 0 } Columns { Id: 2 Name: "data" Type: "Utf8" TypeId: 4608 NotNull: false StorageId: "" DefaultValue { } ColumnFamilyId: 0 } KeyColumnNames: "timestamp" NextColumnId: 3 Version: 1 Options { SchemeNeedActualization: false } ColumnFamilies { Id: 0 Name: "default" } NextColumnFamilyId: 1 } ColumnShardCount: 1 Sharding { ColumnShards: 72075186233409547 HashSharding { Function: HASH_FUNCTION_CONSISTENCY_64 Columns: "timestamp" } } StorageConfig { DataChannelCount: 64 } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag >> TSchemeShardServerLess::Fake [GOOD] |62.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TOlapReboots::DropMultipleStandaloneTables [GOOD] >> THealthCheckTest::ProtobufUnderLimitFor70LargeVdisksIssues [GOOD] >> THealthCheckTest::ServerlessBadTablets |62.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest |62.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::Fake [GOOD] >> TSchemeShardLoginTest::BanUserWithWaiting [GOOD] >> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD] >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::RemoveUser_Owner-StrictAclCheck-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:52.879506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:52.879541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:52.879547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:52.879553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:52.879572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:52.879577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:52.879587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:52.879602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:52.879724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:52.879800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:52.897585Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:52.897616Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:52.906070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:52.906216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:52.906250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:52.923437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:52.923523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:52.923672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.923749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:52.925602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.925681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:52.926084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.926101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.926113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:52.926128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.926136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete 2025-06-03T10:26:52.926163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.929618Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:52.970600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:52.970687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.970771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:52.970823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:52.970836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.972525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.972564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:52.972623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.972637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:52.972644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:52.972650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:52.973405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.973422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:52.973430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:52.976866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.976891Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.976900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.976909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:52.977807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:52.981267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:52.981354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:52.981605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.981651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:52.981661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.981770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:52.981784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.981841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:52.981857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:52.985615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.985637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.985710Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusSuccess TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:55.808841Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /MyRoot/Dir1/DirSub1, set owner:user2 2025-06-03T10:26:55.808874Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:55.808880Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:55.808903Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:26:55.808920Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:55.808925Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-03T10:26:55.808931Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 105, path id: 2 2025-06-03T10:26:55.809036Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:55.809047Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:55.809052Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:26:55.809056Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-03T10:26:55.809062Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:55.809126Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:55.809135Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 
PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:55.809139Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:26:55.809147Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-03T10:26:55.809151Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:26:55.809160Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-03T10:26:55.809641Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:26:55.809744Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 TestModificationResult got TxId: 105, wait until txId: 105 TestModificationResults wait txId: 106 2025-06-03T10:26:55.810421Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "user1" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:55.810515Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:55.810532Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:55.810537Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:55.810544Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1 2025-06-03T10:26:55.810547Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:55.810559Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:55.810567Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: false 2025-06-03T10:26:55.810572Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1 2025-06-03T10:26:55.810577Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0 2025-06-03T10:26:55.810584Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 106, publications: 1, subscribers: 0 2025-06-03T10:26:55.810588Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 106, [OwnerId: 72057594046678944, LocalPathId: 1], 10 
2025-06-03T10:26:55.811107Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusSuccess TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:55.811135Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot, subject: , status: StatusSuccess, operation: REMOVE USER, path: /MyRoot 2025-06-03T10:26:55.811173Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:55.811184Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 106, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:55.811222Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:55.811228Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 106, path id: 1 2025-06-03T10:26:55.811348Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:55.811363Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72057594046678944, cookie: 106 2025-06-03T10:26:55.811369Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 106 2025-06-03T10:26:55.811376Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 106, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 10 2025-06-03T10:26:55.811383Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:55.811401Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 106, subscribers: 0 2025-06-03T10:26:55.811816Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 TestModificationResult got TxId: 106, wait until txId: 106 2025-06-03T10:26:55.812050Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dir1/DirSub1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:55.812092Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dir1/DirSub1" took 52us result status StatusSuccess 2025-06-03T10:26:55.812170Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dir1/DirSub1" PathDescription { Self { Name: "DirSub1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 103 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "user2" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:55.812264Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:55.812286Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Cannot find user: user1", at schemeshard: 72057594046678944 >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCount [GOOD] >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::TestServerlessComputeResourcesModeFeatureFlag [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:56.344111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:56.344145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:56.344152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:56.344158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:56.344176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:56.344181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:56.344193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:56.344209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:56.344336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:56.344419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:56.369820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:56.369860Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:56.383356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:56.383558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:56.383607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:56.386913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:56.386998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:56.387165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:56.387239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:56.388089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:56.388169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:56.388573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:56.388594Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:56.388607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:56.388618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:56.388645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:56.388675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.395340Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:56.439277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:56.439385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.439473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:56.439532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:56.439545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.441215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:56.441254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:56.441344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.441362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:56.441369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:56.441375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:56.442022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.442035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:56.442042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:56.442385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.442396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.442403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:26:56.442413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:56.443239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:56.443685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:56.443733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:56.443964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:56.443994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:56.444003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:56.444078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:56.444088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:56.444131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:56.444149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:56.444585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:56.444595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:56.444657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.947502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.947513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.947523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 104:0, at tablet# 72057594046678944 2025-06-03T10:26:56.947536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-03T10:26:56.947592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:56.948359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-03T10:26:56.948400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-03T10:26:56.948510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:56.948536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:56.948548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-03T10:26:56.948634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 128 -> 240 2025-06-03T10:26:56.948646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 104:0, at tablet# 72057594046678944 2025-06-03T10:26:56.948681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:26:56.948711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:619:2543], EffectiveACLVersion: 0, SubdomainVersion: 
2, UserAttributesVersion: 1, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 1, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:26:56.955467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:56.955496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:26:56.955580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:56.955589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-03T10:26:56.955697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.955710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:787: [72057594046678944] TSyncHive, operationId 104:0, ProgressState, NeedSyncHive: 0 2025-06-03T10:26:56.955719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 240 -> 240 2025-06-03T10:26:56.955968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:56.956007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:26:56.956014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:26:56.956022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-03T10:26:56.956034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-03T10:26:56.956057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-03T10:26:56.962865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.962895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:26:56.962930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part 
operation is done id#104:0 progress is 1/1 2025-06-03T10:26:56.962937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:56.962943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:26:56.962948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:56.962954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-03T10:26:56.962963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:26:56.962970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:26:56.962976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:26:56.963037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:26:56.963215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-03T10:26:56.963664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-03T10:26:56.963677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-03T10:26:56.963786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-03T10:26:56.963809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:26:56.963815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:776:2653] TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-03T10:26:56.964731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:56.964769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { Name: "ServerLess0" ServerlessComputeResourcesMode: EServerlessComputeResourcesModeExclusive } 2025-06-03T10:26:56.964778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 105:0, path /MyRoot/ServerLess0 2025-06-03T10:26:56.964823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944
2025-06-03T10:26:56.964832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, at schemeshard: 72057594046678944
2025-06-03T10:26:56.975466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:26:56.975539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid AlterExtSubDomain request: Unsupported: feature flag EnableServerlessExclusiveDynamicNodes is off, operation: ALTER DATABASE, path: /MyRoot/ServerLess0
TestModificationResult got TxId: 105, wait until txId: 105
>> StatisticsSaveLoad::Delete
>> TSchemeShardLoginTest::ChangeAcceptablePasswordParameters [GOOD]
>> TSchemeShardLoginTest::ChangeAccountLockoutParameters
>> TSchemeShardServerLess::StorageBillingLabels
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::AccountLockoutAndAutomaticallyUnlock [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:26:50.565003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:26:50.565034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:26:50.565054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:26:50.565060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:26:50.565076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:26:50.565081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:26:50.565093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:26:50.565107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0,
InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:50.565222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:50.565315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:50.648134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:50.648162Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:50.674922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:50.675062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:50.675097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:50.698292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:50.698368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:50.698501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.698561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:50.705007Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.705103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:50.705500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.705515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.705525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:50.705533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.705540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:50.705566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.717224Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:50.807024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:50.807106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.807179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:50.807231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:50.807242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.821714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.821773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:50.821839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.821852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:50.821859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:50.821865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:50.825469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.825495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:50.825505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:50.829518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.829540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.829547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.829555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:50.833622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:50.834187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:50.834232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:50.834446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.834475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:50.834485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.834563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:50.834572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.834620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:50.834634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:50.845265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.845323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.845392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
operation: CREATE USER, path: /MyRoot 2025-06-03T10:26:52.690821Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.690828Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.690873Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.690880Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:26:52.691008Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:52.691021Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:26:52.691026Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:26:52.691033Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4 2025-06-03T10:26:52.691044Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:52.691064Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:26:52.691440Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 2025-06-03T10:26:52.691526Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.691532Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-03T10:26:52.757626Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-03T10:26:52.757666Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.757675Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.757735Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-03T10:26:52.757763Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-03T10:26:52.757897Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 0 2025-06-03T10:26:52.757986Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.762025Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-03T10:26:52.762129Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.769828Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-03T10:26:52.769955Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.789618Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-03T10:26:52.789732Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.789780Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-03T10:26:52.789823Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:52.789831Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-03T10:26:52.789894Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:52.789946Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 60us result status StatusSuccess 2025-06-03T10:26:52.790059Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 
72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoyKAZ6dKmCfmBBfq6yAF\nAETzfkjLLmfF1MFXwTcmejDGdzCycc4NDQ/hP5QVFcEUKp41+QyeSnfCqA/C8oAH\nCx/qFndmetpS1GqfFc0G8EzXYWVqDxFZbR0SObXGClLGRZOwCWvvgWp6Q0IKpquP\ni476DXuN0dQR1SdnEnIZS1NQ2lUlUbh56zs4GjCKY1xOECyg6N8IbIQH6pRC5wbr\nwOYCf2+imsXi1l+vBjA8ji+NoWw9QFU+dE0Pkj5qHDqOkM4vCLE0dYv7l0x4xjZI\n5F4a8rE9+Es6hniQPEM/IfU0Azysjx8NYHYV/+KGRT4MTIR7abG+bJwsRFN9QmFD\nfwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032812745 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:56.793503Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:56.801578Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "Invalid password", at schemeshard: 72057594046678944 2025-06-03T10:26:56.801703Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:26:56.815996Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjE2LCJpYXQiOjE3NDg5NDY0MTYsInN1YiI6InVzZXIxIn0.G-bfHlqGxoMaWnItsbb0G_VyMKC6xNjSLH_go6rA58HNWM6KJb4JAV0Um6ma0naIxub-b7Q6MIPxzFsmUm-bzYLrWAKzKjHWDfKHJYfjbK0vMf4vYeuRbV5vWOXilmf4ovoRSNaKjv7u90OycBSFee6luyHIWxTfOGI6fcwCmJvAIqKMaXUntAoAekPDA0U4C70P2jUkLhvRmOuxAO2UOsEFbCesmvui-W8EeFNs6uF1FKZjeW2-dKgeaxKVWw3_O2AcfVPNg4_vfpXcNfMbq4nwR4yKuEuh3DEd81rORiQmG0beFD_3A1kJ7OH6nXDyjVjP9wcuGnH0sDWQmGHWdg" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjE2LCJpYXQiOjE3NDg5NDY0MTYsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-03T10:26:56.816148Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:56.816214Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 63us result status StatusSuccess 2025-06-03T10:26:56.816344Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 2 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoyKAZ6dKmCfmBBfq6yAF\nAETzfkjLLmfF1MFXwTcmejDGdzCycc4NDQ/hP5QVFcEUKp41+QyeSnfCqA/C8oAH\nCx/qFndmetpS1GqfFc0G8EzXYWVqDxFZbR0SObXGClLGRZOwCWvvgWp6Q0IKpquP\ni476DXuN0dQR1SdnEnIZS1NQ2lUlUbh56zs4GjCKY1xOECyg6N8IbIQH6pRC5wbr\nwOYCf2+imsXi1l+vBjA8ji+NoWw9QFU+dE0Pkj5qHDqOkM4vCLE0dYv7l0x4xjZI\n5F4a8rE9+Es6hniQPEM/IfU0Azysjx8NYHYV/+KGRT4MTIR7abG+bJwsRFN9QmFD\nfwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032812745 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize
>> YdbOlapStore::BulkUpsert [GOOD]
>> YdbOlapStore::DuplicateRows
>> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD]
>> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD]
>> StatisticsSaveLoad::Simple
>> THealthCheckTest::ServerlessBadTablets [GOOD]
>> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::DropMultipleStandaloneTables [GOOD]
Test command err:
==== RunWithTabletReboots
=========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142]
2025-06-03T10:25:50.391814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:25:50.391850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:25:50.391857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout#
0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:50.391863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:50.391879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:25:50.391884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:50.391895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.391911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:50.392053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:50.392143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:50.407532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:25:50.407564Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.407676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.410447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:50.410549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:50.410576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:50.412855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:50.412923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:50.413035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.413105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:50.413614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.413675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:50.413982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:50.413998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.414017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:50.414027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:50.414034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:50.414095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:25:50.415563Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.434287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:50.434378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.434439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:50.434483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:50.434492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.435381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.435413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:50.435461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.435469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:50.435475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to 
create, do next state 2025-06-03T10:25:50.435479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:50.435875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.435891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:50.435897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:50.436287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.436297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.436301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.436307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:50.436884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:50.437285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:50.437346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:50.437565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.437590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:50.437598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.437655Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:26:56.202566Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 0/1, is published: true 2025-06-03T10:26:56.202614Z node 111 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:26:56.202803Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:56.203012Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:56.203192Z node 111 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[111:332:2318];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; Forgetting tablet 72075186233409546 2025-06-03T10:26:56.204782Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:56.204799Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:56.204819Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:26:56.204938Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1004:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 275382275 2025-06-03T10:26:56.230027Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:26:56.232075Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:26:56.232387Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:26:56.232407Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:26:56.232479Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1005 2025-06-03T10:26:56.261884Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1004 2025-06-03T10:26:56.261917Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1004, tablet: 72075186233409547, partId: 0 2025-06-03T10:26:56.261954Z node 111 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1004:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409547 TxId: 1004 2025-06-03T10:26:56.261969Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1004:0 129 -> 130 FAKE_COORDINATOR: Erasing txId 1004 2025-06-03T10:26:56.262594Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.262642Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:26:56.262651Z node 111 :FLAT_TX_SCHEMESHARD INFO: drop_table.cpp:315: TDropColumnTable TProposedDeleteParts operationId# 1004:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:56.262674Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:26:56.262702Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:26:56.262708Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:26:56.262714Z node 111 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:26:56.262718Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:26:56.262739Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: true 2025-06-03T10:26:56.262754Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [111:364:2341] message: TxId: 1004 2025-06-03T10:26:56.262760Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:26:56.262766Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1004:0 2025-06-03T10:26:56.262771Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1004:0 2025-06-03T10:26:56.262798Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:26:56.263419Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:26:56.263455Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:26:56.263462Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [111:534:2491] 2025-06-03T10:26:56.263579Z node 111 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:26:56.263755Z node 111 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186233409547;self_id=[111:435:2402];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:26:56.264945Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:26:56.265152Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409547 2025-06-03T10:26:56.265359Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:56.265368Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:26:56.265381Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:56.272009Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:26:56.272044Z node 111 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:26:56.272200Z node 111 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1004 2025-06-03T10:26:56.272343Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:56.272405Z node 111 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable1" took 75us result status StatusPathDoesNotExist 2025-06-03T10:26:56.272450Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ColumnTable1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ColumnTable1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:56.272532Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ColumnTable2" Options { ReturnPartitioningInfo: false 
ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:56.272548Z node 111 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ColumnTable2" took 18us result status StatusPathDoesNotExist 2025-06-03T10:26:56.272566Z node 111 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ColumnTable2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ColumnTable2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAnalyzeTableResponse [GOOD] Test command err: 2025-06-03T10:24:18.493186Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:18.493231Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:18.493242Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a48/r3tmp/tmp7TMAIw/pdisk_1.dat 2025-06-03T10:24:18.871800Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8480, node 1 2025-06-03T10:24:19.298124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:19.298149Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:19.298155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:19.298268Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:19.300834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:19.382205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:19.382243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:19.401839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6741 2025-06-03T10:24:19.916381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:22.317841Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:22.336263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:22.336308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:22.439107Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:22.441055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:22.647716Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648192Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648378Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:22.648424Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648481Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648506Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648575Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648600Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.648619Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.860961Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:22.861011Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:22.874238Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:22.925575Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:22.944119Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:22.944162Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:22.955175Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:22.955255Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:22.955289Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:22.955297Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:22.955305Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:22.955332Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:22.955340Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:22.955349Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:22.955572Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:22.978785Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:22.978827Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:22.982385Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:22.983527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:22.983661Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:22.986576Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:22.997668Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:22.997704Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:22.997723Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:23.005496Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:23.007918Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:23.007971Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:23.145801Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:23.241099Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:23.284998Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:23.874084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:23.874141Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:23.878187Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:23.925169Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:23.925259Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:23.926900Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:23.926980Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:23.927012Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:23.927045Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:23.927074Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:23.927105Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_regi ... ath [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:51.022039Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:51.042138Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:51.042308Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:51.042327Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:51.042658Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:51.058673Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:51.058756Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:51.058916Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7615:5514], server id = [2:7616:5515], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:51.058942Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7615:5514], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:51.059811Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:51.059827Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:51.059887Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:51.059915Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:51.059975Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:51.060536Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7615:5514], server id = [2:7616:5515], tablet id = 72075186224037899 2025-06-03T10:26:51.060545Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:51.060674Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:51.069850Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7636:5534]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:51.069920Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:51.069927Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7636:5534], StatRequests.size() = 1 2025-06-03T10:26:51.107209Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjIzMDg4Zi1jZmFjOTg0My04ZDdkMmQwMS1iNThmYzEwNw==, TxId: 2025-06-03T10:26:51.107240Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjIzMDg4Zi1jZmFjOTg0My04ZDdkMmQwMS1iNThmYzEwNw==, TxId: 2025-06-03T10:26:51.107410Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:51.132963Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:51.133077Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. 
No ActorId to send reply. 2025-06-03T10:26:51.745691Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:26:51.745729Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:52.535278Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:52.535326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:52.535545Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:52.553052Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:52.553218Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:52.553229Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-03T10:26:52.585968Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:53.949644Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:53.949682Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:53.949687Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:26:53.949765Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-03T10:26:53.949912Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:26:53.949941Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:26:53.967318Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-03T10:26:55.263997Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:55.264033Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:55.264038Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:26:56.589613Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:56.589735Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:56.601523Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:56.601587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
2025-06-03T10:26:56.601594Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:56.601846Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:56.636780Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:56.636932Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:56.636947Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:56.637107Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:56.662521Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:56.662603Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:56.662779Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7806:5624], server id = [2:7807:5625], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:56.662814Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7806:5624], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:56.663198Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:56.663212Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:56.663246Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:56.663273Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:56.663367Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:56.664119Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7806:5624], server id = [2:7807:5625], tablet id = 72075186224037899 2025-06-03T10:26:56.664132Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:56.664403Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:56.711454Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTA5OTBmODQtMWZhNTU3YTUtZTIxNDRhMjYtYmQ5NjdmMjM=, TxId: 2025-06-03T10:26:56.711486Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTA5OTBmODQtMWZhNTU3YTUtZTIxNDRhMjYtYmQ5NjdmMjM=, TxId: 2025-06-03T10:26:56.711618Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:08.000000Z, event interval end# 2025-06-03T10:26:54.000000Z 2025-06-03T10:26:56.711730Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:56.726616Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:56.726644Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3129:3338] >> TBsProxyFaultToleranceTest::CheckTRangeFaultToleranceTestErasureMirror3dc [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::BaseCase-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:57.424197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:57.424233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:57.424241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:57.424247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:57.424264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:57.424269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:57.424281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:57.424298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:57.424427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:57.424513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:57.493056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:57.493098Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:57.507000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:57.507183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:57.507241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:57.524346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:57.524439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners 
number: 0 2025-06-03T10:26:57.524647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:57.524729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:57.530297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:57.530425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:57.530920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:57.530939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:57.530951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:57.530962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:57.530970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:57.531026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.543338Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:57.645868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:57.645977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.646072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:57.646134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:57.646148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.650133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:57.650200Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:57.650297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.650317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:57.650326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:57.650335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:57.652010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.652044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:57.652055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:57.652764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.652781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.652790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:57.652800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:57.653853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:57.663715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:57.663823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:57.664114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:57.664175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 
TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:57.664187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:57.664304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:57.664315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:57.664368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:57.664385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:57.671430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:57.671460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:57.671552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... SHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:58.196218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:26:58.196529Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 7 TxId_Deprecated: 7 TabletID: 72075186234409548 2025-06-03T10:26:58.196613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-03T10:26:58.196718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:26:58.196877Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72075186233409546] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186234409547 Forgetting tablet 72075186234409548 2025-06-03T10:26:58.197518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 TxId_Deprecated: 7 ShardOwnerId: 72057594046678944 ShardLocalIdx: 7, at schemeshard: 72057594046678944 2025-06-03T10:26:58.197590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186234409547 2025-06-03T10:26:58.197849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72075186233409546 
TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-03T10:26:58.197895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:26:58.201576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:26:58.201885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:58.201902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:58.201958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:26:58.202063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:26:58.202073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:58.202089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:26:58.206043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:26:58.206082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186234409546 2025-06-03T10:26:58.206137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:26:58.206143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186234409548 2025-06-03T10:26:58.206231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:26:58.206243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186234409547 2025-06-03T10:26:58.206771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:58.206801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-03T10:26:58.206911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-03T10:26:58.206923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 
106 2025-06-03T10:26:58.207040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-03T10:26:58.207078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:26:58.207087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:933:2791] TestWaitNotification: OK eventTxId 106 2025-06-03T10:26:58.207214Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0/dir/table0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:58.207270Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0/dir/table0" took 84us result status StatusPathDoesNotExist 2025-06-03T10:26:58.207338Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0/dir/table0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ServerLess0/dir/table0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:58.207416Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLess0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:58.207434Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ServerLess0" took 20us result status StatusPathDoesNotExist 2025-06-03T10:26:58.207455Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ServerLess0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ServerLess0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 
2025-06-03T10:26:58.207516Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:26:58.207553Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 39us result status StatusSuccess
2025-06-03T10:26:58.207680Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SharedDB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
wait until 72075186233409550 is deleted
wait until 72075186233409551 is deleted
wait until 72075186233409552 is deleted
wait until 72075186233409553 is deleted
2025-06-03T10:26:58.207802Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409550
2025-06-03T10:26:58.212826Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409551
2025-06-03T10:26:58.212875Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409552
2025-06-03T10:26:58.212888Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72075186233409546] TEvSubscribeToTabletDeletion, 72075186233409553
Deleted tabletId 72075186233409550
Deleted tabletId 72075186233409551
Deleted tabletId 72075186233409552
Deleted tabletId 72075186233409553
>> LocalTableWriter::SupportedTypes
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::ResetFailedAttemptCountAfterModifyUser [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058]
recipient: [1:109:2140] 2025-06-03T10:26:50.750529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:50.750562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.750569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:50.750576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:50.750594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:50.750600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:50.750611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:50.750625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:50.750732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:50.750804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:50.792500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:50.792532Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:50.806131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:50.806297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:50.806350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:50.811991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:50.812068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:50.812210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.812278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:50.821860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.821939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:50.822310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.822329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:50.822339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:50.822353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.822360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:50.822384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.824171Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:50.895834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:50.895906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.895988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:50.896048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:50.896059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.896878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.896908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:50.896963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.896975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:50.896982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:50.896988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:50.905141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.905176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:50.905187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:50.907019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.907045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:50.907053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.907063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:50.907877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:50.908504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:50.908555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:50.908783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:50.908820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:50.908829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:50.908912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:50.908923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:26:50.908974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:50.908988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:50.909537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:50.909549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:50.909622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 57.927593Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:26:57.927600Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:57.927623Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-03T10:26:57.928091Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:26:57.928347Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 Leader for TabletID 72057594046678944 is [5:309:2295] sender: [5:404:2058] recipient: [5:103:2137] Leader for TabletID 72057594046678944 is [5:309:2295] sender: [5:407:2058] recipient: [5:15:2062] Leader for TabletID 72057594046678944 is [5:309:2295] sender: [5:408:2058] recipient: [5:406:2376] Leader for TabletID 72057594046678944 is [5:409:2377] sender: [5:410:2058] recipient: [5:406:2376] 2025-06-03T10:26:57.936077Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:57.936113Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:57.936122Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:57.936128Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:57.936135Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:57.936141Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:57.936151Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:57.936188Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:57.936295Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:57.936371Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:57.937925Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:57.938373Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:57.938431Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:57.938480Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:57.938488Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:57.938528Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:57.938643Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938665Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938677Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938743Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938759Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:26:57.938791Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938805Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938817Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938837Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938848Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938868Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 0, 
at schemeshard: 72057594046678944 2025-06-03T10:26:57.938907Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938922Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938972Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.938983Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939004Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939018Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939031Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939072Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939085Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939108Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939141Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939152Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939171Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939179Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.939186Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:26:57.941973Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:57.942465Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:57.942483Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:57.942858Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:57.942877Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:57.942887Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: 
TTxServerlessStorageBilling.Complete
2025-06-03T10:26:57.943013Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
Leader for TabletID 72057594046678944 is [5:409:2377] sender: [5:467:2058] recipient: [5:15:2062]
2025-06-03T10:26:58.000017Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944
2025-06-03T10:26:58.000038Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944
2025-06-03T10:26:58.074167Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjE4LCJpYXQiOjE3NDg5NDY0MTgsInN1YiI6InVzZXIxIn0.Z5IGUr-QoDgz8cpCVmwmqJ0JQLyhPoqynkr3lXqLOMz1MCB_Iq9UXGXwROcUU3Wg0ovPLexcLyRQ7wT7dW6B_4CHamH2rrPzzOWpJKWCQFy0egCBzDbuKQKcZLMOwK7fAl7CSY_Q7ABpPa0aPps2dPHQ5iLbQwxuYIYSK6e8f2TEhtIjhR7JSOXPrh4hyxokv_xrzPNmHxfhTzbtI0q0KuvtgHC7peJdQe20JDVx1pjiMKS5XCjCLpH9BphXbKaWXI7DYQ-QqVZlpYhJfZLLbNMZ6-XGhFpPWa5n_kL2w6-uw-diVCgIibzWPmwDbH5kQBwXyzkxJa4c4m5Gy3_udw" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjE4LCJpYXQiOjE3NDg5NDY0MTgsInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944
2025-06-03T10:26:58.074216Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:26:58.074225Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:26:58.074281Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:26:58.074288Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:458:2415], at schemeshard: 72057594046678944, txId: 0, path id: 1
2025-06-03T10:26:58.074417Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 0
>> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD]
>> LocalTableWriter::DecimalKeys
>> LocalTableWriter::WriteTable
|62.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckTRangeFaultToleranceTestErasureMirror3dc [GOOD]
|62.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD]
>> LocalTableWriter::ApplyInCorrectOrder
>> THealthCheckTest::ServerlessWhenTroublesWithSharedNodes [GOOD]
>> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicAccountSizeAndUsedReserveSize [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is
[1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:58.560036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:58.560072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:58.560078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:58.560085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:58.560112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:58.560117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:58.560129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:58.560143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:58.560270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:58.560350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:58.595429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:58.595468Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:58.603161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:58.603335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:58.603391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:58.605796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:58.605894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:58.606038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.606130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:58.606904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:58.606964Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:58.607398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:58.607414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:58.607424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:58.607438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:58.607444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:58.607469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.609076Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:58.672353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:58.672477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.672568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:58.672628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:58.672642Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.684574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.684638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:58.684744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.684760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:58.684767Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:58.684774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:58.687348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.687386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:58.687397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:58.694876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.694918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.694928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.694938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:58.695880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:58.699988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:58.700095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:58.700394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.700457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:58.700470Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.700623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:58.700636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, 
operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.700694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:58.700714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:58.701786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:58.701805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:58.701878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:26:59.312817Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 268698118, Sender [1:218:2216], Recipient [1:285:2271]: NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 2025-06-03T10:26:59.312822Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4908: StateWork, processing event TEvHive::TEvDeleteTabletReply 2025-06-03T10:26:59.312829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:26:59.313125Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409551][Topic3] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409551 2025-06-03T10:26:59.313172Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-03T10:26:59.313256Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:26:59.313854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:26:59.313871Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:26:59.313900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:26:59.313917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:26:59.313922Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at 
tablet# 72057594046678944 2025-06-03T10:26:59.314343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:26:59.314371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:26:59.314417Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [1:1036:2893], Recipient [1:285:2271]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037968897 ClientId: [1:1036:2893] ServerId: [1:1038:2895] } 2025-06-03T10:26:59.314425Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:26:59.314433Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72057594037968897, from:72057594046678944 is reset TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-03T10:26:59.314560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-03T10:26:59.314572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-03T10:26:59.314672Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:1052:2909], Recipient [1:285:2271]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:59.314680Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:26:59.314686Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:26:59.314724Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [1:554:2486], Recipient [1:285:2271]: NKikimrScheme.TEvNotifyTxCompletion TxId: 104 2025-06-03T10:26:59.314732Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:26:59.314749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-03T10:26:59.314779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:26:59.314787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1050:2907] 2025-06-03T10:26:59.314816Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [1:1052:2909], Recipient [1:285:2271]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:26:59.314823Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:26:59.314829Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-03T10:26:59.314958Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:1053:2910], Recipient [1:285:2271]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: 
false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:26:59.314965Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:26:59.314981Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:26:59.315041Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 59us result status StatusSuccess 2025-06-03T10:26:59.315219Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 31 UsedReserveSize: 31 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:59.315384Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271188001, Sender [1:1054:2911], Recipient [1:285:2271]: NKikimrPQ.TEvPeriodicTopicStats PathId: 4 Generation: 1 Round: 6 DataSize: 151 UsedReserveSize: 151 2025-06-03T10:26:59.315392Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4920: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-03T10:26:59.315401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 4] DataSize 151 UsedReserveSize 151 2025-06-03T10:26:59.315413Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-03T10:26:59.315521Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:1055:2912], Recipient [1:285:2271]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false 
BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }
2025-06-03T10:26:59.315528Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme
2025-06-03T10:26:59.315541Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:26:59.316067Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 24us result status StatusSuccess
2025-06-03T10:26:59.316211Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 13 WriteSpeedInBytesPerSecond: 19 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 808 AccountSize: 808 DataSize: 182 UsedReserveSize: 182 } } PQPartitionsInside: 4 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|62.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys [GOOD]
>> LocalTableWriter::SupportedTypes [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootSaBeforeAggregate [GOOD]
Test command err:
2025-06-03T10:24:22.522548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:22.522614Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:22.522626Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a2d/r3tmp/tmpQDuFLi/pdisk_1.dat 2025-06-03T10:24:22.689030Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27357, node 1 2025-06-03T10:24:22.840873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:22.840900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:22.840905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:22.841021Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:22.841874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:22.927871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:22.927924Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:22.943308Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62461 2025-06-03T10:24:23.350078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:24.302391Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:24.315129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:24.315204Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:24.369229Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:24.369842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:24.534664Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.534891Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535061Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:24.535106Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535162Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535185Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535205Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535226Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.535269Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.722799Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:24.722848Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:24.734520Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:24.768536Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:24.779147Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:24.779177Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:24.787612Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:24.787681Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:24.787714Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:24.787722Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:24.787729Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:24.787737Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:24.787746Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:24.787755Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:24.787932Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:24.802586Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.802627Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.804261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:24.805241Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:24.805416Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:24.807482Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:24.811592Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:24.811614Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:24.811627Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:24.816648Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:24.818739Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:24.818773Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:24.957914Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:25.082186Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:25.129721Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:25.736717Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.736796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.741425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:25.804089Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:25.804187Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:25.804252Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:25.804284Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:25.804322Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:25.804387Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:25.804426Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:25.804462Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 37894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:55.669209Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 2 2025-06-03T10:26:55.669219Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 1 2025-06-03T10:26:55.669228Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 1 2025-06-03T10:26:55.669237Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:55.669268Z node 2 :STATISTICS DEBUG: tx_init.cpp:295: [72075186224037894] TTxInit::Complete. Start navigate. PathId [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:55.669650Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:55.669792Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:55.669953Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:55.669967Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:55.670159Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:55.670169Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:55.670321Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:26:55.738028Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:55.738096Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:26:55.738348Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7622:5518], server id = [2:7623:5519], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:55.738388Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7622:5518], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:55.739629Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:55.739657Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:55.739791Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:55.739834Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:55.739921Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:26:55.740807Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7622:5518], server id = [2:7623:5519], tablet id = 72075186224037899 2025-06-03T10:26:55.740823Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:55.741077Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:55.760822Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7643:5538]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:55.760899Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:55.760909Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7643:5538], StatRequests.size() = 1 2025-06-03T10:26:55.832066Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OGIwYzA4YTItYzM0YTVmNGUtZDNhYzhlOGItNjJhZDc0MDQ=, TxId: 2025-06-03T10:26:55.832103Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OGIwYzA4YTItYzM0YTVmNGUtZDNhYzhlOGItNjJhZDc0MDQ=, TxId: 2025-06-03T10:26:55.832279Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:55.847575Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:55.847608Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:55.862143Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7660:5546] 2025-06-03T10:26:55.862210Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7661:5547] 2025-06-03T10:26:55.862233Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7575:5488], server id = [2:7661:5547], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:55.862261Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:7660:5546], schemeshard id = 72075186224037897 2025-06-03T10:26:55.862286Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7661:5547], node id = 2, have schemeshards count = 1, need schemeshards count = 0 2025-06-03T10:26:55.994139Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7670:5548] 2025-06-03T10:26:55.994438Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. 
ReplyToActorId [1:3129:3338] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-03T10:26:55.994451Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3129:3338] 2025-06-03T10:26:55.994470Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-03T10:26:56.557598Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 3 is different from the current 0 2025-06-03T10:26:56.557635Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:57.442587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:57.442627Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:26:57.442633Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:26:58.954469Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:58.954547Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:58.954553Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:58.954764Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:58.967745Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:58.967914Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:58.967933Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:58.968079Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:58.993970Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:58.994053Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 4, current Round: 0 2025-06-03T10:26:58.994219Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7744:5591], server id = [2:7745:5592], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:58.994251Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7744:5591], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:58.994632Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:58.994652Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:58.994728Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:58.994762Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:58.994832Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:58.995467Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7744:5591], server id = [2:7745:5592], tablet id = 72075186224037899 2025-06-03T10:26:58.995483Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:58.995652Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:59.019482Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzM1Mzg2ZWYtN2E1NTBhMWEtZjEwNzdjNGQtOGEwZTQ0M2U=, TxId: 2025-06-03T10:26:59.019517Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzM1Mzg2ZWYtN2E1NTBhMWEtZjEwNzdjNGQtOGEwZTQ0M2U=, TxId: 2025-06-03T10:26:59.019731Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:59.044621Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:59.044656Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. 
Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3129:3338] >> LocalTableWriter::WriteTable [GOOD] |62.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DataAlongWithHeartbeat >> LocalTableWriter::WaitTxIds >> LocalTableWriter::ConsistentWrite >> LocalTableWriter::ApplyInCorrectOrder [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::SupportedTypes [GOOD] Test command err: 2025-06-03T10:26:59.486539Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667672811582313:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:59.486705Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002112/r3tmp/tmpy9XFLk/pdisk_1.dat 2025-06-03T10:26:59.590284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:59.590325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:59.591289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:59.598285Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:1170 TServer::EnableGrpc on GrpcPort 1877, node 1 2025-06-03T10:26:59.663878Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:59.663893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:59.663895Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:59.663952Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1170 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:59.792887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
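(Aside on the two RunDataQuery entries in the statistics-traversal log above: both print their declarations as bare `AS List;`, which almost certainly means the generic type parameters were stripped when the log passed through markup extraction — an `<Uint32>` reads as a tag. A plausible reconstruction of the upsert follows; everything except the List element types is verbatim from the log, and the element types themselves are inferred assumptions, not taken from this output.)

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DECLARE $stat_type AS Uint32;
    DECLARE $column_tags AS List<Uint32>;  -- element type inferred, not verbatim from this log
    DECLARE $data AS List<String>;         -- element type inferred; carries serialized statistics payloads
    UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data)
    VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
           ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);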
2025-06-03T10:26:59.806184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:26:59.808004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946419920 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "int32_value" Type: "Int32" TypeId: 1 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "ui... (TRUNCATED) 2025-06-03T10:26:59.912183Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handshake: worker# [1:7511667672811582753:2284] 2025-06-03T10:26:59.912318Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:26:59.912384Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:26:59.912393Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Send handshake: worker# [1:7511667672811582753:2284] 2025-06-03T10:26:59.912700Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 45b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 41b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 44b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 66b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 71b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 72b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 12 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 13 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 14 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 58b Offset: 15 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 16 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 17 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 18 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 76b Offset: 19 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 20 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 54b Offset: 21 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 61b Offset: 22 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 51b Offset: 23 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 45b Offset: 24 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 46b Offset: 25 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 47b Offset: 26 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 50b Offset: 27 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 28 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 72b Offset: 29 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 30 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: 
ProducerId: },{ Codec: RAW Data: 64b Offset: 31 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:26:59.912906Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 45 },{ Order: 2 BodySize: 45 },{ Order: 3 BodySize: 45 },{ Order: 4 BodySize: 45 },{ Order: 5 BodySize: 41 },{ Order: 6 BodySize: 41 },{ Order: 7 BodySize: 45 },{ Order: 8 BodySize: 44 },{ Order: 9 BodySize: 66 },{ Order: 10 BodySize: 71 },{ Order: 11 BodySize: 72 },{ Order: 12 BodySize: 49 },{ Order: 13 BodySize: 48 },{ Order: 14 BodySize: 51 },{ Order: 15 BodySize: 58 },{ Order: 16 BodySize: 51 },{ Order: 17 BodySize: 54 },{ Order: 18 BodySize: 57 },{ Order: 19 BodySize: 76 },{ Order: 20 BodySize: 45 },{ Order: 21 BodySize: 54 },{ Order: 22 BodySize: 61 },{ Order: 23 BodySize: 51 },{ Order: 24 BodySize: 45 },{ Order: 25 BodySize: 46 },{ Order: 26 BodySize: 47 },{ Order: 27 BodySize: 50 },{ Order: 28 BodySize: 49 },{ Order: 29 BodySize: 72 },{ Order: 30 BodySize: 57 },{ Order: 31 BodySize: 64 }] } 2025-06-03T10:26:59.912977Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667672811582849:2345] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:26:59.912984Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:26:59.913027Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667672811582849:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 4 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 5 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 6 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 41b },{ Order: 7 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 8 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 44b },{ Order: 9 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 66b },{ Order: 10 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 71b },{ Order: 11 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 12 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 13 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 14 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 15 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 58b },{ Order: 16 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 17 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 18 Group: 0 Step: 0 TxId: 0 Kind: 
CdcDataChange Source: Unspecified Body: 57b },{ Order: 19 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 76b },{ Order: 20 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 21 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 54b },{ Order: 22 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 61b },{ Order: 23 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 51b },{ Order: 24 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 45b },{ Order: 25 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 46b },{ Order: 26 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 47b },{ Order: 27 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 50b },{ Order: 28 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 29 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 72b },{ Order: 30 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 31 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 64b }] } 2025-06-03T10:26:59.949690Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667672811582849:2345] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:26:59.949744Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:26:59.949761Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667672811582846:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DecimalKeys [GOOD] Test command err: 2025-06-03T10:26:59.679490Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667672863230216:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:59.835547Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002104/r3tmp/tmpsyTemP/pdisk_1.dat 2025-06-03T10:26:59.913895Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:59.917478Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667672863230012:2079] 1748946419670718 != 1748946419670721 2025-06-03T10:26:59.941947Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:59.942002Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:59.942572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:19768 TServer::EnableGrpc on GrpcPort 18014, node 1 2025-06-03T10:27:00.008443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:00.008460Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:00.008463Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:00.008539Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19768 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:00.151338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:00.161873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:00.170457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946420277 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Decimal(1,0)" TypeId: 4865 Id: 1 NotNull: false TypeInfo { DecimalPrecision: 1 DecimalScale: 0 } IsBuildInProgress: false } Columns { Name: "value" Type: "Decimal(35,10)" TypeId: 4865 I... 
(TRUNCATED) 2025-06-03T10:27:00.257473Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handshake: worker# [1:7511667677158197923:2284] 2025-06-03T10:27:00.257653Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:00.257744Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Decimal(1,0) : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:00.257753Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Send handshake: worker# [1:7511667677158197923:2284] 2025-06-03T10:27:00.261469Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 57b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 57b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:00.261550Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 57 },{ Order: 2 BodySize: 57 },{ Order: 3 BodySize: 57 }] } 2025-06-03T10:27:00.261609Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667677158198018:2343] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:00.261616Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.261637Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667677158198018:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 57b }] } 2025-06-03T10:27:00.262273Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667677158198018:2343] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:00.262284Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.262291Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667677158198014:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload |62.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |62.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw |62.3%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_rw/ydb-core-tx-columnshard-ut_rw >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WriteTable [GOOD] Test command err: 2025-06-03T10:27:00.009100Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667675692780868:2206];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020fe/r3tmp/tmp5g4os0/pdisk_1.dat 2025-06-03T10:27:00.158103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:00.227257Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:00.229404Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667675692780696:2079] 1748946419971657 != 1748946419971660 2025-06-03T10:27:00.261949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:00.261991Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:00.265777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20294 TServer::EnableGrpc on GrpcPort 17583, node 1 2025-06-03T10:27:00.333603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:00.333621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:00.333624Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:00.333687Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20294 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:00.512316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:00.517413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:00.518748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946420592 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-03T10:27:00.559392Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handshake: worker# [1:7511667679987748603:2284] 2025-06-03T10:27:00.559513Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:00.559574Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:00.559582Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Send handshake: worker# [1:7511667679987748603:2284] 2025-06-03T10:27:00.559898Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 36b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:00.559959Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 },{ Order: 2 BodySize: 36 },{ Order: 3 BodySize: 36 }] } 2025-06-03T10:27:00.560008Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667679987748696:2343] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:00.560014Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.560031Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667679987748696:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b },{ Order: 3 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-03T10:27:00.562488Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667679987748696:2343] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:00.562513Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.562523Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667679987748692:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved |62.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |62.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut |62.3%| [LD] {RESULT} $(B)/ydb/public/lib/ydb_cli/topic/ut/ydb-public-lib-ydb_cli-topic-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ApplyInCorrectOrder [GOOD] Test command err: 2025-06-03T10:27:00.347472Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667678182809026:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:00.347609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002108/r3tmp/tmp6bxLwE/pdisk_1.dat 2025-06-03T10:27:00.562830Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:9548 TServer::EnableGrpc on GrpcPort 19110, node 1 2025-06-03T10:27:00.640883Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:00.640903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:00.640907Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:00.640972Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9548 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:27:00.717803Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:00.717839Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:00.721725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:00.770547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:00.775036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:00.782227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946420900 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-03T10:27:00.869963Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handshake: worker# [1:7511667678182809471:2284] 2025-06-03T10:27:00.870288Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:00.870366Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:00.870375Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Send handshake: worker# [1:7511667678182809471:2284] 2025-06-03T10:27:00.873311Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:00.874592Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-03T10:27:00.874647Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-03T10:27:00.874727Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667678182809567:2343] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:00.874734Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle 
NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.874752Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667678182809567:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-03T10:27:00.877682Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667678182809567:2343] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:00.877725Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.877736Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-03T10:27:00.881467Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:00.881633Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } 2025-06-03T10:27:00.881654Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 },{ Order: 3 BodySize: 48 }] } 2025-06-03T10:27:00.881688Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667678182809567:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 3 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-03T10:27:00.893656Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667678182809567:2343] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:00.893688Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:00.893719Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 
72057594046644480, LocalPathId: 2][1:7511667678182809562:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2,3] }
|62.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest
>> LocalTableWriter::WaitTxIds [GOOD]
>> LocalTableWriter::ConsistentWrite [GOOD]
>> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD]
>> LocalTableWriter::DataAlongWithHeartbeat [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/health_check/ut/unittest >> THealthCheckTest::ServerlessWithExclusiveNodesWhenTroublesWithSharedNodes [GOOD]
Test command err:
2025-06-03T10:26:52.059010Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:701:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:52.059133Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:52.059155Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:52.059495Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:698:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:52.059579Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:52.059619Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002700/r3tmp/tmpLaPD28/pdisk_1.dat 2025-06-03T10:26:52.309135Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1780, node 1 TClient is connected to server localhost:16124 2025-06-03T10:26:52.559317Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:52.559338Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:52.559343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:52.559466Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:55.291648Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:281:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:55.291798Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:55.291854Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:55.292294Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:701:2355], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:55.292338Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:55.292424Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002700/r3tmp/tmpRrxzmU/pdisk_1.dat 2025-06-03T10:26:55.540692Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4133, node 3 TClient is connected to server localhost:12438 2025-06-03T10:26:55.796058Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:55.796081Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:55.796087Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:55.796214Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:57.900018Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:456:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:57.900093Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:57.900109Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002700/r3tmp/tmpzJhKWr/pdisk_1.dat 2025-06-03T10:26:58.099053Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24320, node 5 TClient is connected to server localhost:9765 2025-06-03T10:26:58.271694Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:58.271716Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:58.271721Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:58.271794Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration self_check_result: MAINTENANCE_REQUIRED issue_log { id: "ORANGE-f489-1231c6b1" status: ORANGE message: "Database has compute issues" location { database { name: "/Root" } } reason: "ORANGE-6fa7-1231c6b1" reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "ORANGE-6fa7-1231c6b1" status: ORANGE message: "Compute has issues with tablets" location { database { name: "/Root" } } reason: "ORANGE-0df4-1231c6b1-Unknown" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-5" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-5" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 5 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "ORANGE-0df4-1231c6b1-Unknown" status: ORANGE message: "Tablets are restarting too often" location { compute { tablet { type: "Unknown" id: "1" count: 1 } } database { name: "/Root" } node { } } type: "TABLET" level: 4 } issue_log { id: "ORANGE-f489-2135748c" status: ORANGE message: "Database has compute issues" location { database { name: "/Root/shared" } } reason: "ORANGE-6fa7-2135748c" reason: "YELLOW-1ba8-2135748c" type: "DATABASE" level: 1 } issue_log { id: "ORANGE-6fa7-2135748c" status: ORANGE message: "Compute has issues with tablets" location { database { name: "/Root/shared" } } reason: "ORANGE-0df4-2135748c-Unknown" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-1ba8-2135748c" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root/shared" } } reason: "YELLOW-e9e2-2135748c-6" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-2135748c-6" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 6 host: "::1" port: 12002 } } database { name: "/Root/shared" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "ORANGE-0df4-2135748c-Unknown" status: ORANGE message: "Tablets are restarting too often" location { compute { tablet { type: "Unknown" id: "1" count: 1 } } 
database { name: "/Root/shared" } node { } } type: "TABLET" level: 4 } database_status { name: "/Root" overall: ORANGE storage { overall: GREEN pools { id: "static" overall: GREEN groups { id: "0" overall: GREEN } } } compute { overall: ORANGE nodes { id: "5" overall: YELLOW load { overall: YELLOW load: 162.835449 cores: 64 } } tablets { overall: ORANGE type: "Unknown" state: "RESTARTS_TOO_OFTEN" count: 1 id: "1" } } } database_status { name: "/Root/shared" overall: ORANGE storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "5-1-55" overall: GREEN pdisk { id: "5-1" overall: GREEN } } } } } compute { overall: ORANGE nodes { id: "6" overall: YELLOW load { overall: YELLOW load: 162.835449 cores: 64 } } tablets { overall: ORANGE type: "Unknown" state: "RESTARTS_TOO_OFTEN" count: 1 id: "1" } } } location { id: 5 host: "::1" port: 12001 } 2025-06-03T10:26:59.761778Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:59.761805Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:59.761867Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:311:2354], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002700/r3tmp/tmpfwHcY3/pdisk_1.dat TServer::EnableGrpc on GrpcPort 64107, node 7 TClient is connected to server localhost:15919 self_check_result: EMERGENCY issue_log { id: "RED-f489" status: RED message: "Database has compute issues" reason: "RED-7469" type: "DATABASE" level: 1 } issue_log { id: "RED-7469" status: RED message: "There are no compute nodes" type: "COMPUTE" level: 2 } database_status { name: "/Root/serverless" overall: RED storage { overall: GREEN pools { id: "/Root:test" overall: GREEN groups { id: "2147483648" overall: GREEN vdisks { id: "7-1-55" overall: GREEN pdisk { id: "7-1" overall: GREEN } } } } } compute { overall: RED } } location { id: 7 host: "::1" port: 12001 } 2025-06-03T10:27:01.542965Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-03T10:27:01.543055Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
2025-06-03T10:27:01.543066Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002700/r3tmp/tmpB3gnnZ/pdisk_1.dat
2025-06-03T10:27:01.806297Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 13017, node 8
TClient is connected to server localhost:18639
2025-06-03T10:27:01.991767Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:27:01.991789Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:27:01.991794Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:27:01.991886Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
>> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD]
>> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::PeriodicTopicStatsReload [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:27:02.763019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:27:02.763052Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:27:02.763065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:27:02.763072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:27:02.763100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:27:02.763105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:27:02.763116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:27:02.763131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600,
Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:02.763268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:02.763349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:02.871006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:02.871047Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:02.896023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:02.896227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:02.896288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:02.925880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:02.925975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:02.926144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:02.926240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:02.933196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:02.933330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:02.933825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:02.933838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:02.933847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:02.933856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:02.933862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:02.933889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:02.957955Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:03.090428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:03.090554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.090643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:03.090708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:03.090721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.097886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.097946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:03.098057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.098073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:03.098080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:03.098088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:03.099941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.099965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:03.099975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:03.109865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.109905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.109915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.109926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:03.110828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:03.122887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:03.122978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:03.123256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.123312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:03.123326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.123436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:03.123447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.123503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:03.123519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:03.129732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.129758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.129844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
TICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 8, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:27:03.546288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546420Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.546640Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:27:03.558501Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:03.558598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:03.559014Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435083, 
Sender [1:570:2498], Recipient [1:570:2498]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:03.559025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4945: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:03.559193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.559203Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:03.559213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:03.559222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.559228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:03.559232Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:03.559267Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274399233, Sender [1:606:2498], Recipient [1:570:2498]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:03.559274Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:03.559279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:570:2498] sender: [1:626:2058] recipient: [1:15:2062] 2025-06-03T10:27:03.629088Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:625:2540], Recipient [1:570:2498]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:27:03.629113Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:27:03.629149Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:03.629231Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 70us result status StatusSuccess 2025-06-03T10:27:03.629413Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { 
GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:03.629523Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271188001, Sender [1:627:2541], Recipient [1:570:2498]: NKikimrPQ.TEvPeriodicTopicStats PathId: 2 Generation: 1 Round: 96 DataSize: 19 UsedReserveSize: 7 2025-06-03T10:27:03.629530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4920: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-03T10:27:03.629538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 19 UsedReserveSize 7 2025-06-03T10:27:03.629546Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-03T10:27:03.629557Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-03T10:27:03.629597Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:628:2542], Recipient [1:570:2498]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:27:03.629606Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:27:03.629614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:03.629627Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 15us result status StatusSuccess 2025-06-03T10:27:03.629675Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: 
EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 1 WriteSpeedInBytesPerSecond: 7 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 7 AccountSize: 17 DataSize: 17 UsedReserveSize: 7 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::WaitTxIds [GOOD]
Test command err:
2025-06-03T10:27:01.592783Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667684938453422:2196];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:27:01.600278Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020df/r3tmp/tmpPHI5gg/pdisk_1.dat
2025-06-03T10:27:01.932256Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:27:01.932285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:27:01.945524Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667684938453265:2079] 1748946421589185 != 1748946421589188
2025-06-03T10:27:01.946500Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:27:01.952486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:61986
TServer::EnableGrpc on GrpcPort 62801, node 1
2025-06-03T10:27:02.036620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:27:02.036637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:27:02.036639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:27:02.036697Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:61986
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:02.200294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:02.214131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:02.215961Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946422370 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-03T10:27:02.350542Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handshake: worker# [1:7511667689233421270:2345] 2025-06-03T10:27:02.350663Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:02.350733Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:02.350744Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Send handshake: worker# [1:7511667689233421270:2345] 2025-06-03T10:27:02.350918Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.352236Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-03T10:27:02.352273Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-03T10:27:02.352315Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667689233421273:2344] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:02.352335Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 }
2025-06-03T10:27:02.352352Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667689233421273:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] }
2025-06-03T10:27:02.356563Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667689233421273:2344] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK
2025-06-03T10:27:02.356602Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 }
2025-06-03T10:27:02.356613Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] }
2025-06-03T10:27:03.351828Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 }
2025-06-03T10:27:03.351899Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 49 }] }
2025-06-03T10:27:03.351943Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667689233421273:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] }
2025-06-03T10:27:03.365769Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667689233421273:2344] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK
2025-06-03T10:27:03.365805Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 }
2025-06-03T10:27:03.365818Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667689233421269:2344] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::DataAlongWithHeartbeat [GOOD]
Test command err:
2025-06-03T10:27:01.700898Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667683348459350:2209];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:27:01.701097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784:
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020e5/r3tmp/tmpD7jWn0/pdisk_1.dat 2025-06-03T10:27:01.869907Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:01.869946Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:01.875740Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:01.877387Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667683348459154:2079] 1748946421688174 != 1748946421688177 2025-06-03T10:27:01.891756Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17993 TServer::EnableGrpc on GrpcPort 62372, node 1 2025-06-03T10:27:01.977638Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:01.977655Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:01.977659Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:01.977750Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17993 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:02.194821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:27:02.212403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946422370 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-03T10:27:02.344389Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handshake: worker# [1:7511667687643427154:2344] 2025-06-03T10:27:02.344516Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:02.344592Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:02.344601Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Send handshake: worker# [1:7511667687643427154:2344] 2025-06-03T10:27:02.344879Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 
CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 19b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.346005Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-03T10:27:02.346056Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 }] } 2025-06-03T10:27:02.346101Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687643427158:2343] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:02.346108Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.346121Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687643427158:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-03T10:27:02.348293Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687643427158:2343] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:02.348308Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.348319Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687643427153:2343] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_table_writer/unittest >> LocalTableWriter::ConsistentWrite [GOOD] Test command err: 2025-06-03T10:27:01.801313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667683594210576:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:01.801469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020d7/r3tmp/tmpGZPTD7/pdisk_1.dat 2025-06-03T10:27:01.992359Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667683594210419:2079] 1748946421797962 != 1748946421797965 2025-06-03T10:27:02.038480Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:02.038515Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:02.039291Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:02.041791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6581 TServer::EnableGrpc on GrpcPort 64349, node 1 2025-06-03T10:27:02.117976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:02.117989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:02.117992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:02.118049Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6581 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:02.340057Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:02.347114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:02.348368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946422447 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) 2025-06-03T10:27:02.429559Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handshake: worker# [1:7511667687889178330:2285] 2025-06-03T10:27:02.429667Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:02.429763Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:27:02.429772Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Send handshake: worker# [1:7511667687889178330:2285] 2025-06-03T10:27:02.429922Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 48b Offset: 1 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 2 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 48b Offset: 3 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.430864Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 10 TxId: 0 } TxId: 1 } 2025-06-03T10:27:02.430891Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 48 },{ Order: 2 BodySize: 48 },{ Order: 3 BodySize: 48 }] } 2025-06-03T10:27:02.430933Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] 
Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:27:02.430940Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.430957Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 1 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 2 Group: 0 Step: 2 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b },{ Order: 3 Group: 0 Step: 3 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 48b }] } 2025-06-03T10:27:02.437610Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:02.437637Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.437650Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1,2,3] } 2025-06-03T10:27:02.437827Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 4 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.437929Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 5 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 6 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 7 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 8 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.438035Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:490: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimrReplication.TEvTxIdResult VersionTxIds { Version { Step: 20 TxId: 0 } TxId: 2 } VersionTxIds { Version { Step: 30 TxId: 0 } TxId: 3 } 2025-06-03T10:27:02.438056Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 5 BodySize: 49 },{ Order: 6 BodySize: 49 },{ Order: 7 BodySize: 49 },{ Order: 8 BodySize: 49 }] } 2025-06-03T10:27:02.438089Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 5 Group: 0 Step: 11 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 6 Group: 0 Step: 12 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 7 Group: 0 Step: 21 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 8 Group: 0 Step: 22 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-03T10:27:02.443958Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:02.443990Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.444002Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [5,6,7,8] } 2025-06-03T10:27:02.444190Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 49b Offset: 9 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: },{ Codec: RAW Data: 49b Offset: 10 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } 2025-06-03T10:27:02.444240Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 9 BodySize: 49 },{ Order: 10 BodySize: 49 }] } 2025-06-03T10:27:02.444273Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 9 Group: 0 Step: 13 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b },{ Order: 10 Group: 0 Step: 23 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 49b }] } 2025-06-03T10:27:02.453578Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:2:1][72075186224037888][1:7511667687889178424:2345] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:27:02.453597Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:27:02.453606Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [9,10] } 2025-06-03T10:27:02.453744Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 
72057594046644480, LocalPathId: 2][1:7511667687889178421:2345] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: TestSource Records [{ Codec: RAW Data: 19b Offset: 11 SeqNo: 0 CreateTime: 1970-01-01T00:00:00.000000Z MessageGroupId: ProducerId: }] } >> StatisticsSaveLoad::Simple [GOOD] >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest |62.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |62.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber |62.4%| [LD] {RESULT} $(B)/ydb/core/base/ut_board_subscriber/ydb-core-base-ut_board_subscriber ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeSameOperationId [GOOD] Test command err: 2025-06-03T10:24:21.291206Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:21.291769Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:21.291976Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a53/r3tmp/tmp6nQEvh/pdisk_1.dat 2025-06-03T10:24:21.884319Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28790, node 1 2025-06-03T10:24:22.261022Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:22.261049Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:22.261055Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:22.261109Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:22.261817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:22.386808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:22.386853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:22.399532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32113 2025-06-03T10:24:22.840933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:23.851111Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:23.862082Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:23.862125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:23.934232Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:23.934988Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:24.124357Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124563Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124706Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:24.124747Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124795Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124817Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124835Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124856Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.124897Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:24.283219Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:24.283264Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:24.294491Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:24.323003Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:24.333926Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:24.333953Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:24.340375Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:24.340428Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:24.340450Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:24.340456Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:24.340463Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:24.340469Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:24.340474Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:24.340481Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:24.340597Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:24.353334Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.353369Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1861:2597], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:24.354606Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:24.355550Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1895:2617] 
2025-06-03T10:24:24.355605Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1895:2617], schemeshard id = 72075186224037897 2025-06-03T10:24:24.356774Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:24:24.360440Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:24.360462Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:24.360474Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:24:24.363482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:24.365103Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:24.365130Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:24.474350Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:24.548451Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:24.602173Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:25.186246Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2211:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.186300Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.190439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:24:25.258808Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:25.258901Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:25.258976Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:24:25.259010Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:24:25.259043Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:24:25.259082Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:24:25.259114Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:24:25.259147Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2300:2843];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 
adata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:26:52.987304Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7445:5413]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:52.987374Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:52.987388Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7447:5415] 2025-06-03T10:26:52.987402Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7447:5415] 2025-06-03T10:26:52.987554Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7448:5416] 2025-06-03T10:26:52.987586Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7447:5415], server id = [2:7448:5416], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:52.987613Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7448:5416], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:52.987632Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:26:52.987665Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:52.987681Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7445:5413], StatRequests.size() = 1 2025-06-03T10:26:53.019021Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzFiZWUyOTctMTg3YjBjYzYtZmQ3NGVlNjItOWFlZDk4Zjk=, TxId: 2025-06-03T10:26:53.019051Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzFiZWUyOTctMTg3YjBjYzYtZmQ3NGVlNjItOWFlZDk4Zjk=, TxId: 2025-06-03T10:26:53.019278Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:53.031216Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:26:53.031248Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:26:53.089322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:26:53.089363Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:26:53.173563Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7447:5415], schemeshard count = 1 2025-06-03T10:26:54.428415Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:26:54.428457Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 
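The analyze records above show the aggregator's fan-out bookkeeping: TTxAnalyzeTableRequest goes to each shard of the table ("AnalyzedShards 1"), TEvAnalyzeTableResponse is counted per shard, and only when "All shards are analyzed" does the force traversal proceed (TTxNavigate, TTxResolve, statistics collection, TTxFinishTraversal, TEvAnalyzeResponse). A rough C++ model of that counting is below; the names come from the log, the bookkeeping itself is a stand-in.

// Rough model of the analyze fan-out visible in the trace: the aggregator
// sends a request to every shard of the table, and the force traversal
// starts only once all shards have responded.
#include <cstddef>
#include <iostream>

struct ForceTraversalTable {
    std::size_t AnalyzedShards = 0;  // "AnalyzedShards 1" in the trace
    std::size_t RespondedShards = 0;

    // Called once per TEvAnalyzeTableResponse.
    bool OnShardResponse() {
        ++RespondedShards;
        return RespondedShards == AnalyzedShards; // "All shards are analyzed"
    }
};

int main() {
    ForceTraversalTable table;
    table.AnalyzedShards = 1; // single-shard column table, as in the log

    if (table.OnShardResponse()) {
        // ScheduleNextTraversal -> TTxNavigate -> TTxResolve -> collect
        // statistics -> TTxFinishTraversal -> TEvAnalyzeResponse.
        std::cout << "force traversal complete, send TEvAnalyzeResponse\n";
    }
    return 0;
}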
2025-06-03T10:26:54.429282Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:54.441207Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:54.441375Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:54.441387Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037897, LocalPathId: 4], AnalyzedShards 1 2025-06-03T10:26:54.457665Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:54.481496Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. ... blocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR cookie 0 ... waiting for TEvAnalyzeTableResponse (done) ... unblocking NKikimr::NStat::TEvStatistics::TEvAnalyzeTableResponse from TX_COLUMNSHARD_ACTOR to STATISTICS_AGGREGATOR 2025-06-03T10:26:54.481970Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:26:54.482002Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:26:54.482158Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:22: [72075186224037894] TTxAnalyze::Execute. ReplyToActorId [1:3123:3333] , Record { OperationId: "operationId" Tables { PathId { OwnerId: 72075186224037897 LocalId: 4 } } Types: TYPE_COUNT_MIN_SKETCH } 2025-06-03T10:26:54.482167Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:38: [72075186224037894] TTxAnalyze::Execute. Update existing force traversal. OperationId operationId , ReplyToActorId [1:3123:3333] 2025-06-03T10:26:54.493799Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-03T10:26:54.493826Z node 2 :STATISTICS DEBUG: tx_analyze.cpp:97: [72075186224037894] TTxAnalyze::Complete 2025-06-03T10:26:55.945593Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:26:55.945660Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:26:55.945667Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:55.945828Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:26:55.958429Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:26:55.958533Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:26:55.958550Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:26:55.958819Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. 
Node count = 1 2025-06-03T10:26:55.970442Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:26:55.970507Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:26:55.970653Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7566:5485], server id = [2:7567:5486], tablet id = 72075186224037899, status = OK 2025-06-03T10:26:55.970679Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7566:5485], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:26:55.971627Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:26:55.971644Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:26:55.971761Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:26:55.971787Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:26:55.971828Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7566:5485], server id = [2:7567:5486], tablet id = 72075186224037899 2025-06-03T10:26:55.971832Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:26:55.971873Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:26:55.972490Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:26:56.024527Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7587:5505]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:56.024597Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:26:56.024605Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7587:5505], StatRequests.size() = 1 2025-06-03T10:26:56.138651Z node 2 :SYSTEM_VIEWS WARN: tx_interval_summary.cpp:212: [72075186224037891] TEvIntervalQuerySummary, time mismath: node id# 2, interval end# 1970-01-01T00:02:04.000000Z, event interval end# 2025-06-03T10:26:54.000000Z 2025-06-03T10:26:56.138870Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzFkNDk3Y2EtNTZmMmQ4YTctNWZmYTViYjYtNzU4MDU0NzA=, TxId: 2025-06-03T10:26:56.138881Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzFkNDk3Y2EtNTZmMmQ4YTctNWZmYTViYjYtNzU4MDU0NzA=, TxId: 2025-06-03T10:26:56.139019Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:26:56.157210Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] 
TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:26:56.157239Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3123:3333] 2025-06-03T10:26:56.834835Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:26:56.834873Z node 2 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:26:59.145837Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:26:59.145927Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:26:59.181634Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:27:02.001642Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:27:02.001686Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:27:03.425779Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:27:03.448638Z node 2 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:27:03.448675Z node 2 :STATISTICS DEBUG: service_impl.cpp:1021: Skip TEvStatisticsRequestTimeout >> StatisticsSaveLoad::Delete [GOOD] >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Simple [GOOD] Test command err: 2025-06-03T10:27:00.216020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:00.216103Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:00.216136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e94/r3tmp/tmpzCAigu/pdisk_1.dat 2025-06-03T10:27:00.424501Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6081, node 1 2025-06-03T10:27:00.569566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:00.569594Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:00.569599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:00.569733Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:00.570396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:00.677726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:00.677780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:00.693382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4795 2025-06-03T10:27:01.153789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:02.356823Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:27:02.406706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:02.406752Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:02.477226Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:02.481987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:02.682614Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.682817Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.682991Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:27:02.683039Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.683100Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.683119Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.683138Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.683171Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.683189Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.870016Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:02.870070Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:02.882786Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:02.986113Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:03.015416Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:27:03.015461Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:27:03.053867Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:27:03.054242Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:27:03.054277Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:27:03.054285Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:27:03.054293Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:27:03.054301Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:27:03.054308Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:27:03.054317Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:27:03.055786Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:27:03.112828Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:03.112871Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:03.114172Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:27:03.131221Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
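The statistics themselves are persisted with the parameterized YQL visible earlier in the trace and again in the records below ("[TQueryBase] RunDataQuery: ... UPSERT INTO `.metadata/_statistics` ..."). Note that in the captured log the DECLARE statements read "AS List;", which looks like angle-bracketed type parameters stripped during log extraction; the List<Uint32>/List<String> element types in the sketch below are an assumption, not a quote, and RunDataQuery is a stand-in for the internal query actor, not a real API.

// The UPSERT into `.metadata/_statistics` as it appears in the trace.
// The List element types are assumed (stripped in the captured log);
// RunDataQuery stands in for the internal "[TQueryBase] RunDataQuery" step.
#include <iostream>
#include <string_view>

void RunDataQuery(std::string_view query) {
    std::cout << "[TQueryBase] RunDataQuery:\n" << query << '\n';
}

int main() {
    constexpr std::string_view kUpsert = R"(
DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;
DECLARE $stat_type AS Uint32;
DECLARE $column_tags AS List<Uint32>;  -- assumed element type
DECLARE $data AS List<String>;         -- assumed element type
UPSERT INTO `.metadata/_statistics`
    (owner_id, local_path_id, stat_type, column_tag, data)
VALUES
    ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]),
    ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]);
)";
    RunDataQuery(kUpsert);
    return 0;
}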
2025-06-03T10:27:03.131339Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:27:03.131735Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:27:03.141357Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:27:03.141393Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:27:03.141410Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:27:03.149519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:27:03.151727Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:27:03.151772Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:27:03.341679Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:27:03.502046Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:27:03.577758Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:27:04.165570Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:27:04.165728Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:27:04.168399Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:27:04.169258Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2231:3070], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.169282Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2247:3075], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.169311Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.170696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897 2025-06-03T10:27:04.185799Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2251:3078], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:27:04.438054Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2340:3107] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:04.494100Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2362:3119]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:04.494180Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:27:04.494197Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2364:3121] 2025-06-03T10:27:04.494212Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2364:3121] 2025-06-03T10:27:04.494452Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2365:2833] 2025-06-03T10:27:04.494559Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2364:3121], server id = [2:2365:2833], tablet id = 72075186224037894, status = OK 2025-06-03T10:27:04.494575Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2365:2833], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:27:04.494596Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:27:04.494655Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:27:04.494668Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2362:3119], StatRequests.size() = 1 2025-06-03T10:27:04.536258Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YjUyZDY2N2UtNTQyZTE5MTMtZDE1N2Q4MWQtNTQxOTYxMzk=, TxId: 2025-06-03T10:27:04.536296Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=YjUyZDY2N2UtNTQyZTE5MTMtZDE1N2Q4MWQtNTQxOTYxMzk=, TxId: 2025-06-03T10:27:04.536608Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:27:04.537240Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-03T10:27:04.557090Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2398:3142]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:04.557163Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:27:04.557172Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2398:3142], StatRequests.size() = 1 2025-06-03T10:27:04.614779Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=YTIzNzhmZmMtZWU3MjlhYTMtZmQ4MGM5MmYtYmZjYTNkNTQ=, TxId: 01jwtn94rqb7e0d91k2w3wpn0e 2025-06-03T10:27:04.614838Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=YTIzNzhmZmMtZWU3MjlhYTMtZmQ4MGM5MmYtYmZjYTNkNTQ=, TxId: 01jwtn94rqb7e0d91k2w3wpn0e 2025-06-03T10:27:04.615487Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:27:04.620757Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-03T10:27:04.641221Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=NTVhMzJjOWQtZWQ0YzY5YjYtYzAxMGU0YTMtNjg0ZmE1OWE=, TxId: 01jwtn94sd896m9rb4py94tayp 2025-06-03T10:27:04.641281Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=NTVhMzJjOWQtZWQ0YzY5YjYtYzAxMGU0YTMtNjg0ZmE1OWE=, TxId: 01jwtn94sd896m9rb4py94tayp |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::Delete [GOOD] Test command err: 2025-06-03T10:26:59.334888Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:59.334987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:59.335025Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e91/r3tmp/tmpKdU7NV/pdisk_1.dat 2025-06-03T10:26:59.745286Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17522, node 1 2025-06-03T10:26:59.911961Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:59.911989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:59.911995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:59.912120Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:59.912846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:00.020285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:00.020332Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:00.033605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21546 2025-06-03T10:27:00.515817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:01.696315Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:27:01.739527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:01.739568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:01.807506Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:01.809068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:02.061992Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.062127Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.062270Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:27:02.063412Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.063467Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.063485Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.063502Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.063530Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.063545Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:02.235039Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:02.235091Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:02.250975Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:02.559988Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:02.638960Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:27:02.638996Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:27:02.711670Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:27:02.712092Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:27:02.712125Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:27:02.712131Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:27:02.712138Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:27:02.712145Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:27:02.712163Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:27:02.712172Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:27:02.712380Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:27:02.756668Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:02.756711Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1864:2599], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:02.760934Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1876:2607] 2025-06-03T10:27:02.762368Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1907:2624] 
2025-06-03T10:27:02.762449Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1907:2624], schemeshard id = 72075186224037897 2025-06-03T10:27:02.777937Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:27:02.806529Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:27:02.806562Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:27:02.806577Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:27:02.815654Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:27:02.831607Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:27:02.831680Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:27:03.113903Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:27:03.302142Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:27:03.349704Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:27:04.042377Z node 1 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:27:04.042490Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:27:04.047292Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List<Uint32>; DECLARE $data AS List<String>; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:27:04.053481Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2237:3073], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.053529Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2250:3078], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.053545Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:04.055375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72075186224037897 2025-06-03T10:27:04.103175Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2254:3081], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:27:04.317762Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2341:3110] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:04.447857Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2363:3122]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:04.447928Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:27:04.447944Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2365:3124] 2025-06-03T10:27:04.447959Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2365:3124] 2025-06-03T10:27:04.448168Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2366:2831] 2025-06-03T10:27:04.448247Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2365:3124], server id = [2:2366:2831], tablet id = 72075186224037894, status = OK 2025-06-03T10:27:04.448295Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2366:2831], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:27:04.448315Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:27:04.448383Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:27:04.448396Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2363:3122], StatRequests.size() = 1 2025-06-03T10:27:04.481431Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=ZDY2MmVhNmEtYzI1OTk5OWYtNTA0MzcxY2UtNDlmMDhiMzk=, TxId: 2025-06-03T10:27:04.481467Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=ZDY2MmVhNmEtYzI1OTk5OWYtNTA0MzcxY2UtNDlmMDhiMzk=, TxId: 2025-06-03T10:27:04.481793Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:27:04.482426Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:27:04.488326Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2399:3145]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:04.488403Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:27:04.488411Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2399:3145], StatRequests.size() = 1 2025-06-03T10:27:04.535828Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=OTM3M2NkZmQtYzdjODkwZDktOWQxZmY2OGUtM2Q4YzAxMDU=, TxId: 2025-06-03T10:27:04.535864Z node 1 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=1&id=OTM3M2NkZmQtYzdjODkwZDktOWQxZmY2OGUtM2Q4YzAxMDU=, TxId: 2025-06-03T10:27:04.536267Z node 1 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:27:04.536889Z node 1 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tag AS Uint32; SELECT data FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id AND stat_type = $stat_type AND column_tag = $column_tag; 2025-06-03T10:27:04.560415Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [1:2431:3160]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:04.560481Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:27:04.560490Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [1:2431:3160], StatRequests.size() = 1 2025-06-03T10:27:04.650179Z node 1 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=1&id=MTBlNjM5ZTktNjE5NzVkZTUtYzEwMWI0YzUtMmJjZDg2MzU=, TxId: 01jwtn94s7aank6p1tjqv9m919 2025-06-03T10:27:04.650234Z node 1 :STATISTICS WARN: query_actor.cpp:372: [TQueryBase] Finish with BAD_REQUEST, Issues: {
: Error: No data }, SessionId: ydb://session/3?node_id=1&id=MTBlNjM5ZTktNjE5NzVkZTUtYzEwMWI0YzUtMmJjZDg2MzU=, TxId: 01jwtn94s7aank6p1tjqv9m919 |62.4%| [TA] $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeReserved [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:03.420266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:03.420297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:03.420303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:03.420309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:03.420333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:03.420337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:03.420347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:03.420363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:03.420487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:03.420564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:03.435358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:03.435402Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:03.439835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:03.439979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:03.440032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: 
UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:03.442141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:03.442211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:03.442338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.442432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:03.443106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:03.443155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:03.443535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.443548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:03.443561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:03.443571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.443577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:03.443602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.445058Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:03.465983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:03.466075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.466149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:03.466200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:03.466212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: 
ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.467101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.467136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:03.467225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.467238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:03.467245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:03.467251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:03.467801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.467817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:03.467823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:03.468186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.468200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.468207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.468215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:03.468869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:03.469286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:03.469356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:03.469595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep 
Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.469627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:03.469636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.469730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:03.469739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.469783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:03.469798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:03.470245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.470258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.470311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
tId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.217469Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-03T10:27:05.217519Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 2 2025-06-03T10:27:05.217670Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.217709Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.217723Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.217860Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 2 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-03T10:27:05.217883Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. 
PendingUpdates size 0 2025-06-03T10:27:05.217954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-03T10:27:05.241522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:27:05.253622Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.253717Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 104us result status StatusSuccess 2025-06-03T10:27:05.253856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.937471Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-03T10:27:05.937523Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-03T10:27:05.937716Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 1, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.937741Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 2, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 45532800 
PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.937757Z node 1 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186233409546, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 16975298 UsedReserveSize: 16975298 ReserveSize: 45532800 PartitionConfig{ LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 TotalPartitions: 3 } 2025-06-03T10:27:05.937899Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 3 DataSize: 16975298 UsedReserveSize: 16975298 2025-06-03T10:27:05.937921Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2025-06-03T10:27:05.937984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 16975298 2025-06-03T10:27:05.958012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:27:05.968607Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.968683Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 90us result status StatusSuccess 2025-06-03T10:27:05.968813Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:06.017661Z node 1 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:06.017761Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 120us result status StatusSuccess 2025-06-03T10:27:06.017898Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 2678400 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_RESERVED_CAPACITY } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 136598400 AccountSize: 136598400 DataSize: 16975298 UsedReserveSize: 16975298 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |62.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest |62.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |62.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |62.4%| [TA] {RESULT} $(B)/ydb/core/tx/replication/service/ut_table_writer/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_topic_splitmerge/ydb-core-tx-schemeshard-ut_topic_splitmerge |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> TColumnShardTestReadWrite::WriteReadDuplicate >> KqpSysColV1::StreamSelectRowAsterisk >> TTopicWriterTests::TestTopicWriterParams_Format_NewlineDelimited [GOOD] >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] >> TTopicWriterTests::TestTopicWriterParams_No_Delimiter [GOOD] >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_No_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> TTopicWriterTests::TestEnterMessage_OnlyDelimiters [GOOD] >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] |62.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |62.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |62.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/engines/ut/ydb-core-tx-columnshard-engines-ut |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_Format_Concatenated [GOOD] |62.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] >> TTopicWriterTests::TestEnterMessage_EmptyInput [GOOD] >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform_Invalid_Encode [GOOD] >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] |62.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |62.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/ydb-core-fq-libs-checkpoint_storage-ut |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestTopicWriterParams_InvalidDelimiter [GOOD] |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_SomeBinaryData [GOOD] |62.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |62.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |62.5%| [LD] {RESULT} $(B)/ydb/library/table_creator/ut/ydb-library-table_creator-ut |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_Custom_Delimiter_Delimited [GOOD] >> TBoardSubscriberTest::ReconnectReplica >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_With_Base64_Transform [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::TopicPeriodicStatMeteringModeRequest [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: 
[1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:06.197616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:06.197650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:06.197656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:06.197663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:06.197706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:06.197712Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:06.197723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:06.197739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:06.197875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:06.197951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:06.213331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:06.213363Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:06.217618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:06.217758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:06.217808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:06.219720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:06.219788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:06.219926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:06.220013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:06.220701Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:06.220752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:06.221168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:06.221183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:06.221194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:06.221207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:06.221215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:06.221240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.222755Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:06.286309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:06.286435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.286522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:06.286578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:06.286592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.292981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:06.293037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:06.293130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.293145Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:06.293152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:06.293159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:06.293903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.293927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:06.293934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:06.294397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.294411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:06.294418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:06.294426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:06.295325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:06.301773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:06.301853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:06.302147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:06.302198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:06.302210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:06.302314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 
2025-06-03T10:27:06.302326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:06.302378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:06.302395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:06.303323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:06.303339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:06.303405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ncing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2025-06-03T10:27:08.264523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 0 2025-06-03T10:27:08.277336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:27:08.289649Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:08.289778Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 154us result status StatusSuccess 2025-06-03T10:27:08.289954Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 
TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:08.989609Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186233409547][Topic1] TPersQueueReadBalancer::HandleWakeup 2025-06-03T10:27:08.989682Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186233409547][Topic1] Send TEvPersQueue::TEvStatus TabletId: 72075186233409546 Cookie: 3 2025-06-03T10:27:08.989986Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186233409547][Topic1] Send TEvPeriodicTopicStats PathId: 2 Generation: 2 StatsReportRound: 4 DataSize: 16975298 UsedReserveSize: 0 2025-06-03T10:27:08.990019Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186233409547][Topic1] ProcessPendingStats. PendingUpdates size 0 2025-06-03T10:27:08.990108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046678944, LocalPathId: 2] DataSize 16975298 UsedReserveSize 0 2025-06-03T10:27:09.005910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:27:09.021575Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.021703Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 154us result status StatusSuccess 2025-06-03T10:27:09.021878Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 
ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.070356Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Topic1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:09.070459Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Topic1" took 139us result status StatusSuccess 2025-06-03T10:27:09.070614Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.070790Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:632:2551] connected; active server actors: 1 2025-06-03T10:27:09.087321Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186233409547][Topic1] BALANCER INIT DONE for Topic1: (0, 72075186233409546) (1, 72075186233409546) (2, 72075186233409546) 2025-06-03T10:27:09.087535Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186233409547][Topic1] Discovered subdomain [OwnerId: 72057594046678944, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186233409547 2025-06-03T10:27:09.097918Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72057594046678944, 
NodeId 1, Generation 3 2025-06-03T10:27:09.098006Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.098084Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 86us result status StatusSuccess 2025-06-03T10:27:09.098239Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic1" PathDescription { Self { Name: "Topic1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409547 } PersQueueGroup { Name: "Topic1" PathId: 2 TotalGroupCount: 3 PartitionPerTablet: 3 PQTabletConfig { PartitionConfig { LifetimeSeconds: 11 WriteSpeedInBytesPerSecond: 17 } YdbDatabasePath: "/MyRoot" MeteringMode: METERING_MODE_REQUEST_UNITS } Partitions { PartitionId: 0 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } Partitions { PartitionId: 2 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409547 NextPartitionId: 3 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 16975298 DataSize: 16975298 UsedReserveSize: 0 } } PQPartitionsInside: 3 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.098551Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186233409547][Topic1] TEvClientConnected TabletId 72075186233409546, NodeId 1, Generation 2 2025-06-03T10:27:09.137730Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186233409547][Topic1] pipe [1:679:2586] connected; active server actors: 1 |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_No_Base64_Transform [GOOD] >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] >> TBoardSubscriberTest::ReconnectReplica [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimiter [GOOD] >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] |62.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::ReconnectReplica [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TStoragePoolsStatsPersistence::SameAggregatedStatsAfterRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: 
[1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:05.777865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:05.777900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:05.777907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:05.777916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:05.777946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:05.777950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:05.777962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:05.777977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:05.778123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:05.778222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:05.812415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:05.812450Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:05.827230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:05.827416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:05.827466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:05.829879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:05.829946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:05.830090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.830181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 
2025-06-03T10:27:05.830967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.831021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:05.831448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.831464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.831475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:05.831486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.835520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:05.835667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.837908Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:05.882545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:05.882656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.882745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:05.882809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:05.882822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.883798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.883832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:05.883918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.883932Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:05.883939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:05.883946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:05.884448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.884461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:05.884468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:05.884822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.884834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.884841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.884849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:05.885719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:05.886206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:05.886261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:05.886510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.886541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.886550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.886639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 
2025-06-03T10:27:05.886650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.886695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:05.886710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:05.887181Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.887191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.887250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... meshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:09.864577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: SomeTable, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:27:09.864627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-03T10:27:09.864775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:27:09.864826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:27:09.864885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:09.864987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.865366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:09.872644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:09.873553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:09.873580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:09.873639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-03T10:27:09.873668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:09.873675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:09.873727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:755:2707] sender: [1:807:2058] recipient: [1:15:2062] 2025-06-03T10:27:09.922184Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:09.922304Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeTable" took 148us result status StatusSuccess 2025-06-03T10:27:09.922488Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeTable" PathDescription { Self { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "SomeTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 Family: 1 FamilyName: "alternative" NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 4140 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { PoolsUsage { PoolKind: "pool-kind-1" DataSize: 1020 IndexSize: 0 } PoolsUsage { PoolKind: "pool-kind-2" DataSize: 3120 IndexSize: 0 } } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82344 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 
DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:09.922858Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:09.922903Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 48us result status StatusSuccess 2025-06-03T10:27:09.922996Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "SomeTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 4140 DataSize: 4140 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-1" TotalSize: 1020 DataSize: 1020 IndexSize: 0 } StoragePoolsUsage { PoolKind: "pool-kind-2" TotalSize: 3120 DataSize: 3120 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/sysview/unittest >> KqpSysColV1::StreamSelectRowAsterisk [GOOD] Test command err: Trying to start YDB, gRPC: 27641, MsgBus: 1053 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00253d/r3tmp/tmpmFMue6/pdisk_1.dat 2025-06-03T10:27:08.769454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:08.846762Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667713320992005:2079] 1748946428640080 != 1748946428640083 2025-06-03T10:27:08.855092Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:08.855602Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:08.855628Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 27641, node 1 2025-06-03T10:27:08.857724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:08.881518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:08.881530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:08.881532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:08.881581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1053 TClient is connected to server localhost:1053 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:09.031121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:09.037408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:09.050243Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:09.098594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:09.169227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:09.231443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:09.526217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667717615960942:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:09.526244Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:09.572924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.587326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.600746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.616215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.635408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.658779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.720055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:09.758200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667717615961598:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:09.758223Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:09.758364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667717615961603:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:09.759293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:09.762750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:27:09.762900Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667717615961605:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:27:09.854901Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667717615961665:3404] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:10.131870Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946430168, txId: 281474976710672] shutting down |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicWriterTests::TestEnterMessage_1KiB_Newline_Delimited_With_Two_Delimiters_In_A_Row [GOOD] >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] >> TBoardSubscriberTest::NotAvailableByShutdown ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_minikql/unittest >> TTxDataShardMiniKQL::WriteAndReadMany [GOOD] Test command err: 2025-06-03T10:25:50.910731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:25:50.910760Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.911676Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:25:50.915703Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:25:50.915868Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 9437184 actor [1:132:2154] 2025-06-03T10:25:50.915930Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:25:50.927274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:109:2140], Recipient [1:132:2154]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:25:50.936566Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:25:50.936641Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:25:50.936949Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 9437184 2025-06-03T10:25:50.936966Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 9437184 2025-06-03T10:25:50.936977Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 9437184 2025-06-03T10:25:50.937083Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:25:50.937199Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:25:50.937219Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 9437184 persisting started state actor id [1:205:2154] in generation 2 2025-06-03T10:25:50.975120Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:25:50.986090Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 9437184 2025-06-03T10:25:50.986218Z node 1 :TX_DATASHARD DEBUG:
datashard.cpp:457: 9437184 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:25:50.986249Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 9437184, actorId: [1:218:2215] 2025-06-03T10:25:50.986256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 9437184 2025-06-03T10:25:50.986262Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 9437184, state: WaitScheme 2025-06-03T10:25:50.986269Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.986357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.986367Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.986493Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 9437184 2025-06-03T10:25:50.986525Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 9437184 2025-06-03T10:25:50.986533Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.986542Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:25:50.986552Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 9437184 2025-06-03T10:25:50.986559Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 9437184 has no attached operations 2025-06-03T10:25:50.986566Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 9437184 2025-06-03T10:25:50.986572Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 9437184 TxInFly 0 2025-06-03T10:25:50.986577Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 9437184 2025-06-03T10:25:50.986592Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:214:2212], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.986598Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.986612Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:212:2211], serverId# [1:214:2212], sessionId# [0:0:0] 2025-06-03T10:25:50.987258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:100:2134], Recipient [1:132:2154]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 100 RawX2: 4294969430 } TxBody: "\nI\n\006table1\020\r\032\t\n\003key\030\002 \"\032\014\n\005value\030\200$ 8\032\n\n\004uint\030\002 9(\":\010Z\006\010\000\030\000(\000J\014/Root/table1" TxId: 1 ExecLevel: 0 Flags: 0 SchemeShardId: 4200 ProcessingParams { } 2025-06-03T10:25:50.987276Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:25:50.987298Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: 
TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:25:50.987347Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit CheckSchemeTx 2025-06-03T10:25:50.987360Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 9437184 txId 1 ssId 4200 seqNo 0:0 2025-06-03T10:25:50.987374Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 1 at tablet 9437184 2025-06-03T10:25:50.987385Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is ExecutedNoMoreRestarts 2025-06-03T10:25:50.987390Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit CheckSchemeTx 2025-06-03T10:25:50.987397Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit StoreSchemeTx 2025-06-03T10:25:50.987402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.987508Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayCompleteNoMoreRestarts 2025-06-03T10:25:50.987513Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit StoreSchemeTx 2025-06-03T10:25:50.987518Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit FinishPropose 2025-06-03T10:25:50.987522Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.987536Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:1] at 9437184 is DelayComplete 2025-06-03T10:25:50.987540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:1] at 9437184 executing on unit FinishPropose 2025-06-03T10:25:50.987544Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:1] at 9437184 to execution unit WaitForPlan 2025-06-03T10:25:50.987548Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.987554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:1] at 9437184 is not ready to execute on unit WaitForPlan 2025-06-03T10:25:50.998605Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:25:50.998637Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit StoreSchemeTx 2025-06-03T10:25:50.998646Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:1] at 9437184 on unit FinishPropose 2025-06-03T10:25:50.998663Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 1 at tablet 9437184 send to client, exec latency: 0 ms, propose latency: 1 ms, status: PREPARED 2025-06-03T10:25:50.998706Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 9437184 not sending time cast registration request in state WaitScheme 2025-06-03T10:25:50.998875Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:224:2221], Recipient [1:132:2154]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:25:50.998886Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 
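
The trace records above and below follow a single datashard operation through named execution units (CheckSchemeTx, StoreSchemeTx, FinishPropose, WaitForPlan, PlanQueue), where each unit returns a status such as Executed, ExecutedNoMoreRestarts, DelayComplete, or Restart that decides whether the pipeline advances, defers the unit's completion, or re-runs the same unit. A minimal sketch of that control loop, under assumed, simplified semantics — this is not the NKikimr::NDataShard code; only the unit and status names are taken from the log:

    #include <cstdio>
    #include <functional>
    #include <string>
    #include <vector>

    // Status values mirroring a subset of those printed in the log.
    enum class EStatus { Executed, DelayComplete, Restart };

    struct TUnit {
        std::string Name;
        std::function<EStatus()> Run;
    };

    int main() {
        int attempts = 0;
        // Hypothetical pipeline; in the real code each unit lives in its own *_unit.cpp.
        std::vector<TUnit> pipeline = {
            {"CheckSchemeTx", [] { return EStatus::Executed; }},
            {"StoreSchemeTx", [] { return EStatus::DelayComplete; }},
            {"FinishPropose", [&attempts] {
                // First try asks to be re-executed, like the "is Restart"
                // records around tx 11 further down in this log.
                return attempts++ == 0 ? EStatus::Restart : EStatus::Executed;
            }},
        };

        for (size_t i = 0; i < pipeline.size(); ) {
            switch (pipeline[i].Run()) {
                case EStatus::Executed:
                    std::printf("unit %s -> Executed, advance\n", pipeline[i].Name.c_str());
                    ++i;
                    break;
                case EStatus::DelayComplete:
                    // Advance now; side effects are applied later, in TTx*::Complete.
                    std::printf("unit %s -> DelayComplete, completion deferred\n", pipeline[i].Name.c_str());
                    ++i;
                    break;
                case EStatus::Restart:
                    std::printf("unit %s -> Restart, retry same unit\n", pipeline[i].Name.c_str());
                    break;  // do not advance; the unit runs again
            }
        }
        return 0;
    }

In this reading, Executed advances the plan; DelayComplete also advances but defers its effects (the later "Complete execution for [0:1] at 9437184 on unit StoreSchemeTx/FinishPropose" records); Restart re-runs the unit, which is what happens repeatedly to tx 11 below while it waits for memory grants.
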
2025-06-03T10:25:50.998894Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 9437184, clientId# [1:223:2220], serverId# [1:224:2221], sessionId# [0:0:0] 2025-06-03T10:25:50.998908Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287424, Sender [1:100:2134], Recipient [1:132:2154]: {TEvPlanStep step# 1000001 MediatorId# 0 TabletID 9437184} 2025-06-03T10:25:50.998913Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3147: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:25:50.998960Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1790: Trying to execute [1000001:1] at 9437184 on unit WaitForPlan 2025-06-03T10:25:50.998969Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1805: Execution status for [1000001:1] at 9437184 is Executed 2025-06-03T10:25:50.998975Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [1000001:1] at 9437184 executing on unit WaitForPlan 2025-06-03T10:25:50.998981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [1000001:1] at 9437184 to execution unit PlanQueue 2025-06-03T10:25:50.999823Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 1 at step 1000001 at tablet 9437184 { Transactions { TxId: 1 AckTo { RawX1: 100 RawX2: 4294969430 } } Step: 1000001 MediatorID: 0 TabletID: 9437184 } 2025-06-03T10:25:50.999846Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 2025-06-03T10:25:50.999918Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:132:2154], Recipient [1:132:2154]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.999927Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:25:50.999938Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 9437184 2025-06-03T10:25:50.999948Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 9437184 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:25:50.999954Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 9437184 2025-06-03T10:25:50.999964Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000001:1] in PlanQueue unit at 9437184 2025-06-03T10:25:50.999971Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [100000 ... 02?\022\002\205\000\034MyReads MyWrites\205\004\205\002?\022\002\206\202\024Reply\024Write?\030\205\002\206\203\010\002 AllReads\030MyKeys\014Run4ShardsForRead4ShardsToWrite\005?\024)\211\026?\022\203\005\004\200\205\006\203\004\203\004\203\004\006\n\016\213\004\203\004\207\203\001H\213\002\203\004\203\004\203\010\203\010\203\004\206\203\014\203\014,SelectRange\000\003?* h\020\000\000\000\000\000\000\016\000\000\000\000\000\000\000?\014\005?2\003?,D\003?.F\003?0p\007\013?:\003?4\000\'?8\003\013?>\003?<\003j\030\001\003?@\000\003?B\000\003?D\007\240%&\003?F\000\006\004?J\003\203\014\000\003\203\014\000\003\003?L\000\377\007\002\000\005?\032\005?\026?x\000\005?\030\003\005? 
\005?\034?x\000\006 2025-06-03T10:27:05.375078Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:05.375174Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:05.375440Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CheckDataTx 2025-06-03T10:27:05.377699Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-03T10:27:05.377731Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CheckDataTx 2025-06-03T10:27:05.377739Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit BuildAndWaitDependencies 2025-06-03T10:27:05.377746Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit BuildAndWaitDependencies 2025-06-03T10:27:05.377763Z node 3 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 9437184 CompleteEdge# v1000001/1 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1000001/18446744073709551615 ImmediateWriteEdgeReplied# v1000001/18446744073709551615 2025-06-03T10:27:05.377785Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:11] at 9437184 2025-06-03T10:27:05.377798Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-03T10:27:05.377802Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit BuildAndWaitDependencies 2025-06-03T10:27:05.377808Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit ExecuteDataTx 2025-06-03T10:27:05.377813Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:05.381499Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:05.381583Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:05.381598Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:05.414911Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:05.414951Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:05.415183Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:05.416154Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:05.416204Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:05.416216Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:05.468078Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:05.468121Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:05.468361Z node 3 :TX_DATASHARD 
DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:05.496780Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 4194304 and requests 33554432 more for the next try 2025-06-03T10:27:05.496872Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:05.496887Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:05.497054Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:05.497064Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:05.497249Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:05.819623Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:05.819774Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:05.819791Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:05.865045Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:05.865086Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:05.865351Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:06.239770Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:175: Operation [0:11] at 9437184 exceeded memory limit 37748736 and requests 301989888 more for the next try 2025-06-03T10:27:06.239978Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:06.239997Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:06.258014Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:06.258052Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:06.258295Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:06.260748Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:06.260818Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:06.260830Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:06.266821Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:06.266855Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:06.267097Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:06.267738Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:06.267789Z node 3 
:TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:06.267802Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:06.316378Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:06.316416Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:06.316631Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:06.318091Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:189: Tablet 9437184 is not ready for [0:11] execution 2025-06-03T10:27:06.318157Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:561: tx 11 released its data 2025-06-03T10:27:06.318172Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Restart 2025-06-03T10:27:06.477556Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 9437184 2025-06-03T10:27:06.477606Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit ExecuteDataTx 2025-06-03T10:27:06.477892Z node 3 :TX_DATASHARD DEBUG: datashard_active_transaction.cpp:661: tx 11 at 9437184 restored its data 2025-06-03T10:27:07.779247Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:306: Executed operation [0:11] at tablet 9437184 with status COMPLETE 2025-06-03T10:27:07.779305Z node 3 :TX_DATASHARD TRACE: execute_data_tx_unit.cpp:312: Datashard execution counters for [0:11] at 9437184: {NSelectRow: 0, NSelectRange: 1, NUpdateRow: 0, NEraseRow: 0, SelectRowRows: 0, SelectRowBytes: 0, SelectRangeRows: 129871, SelectRangeBytes: 40000268, UpdateRowBytes: 0, EraseRowBytes: 0, SelectRangeDeletedRowSkips: 0, InvisibleRowSkips: 0} 2025-06-03T10:27:07.779334Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-03T10:27:07.779346Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit ExecuteDataTx 2025-06-03T10:27:07.779354Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit FinishPropose 2025-06-03T10:27:07.779363Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit FinishPropose 2025-06-03T10:27:07.779378Z node 3 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 11 at tablet 9437184 send to client, exec latency: 62 ms, propose latency: 62 ms, status: COMPLETE 2025-06-03T10:27:07.779413Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is DelayComplete 2025-06-03T10:27:07.779419Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit FinishPropose 2025-06-03T10:27:07.779424Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:11] at 9437184 to execution unit CompletedOperations 2025-06-03T10:27:07.779429Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:11] at 9437184 on unit CompletedOperations 2025-06-03T10:27:07.779446Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:11] at 9437184 is Executed 2025-06-03T10:27:07.779450Z node 3 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:11] at 9437184 executing on unit CompletedOperations 2025-06-03T10:27:07.779455Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:11] at 9437184 has finished 2025-06-03T10:27:07.786787Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 9437184 2025-06-03T10:27:07.786830Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:11] at 9437184 on unit FinishPropose 2025-06-03T10:27:07.786855Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 9437184 |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldNotBatchWhenDisabled [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:03.386626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:03.386671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:03.386679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:03.386687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:03.386717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:03.386723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:03.386736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:03.386755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:03.386923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-03T10:27:03.387025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:03.459887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:03.459937Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:03.471268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:03.471427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:03.471488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:03.473665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:03.473765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:03.473931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.474017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:03.474887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:03.474953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:03.475437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.475457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:03.475470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:03.475482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.475490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:03.475519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.481394Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:03.561205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:03.565559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.565715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:03.565797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:03.565821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.567074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.567115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:03.567209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.567222Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:03.567229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:03.567235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:03.568020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.568036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:03.568043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:03.572134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.572173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:03.572184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.572196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:03.573201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:03.574373Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:03.574457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:03.574764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:03.574828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:03.574841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.574962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:03.574975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:03.575040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:03.575058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:03.582266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:03.582302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:03.582395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
HARD TRACE: schemeshard__init.cpp:2162: TTxInit for Shards, read: 72057594046678944:1, tabletId: 72075186233409546, PathId: [OwnerId: 72057594046678944, LocalPathId: 2], TabletType: DataShard, at schemeshard: 72057594046678944 2025-06-03T10:27:11.027897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:27:11.027916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:11.027953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.027994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:11.028350Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 
72057594046678944 2025-06-03T10:27:11.050372Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:11.050471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:11.061530Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435083, Sender [1:1014:2956], Recipient [1:1014:2956]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:11.061566Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4945: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:11.061925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:11.061954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:11.062043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [1:1014:2956], Recipient [1:1014:2956]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:11.062051Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:11.062088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:11.062106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:11.062115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:11.062120Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:11.062169Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274399233, Sender [1:1050:2956], Recipient [1:1014:2956]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:11.062175Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:11.062182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1014:2956] sender: [1:1068:2058] recipient: [1:15:2062] 2025-06-03T10:27:11.101217Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:1067:2998], Recipient [1:1014:2956]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-03T10:27:11.101246Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:27:11.101290Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:27:11.101440Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 135us result status StatusSuccess 2025-06-03T10:27:11.101752Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: 
false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 82344 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::NotAvailableByShutdown [GOOD] |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::DropByDisconnect [GOOD] >> TestProgram::YqlKernelContains >> TestProgram::YqlKernelContains [GOOD] >> KqpAcl::AclForOltpAndOlap+isOlap >> TBoardSubscriberTest::SimpleSubscriber [GOOD] |62.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |62.6%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut |62.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/inside_ydb_ut/ydb-core-http_proxy-ut-inside_ydb_ut >> TestProgram::YqlKernelEndsWith [GOOD] >> TableCreator::CreateTables >> TestProgram::JsonValue >> TestProgram::JsonValue [GOOD] >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelContains [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\005@\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \034StringContains?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; 
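
The digraph and program_parsed records above describe a five-stage plan for TestProgram::YqlKernelContains: reserve memory, fetch the original "string" (id 7) and "substring" (id 9) columns, assemble them, run the two-argument StringContains block kernel, and project the UInt8 result as column 15 (the remaining FALLBACK type-dump records for this test continue below). As a rough illustration of what the kernel computes element-wise — assumed semantics over plain std::string columns, not the actual Arrow/YQL block implementation:

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Element-wise "contains" over two equally sized string columns,
    // producing a UInt8 column like column 15 in the plan above.
    std::vector<uint8_t> StringContainsKernel(const std::vector<std::string>& str,
                                              const std::vector<std::string>& sub) {
        std::vector<uint8_t> out(str.size());
        for (size_t i = 0; i < str.size(); ++i) {
            out[i] = str[i].find(sub[i]) != std::string::npos ? 1 : 0;
        }
        return out;
    }

    int main() {
        // Hypothetical sample rows, not taken from the test data.
        std::vector<std::string> strings    = {"uid_3000001", "uid_3000002"};
        std::vector<std::string> substrings = {"300000", "999"};
        for (uint8_t v : StringContainsKernel(strings, substrings)) {
            std::printf("%u\n", static_cast<unsigned>(v));  // prints 1 then 0
        }
    }
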
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/base/ut_board_subscriber/unittest >> TBoardSubscriberTest::SimpleSubscriber [GOOD] |62.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |62.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes |62.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/indexes/ydb-core-kqp-ut-indexes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::YqlKernelEndsWith [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? \020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Function { Arguments { Id: 7 } Arguments { Id: 9 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 15 } } } Kernels: "O\002\030BlockAsTuple\t\211\004\235\213\004\213\002\203\001H\213\002\203\014\001\235?\002\001\235?\006\001\002\000\t\211\002?\014\235?\000\001\002\000\t\251\000?\022\014Arg\000\000\t\211\002?\016\235?\004\001\002\000\t\211\006?\034\203\005@?\022?\022$BlockFunc\000\003? 
\020EndsWith?\030?\030\001\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N7[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0[shape=box, label="N4(26):{\"i\":\"7,9\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"15\",\"t\":\"Calculation\"}\nREMOVE:7,9"]; N2 -> N0[label="1"]; N4 -> N0[label="2"]; N2[shape=box, label="N2(9):{\"i\":\"7\",\"p\":{\"address\":{\"name\":\"string\",\"id\":7}},\"o\":\"7\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N2[label="1"]; N4[shape=box, label="N3(9):{\"i\":\"9\",\"p\":{\"address\":{\"name\":\"substring\",\"id\":9}},\"o\":\"9\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N4[label="1"]; N5[shape=box, label="N5(26):{\"i\":\"15\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N0 -> N5[label="1"]; N6[shape=box, label="N1(4):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"string\",\"id\":7},{\"name\":\"substring\",\"id\":9}]},\"o\":\"7,9\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N7 -> N6[label="1"]; N7->N6->N2->N4->N0->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":7,"inputs":[]},{"owner_id":0,"inputs":[{"from":2},{"from":4}]},{"owner_id":2,"inputs":[{"from":6}]},{"owner_id":4,"inputs":[{"from":6}]},{"owner_id":5,"inputs":[{"from":0}]},{"owner_id":6,"inputs":[{"from":7}]}],"nodes":{"2":{"p":{"i":"7","p":{"address":{"name":"string","id":7}},"o":"7","t":"AssembleOriginalData"},"w":9,"id":2},"6":{"p":{"i":"0","p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"7,9","t":"FetchOriginalData"},"w":4,"id":6},"7":{"p":{"p":{"data":[{"name":"string","id":7},{"name":"substring","id":9}]},"o":"0","t":"ReserveMemory"},"w":0,"id":7},"5":{"p":{"i":"15","t":"Projection"},"w":26,"id":5},"4":{"p":{"i":"9","p":{"address":{"name":"substring","id":9}},"o":"9","t":"AssembleOriginalData"},"w":9,"id":4},"0":{"p":{"i":"7,9","p":{"kernel":{"class_name":"SIMPLE"}},"o":"15","t":"Calculation"},"w":26,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; |62.6%| [TA] $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... 
results_accumulator.log} |62.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TBsProxyFaultToleranceTest::CheckTGetWithRecoverFaultToleranceTestErasureMirror3dc [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonValue [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207?\004\001\235?\006\001\235?\n\001\"\000\t\211\004?\020\235?\002\001\235?\004\000\"\000\t\251\000?\026\002\000\t\251\000?\030\002\000\000\t\211\002?\022\235?\010\001\"\000\t\211\n?&?\026?\030?\002?\004?\010,ScalarApply\000?\036?\"\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\010?\002?\000\207?\004?4$IfPresent\000?.\t\251\000?\000\002\000\t\211\n?4\201\213\004\213\004\203\n\203\005@\207\203\001H?@?4?D?D 
VisitAll\000\t\211\020?H\211\006?H\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?X\203\005@\200\203\005@\202\022\000\003?p6Json2.SqlValueConvertToUtf8\202\003?r\000\002\017\003?Z\000\003?\\\000\003?^\000\003?`\000\027?d\t\211\014?b\311\002?b\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\214\005\205\004\203\010\203\005@\032\036\003?\222\002\003?\224\000\003\001\003?\216\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\244\203\005@\200\203\005@\202\022\000\003?\260\026Json2.Parse\202\003?\262\000\002\017\003?\246\000\003?\250\000\003?\252\000\003?\254\000?:\036\t\211\014?f\211\002?f\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\312\203\005@\200\203\005@\202\022\000\003?\326\"Json2.CompilePath\202\003?\330\000\002\017\003?\314\000\003?\316\000\003?\320\000\003?\322\000?2\036\010\000?l\276\t\251\000?@\002\000\'?4\t\251\000?D\002\000?\370\004\'?4\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; 
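The graph_constructed dumps above are single-line Graphviz digraphs. Each node label packs the planned execution step and an accumulated cost estimate as N<step>(<weight>) ahead of the operation's JSON, and each edge label is the consumer's input-slot index. A small sketch of how to pull those fields out of a dump line (Python; the regexes are fitted to the format shown here, not a stable contract):

```python
import re

# One fragment of the graph_constructed dump above (label JSON elided).
dot = ('N1[shape=box, label="N4(15):{...Calculation...}"]; '
       'N0 -> N1[label="1"]; N3 -> N1[label="2"];')

# Nk is the graph node id; its label leads with N<step>(<weight>), the planned
# execution position and accumulated cost estimate for that operation.
nodes = re.findall(r'(N\d+)\[shape=box, label="N(\d+)\((\d+)\)', dot)
# Edge labels number the consumer's input slots, i.e. argument order.
edges = re.findall(r'(N\d+) -> (N\d+)\[label="(\d+)"\]', dot)

print(nodes)  # [('N1', '4', '15')]
print(edges)  # [('N0', 'N1', '1'), ('N3', 'N1', '2')]
```

Applied to the JsonValue graph above, this recovers, for example, that graph node N1 (the Calculation) runs fourth with weight 15 and consumes N0 (the Const "$.key") in slot 1 and N3 (the assembled json_string column) in slot 2.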
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Utf8 FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203\014?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r$Json2.SqlValueBool\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\" ... } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203B\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?6 VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000\t\211\004?6\203\005@?F\030Invoke\000\003?\374\016Convert?\372\001\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; 
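The program_parsed record is the same DAG in machine-readable form: "edges" lists, for each owner_id, the nodes it consumes from. The red chain the planner prints (N0->N5->N2->N3->N1->N4) is simply a topological order of that dependency map, which a minimal check can confirm (Python; the edge list is copied from the dump above):

```python
import json
from graphlib import TopologicalSorter

edges = json.loads('[{"owner_id":0,"inputs":[]},'
                   '{"owner_id":1,"inputs":[{"from":0},{"from":3}]},'
                   '{"owner_id":2,"inputs":[{"from":5}]},'
                   '{"owner_id":3,"inputs":[{"from":2}]},'
                   '{"owner_id":4,"inputs":[{"from":1}]},'
                   '{"owner_id":5,"inputs":[]}]')

# Map each node to the set of nodes feeding its inputs.
deps = {e["owner_id"]: {i["from"] for i in e["inputs"]} for e in edges}
print(list(TopologicalSorter(deps).static_order()))
# [0, 5, 2, 3, 1, 4] -- one valid order: Const and ReserveMemory first, then
# FetchOriginalData/AssembleOriginalData, then the Calculation, then the
# Projection, matching the red chain in the dot dump.
```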
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Float FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 5 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\022\006Arg\020JsonNode\020JsonPath\006UDF\006Udf\014Apply2\nFlags\010Name\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\tH\203\001H\213\002\207\203@\001\235?\006\001\235?\014\001\"\000\t\211\004?\022\235?\002\001\235?\004\000\"\000\t\251\000?\030\002\000\t\251\000?\032\002\000\000\t\211\002?\024\235?\n\001\"\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\002\000\t\251\000?\004\002\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\002\000\t\211\n?6\201\213\004\213\004\203\n\203\005@\207\203@?B?6?F?F VisitAll\000\t\211\020?J\211\006?J\207\214\006\214\n\210\203\001H\214\006\016\000\203\004\203\005@\203\004\203\004\207\214\006\214\n\210\203\001H\214\006\026\000\t\211\010?Z\203\005@\200\203\005@\202\022\000\003?r(Json2.SqlValueNumber\202\003?t\000\002\017\003?\\\000\003?^\000\003?`\000\003?b\000\027?f\t\211\014?d\311\002?d\203\tH\005\205\004\206\205\004\203\010\203\005@\032\036\203\005@\020Args\034Payload\006\002?\216\005\205\004\203\010\203\005@\032\036\003?\224\002\003?\226\000\003\001\003?\220\000\003\016\000\203\004\203\005@\203\004\203\004?\000\026\000\t\211\010?\246\203\005@\200\203\005@\202\022\000\003?\262\026Json2.Parse\202\003?\264\000\002\017\003?\250\000\003?\252\000\003?\254\000\003?\256\000?<\036\t\211\014?h\211\002?h\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?\314\203\005@\200\203\005@\202\022\000\003?\330\"Json2.CompilePath\202\003?\332\000\002\017\003?\316\000\003?\320\000\003?\322\000\003?\324\000?4\036\010\000?n\276\t\251\000?B\002\000\'?6\t\251\000?F\002\000?\372\004\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"5,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,5"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"5\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"5\",\"p\":{\"address\":{\"name\":\"json_string\",\"id\":5}},\"o\":\"5\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_string\",\"id\":5}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"5,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"5","p":{"address":{"name":"json_string","id":5}},"o":"5","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_string","id":5}]},"o":"5","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_string","id":5}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; 
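All the JsonValue variants in this test reuse the same program shape (Const "$.key" -> Calculation -> Projection over json_string) and differ only in the terminal UDF baked into the kernel blob: Json2.SqlValueConvertToUtf8, Json2.SqlValueBool, or Json2.SqlValueNumber. A rough model of what the per-type checks expect for the six test documents (Python; the null-on-mismatch rules below are illustrative assumptions, not the exact Json2 semantics):

```python
import json

docs = ['{"key":"value"}', '{"key":10}', '{"key":0.1}',
        '{"key":false}', '{"another":"value"}', '[]']

def key_of(doc):
    node = json.loads(doc)                                       # Json2.Parse
    return node.get("key") if isinstance(node, dict) else None   # $.key

# Assumed filters: a value of the wrong kind becomes NULL rather than erroring.
utf8   = [v if isinstance(v, str) else None for v in map(key_of, docs)]
number = [float(v) if isinstance(v, (int, float)) and not isinstance(v, bool)
          else None for v in map(key_of, docs)]
bools  = [v if isinstance(v, bool) else None for v in map(key_of, docs)]

print(utf8)    # ['value', None, None, None, None, None]
print(number)  # [None, 10.0, 0.1, None, None, None]
print(bools)   # [None, None, None, False, None, None]
```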
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; json_string: [ "{"key":"value"}", "{"key":10}", "{"key":0.1}", "{"key":false}", "{"another":"value"}", "[]" ] Check output for Double FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10DoubleTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10DoubleTypeE; >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] >> TableCreator::CreateTables [GOOD] |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TSchemeShardLoginTest::ChangeAccountLockoutParameters [GOOD] >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckTGetWithRecoverFaultToleranceTestErasureMirror3dc [GOOD] |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_topic_splitmerge/unittest >> TSchemeShardTopicSplitMergeTest::SplitWithWrongBoundary [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:14.049429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:14.049464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:14.049470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:14.049476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:14.049492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:14.049497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:14.049507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:14.049521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 
600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:14.049652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:14.049720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:14.080798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:14.080830Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:14.099310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:14.099450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:14.099494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:14.101427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:14.101487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:14.101650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:14.101713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:14.102416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:14.102475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:14.102828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:14.102842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:14.102853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:14.102864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:14.102871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:14.102895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.108811Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:14.155922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:14.156006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.156075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:14.156119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:14.156130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.162329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:14.162418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:14.162491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.162503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:14.162509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:14.162516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:14.163034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.163043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:14.163049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:14.163336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.163345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.163351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:14.163359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:14.164164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:14.164519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:14.164562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:14.164751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:14.164776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:14.164784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:14.164839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:14.164847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:14.164880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:14.164892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:14.169578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:14.169595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:14.169668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
satisfy waiter [1:634:2557] TestWaitNotification: OK eventTxId 105 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } TestModificationResults wait txId: 106 2025-06-03T10:27:14.285378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\001" } } } TxId: 106 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:14.285414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 106:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.285478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 106:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-03T10:27:14.286477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 106, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'01\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 106 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:14.286514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 106, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '01' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-03T10:27:14.286580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-03T10:27:14.286587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-03T10:27:14.286663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-03T10:27:14.286680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:27:14.286686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:641:2564] TestWaitNotification: OK eventTxId 106 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } TestModificationResults wait txId: 107 2025-06-03T10:27:14.287573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "UUUUUUUUUUUUUUUT" } } } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:14.287612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: 
/MyRoot/USER_1/Topic1, pathId: , opId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.287668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 107:1, propose status:StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', at schemeshard: 72057594046678944 2025-06-03T10:27:14.288140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 107, response: Status: StatusInvalidParameter Reason: "Split boundary less or equals FromBound of partition: \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\' <= \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\'" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:14.288164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary less or equals FromBound of partition: '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54' <= '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54', operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-03T10:27:14.288216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-03T10:27:14.288223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-03T10:27:14.288288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-03T10:27:14.288303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-03T10:27:14.288308Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:648:2571] TestWaitNotification: OK eventTxId 107 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } TestModificationResults wait txId: 108 2025-06-03T10:27:14.288951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\255" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:14.288977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.289017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-03T10:27:14.289504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusInvalidParameter Reason: 
"Split boundary greate or equals ToBound of partition: \'AD\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:14.289530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AD' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-03T10:27:14.289588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-03T10:27:14.289594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-03T10:27:14.289672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-03T10:27:14.289689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:27:14.289694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:655:2578] TestWaitNotification: OK eventTxId 108 >>>>> Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } TestModificationResults wait txId: 109 2025-06-03T10:27:14.290338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_1" OperationType: ESchemeOpAlterPersQueueGroup AlterPersQueueGroup { Name: "Topic1" PQTabletConfig { PartitionConfig { } } Split { Partition: 1 SplitBoundary: "\252\252\252\252\252\252\252\252\252\252\252\252\252\252\252\251" } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:14.290365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_pq.cpp:509: TAlterPQ Propose, path: /MyRoot/USER_1/Topic1, pathId: , opId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:27:14.290409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 109:1, propose status:StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), at schemeshard: 72057594046678944 2025-06-03T10:27:14.290931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 109, response: Status: StatusInvalidParameter Reason: "Split boundary greate or equals ToBound of partition: \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' >= \'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9\' (FromBound is \'55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54\')" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:14.290958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, 
database: /MyRoot/USER_1, subject: , status: StatusInvalidParameter, reason: Split boundary greate or equals ToBound of partition: 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' >= 'AA AA AA AA AA AA AA AA AA AA AA AA AA AA AA A9' (FromBound is '55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 54'), operation: ALTER PERSISTENT QUEUE, path: /MyRoot/USER_1/Topic1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-03T10:27:14.291013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-03T10:27:14.291019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-03T10:27:14.291082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-03T10:27:14.291101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-03T10:27:14.291106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [1:662:2585] TestWaitNotification: OK eventTxId 109 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest >> TableCreator::CreateTables [GOOD] Test command err: 2025-06-03T10:27:13.537140Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667736468389090:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:13.537395Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000898/r3tmp/tmpcWW9bb/pdisk_1.dat 2025-06-03T10:27:13.720201Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667736468388933:2079] 1748946433531649 != 1748946433531652 2025-06-03T10:27:13.733468Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:13.734525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:13.734557Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:13.748670Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9523 TServer::EnableGrpc on GrpcPort 4692, node 1 2025-06-03T10:27:13.793573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:13.793585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:13.793587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:13.793647Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:27:13.846572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:13.860626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:13.870090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:27:13.870360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 >> TColumnEngineTestLogs::IndexWriteLoadReadStrPK [GOOD] >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] |62.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows |62.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> TestProgram::NumRowsWithNulls |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/database/ut/unittest >> StatisticsSaveLoad::ForbidAccess [GOOD] Test command err: 2025-06-03T10:27:08.303526Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:453:2413], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:08.303610Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:08.303636Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e6b/r3tmp/tmpyO0FTf/pdisk_1.dat 2025-06-03T10:27:08.512151Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20538, node 1 2025-06-03T10:27:08.652888Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:08.652915Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:08.652920Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:08.653032Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:08.658487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:08.769233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:08.769282Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:08.781468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28631 2025-06-03T10:27:09.233101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.374509Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:27:10.406403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:10.406446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:10.470338Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:10.472831Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:10.696733Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.696927Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697142Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:27:10.697199Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697264Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697284Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697328Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697375Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.697395Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:27:10.867893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:10.867945Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:10.880072Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:11.002782Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:11.041445Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:27:11.041506Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:27:11.077415Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:27:11.077947Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:27:11.077987Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:27:11.077995Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:27:11.078004Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:27:11.078013Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:27:11.078022Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:27:11.078033Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:27:11.079067Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:27:11.096983Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:11.097018Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1865:2600], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:27:11.098303Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1872:2606] 2025-06-03T10:27:11.107538Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1913:2626] 
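The records just below replay table_creator.cpp's idempotent bootstrap of /Root/Database/.metadata/_statistics: describe the path, create the table on PathErrorUnknown, subscribe to the create transaction, then double-check by diffing columns and finish once the diff is empty. A compressed sketch of that loop (Python; the callback names are illustrative placeholders, not the actual YDB actor API):

```python
# Illustrative state machine only; the real actor lives in table_creator.cpp.
def ensure_table(describe, create, wait_tx, column_diff, path):
    if describe(path) == "PathErrorUnknown":   # "Describe result: PathErrorUnknown"
        tx = create(path)                      # propose CreateTable
        wait_tx(tx)                            # "Subscribe on create table tx"
    if column_diff(path):                      # "Doublechecking..."
        raise NotImplementedError("alter path not sketched here")
    return "finishing"                         # "Column diff is empty, finishing"

print(ensure_table(lambda p: "PathErrorUnknown",   # hypothetical callbacks
                   lambda p: 281474976720657,
                   lambda tx: None,
                   lambda p: [],
                   "/Root/Database/.metadata/_statistics"))
```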
2025-06-03T10:27:11.107633Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:27:11.107920Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1913:2626], schemeshard id = 72075186224037897 2025-06-03T10:27:11.122839Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:27:11.122870Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:27:11.122885Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:27:11.139873Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:27:11.144582Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:27:11.144630Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:27:11.329138Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:27:11.525509Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:27:11.615036Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:27:12.380930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2211:3056], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.380981Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.405689Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:27:12.864592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2516:3106], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.864657Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.865205Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2521:3110]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:12.865273Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:27:12.865286Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2523:3112] 2025-06-03T10:27:12.869396Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2523:3112] 2025-06-03T10:27:12.869769Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2524:2983] 2025-06-03T10:27:12.869855Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2523:3112], server id = [2:2524:2983], tablet id = 72075186224037894, status = OK 2025-06-03T10:27:12.869924Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2524:2983], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:27:12.869946Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:27:12.870017Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:27:12.870029Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [1:2521:3110], StatRequests.size() = 1 2025-06-03T10:27:12.882260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2528:3116], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.882309Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.882439Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2533:3121], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:12.884211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:27:13.005562Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:27:13.005605Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:27:13.112575Z node 1 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [1:2523:3112], schemeshard count = 1 2025-06-03T10:27:13.402562Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:2535:3123], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:27:13.558973Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:2646:3193] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:13.569334Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:2669:3209]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:13.569406Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:27:13.569415Z node 1 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [1:2669:3209], StatRequests.size() = 1 2025-06-03T10:27:13.580522Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn9ctc5hjvc6cbfkhtjkcs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTMyY2NkNTEtOTRiN2U3YmItY2NiZDUzNWYtNTc5ODVjNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:13.668892Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2748:3239], for# user@builtin, access# DescribeSchema 2025-06-03T10:27:13.668924Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:2748:3239], for# user@builtin, access# DescribeSchema 2025-06-03T10:27:13.675714Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:2738:3235], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/Database/.metadata/_statistics]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:13.676449Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZjIyNmQ2Y2MtYmI2ZTM5ZDYtNTEzMzk1MzAtZDAwOThjODc=, ActorId: [1:2729:3227], ActorState: ExecuteState, TraceId: 01jwtn9dkx71n1bnjc2cw7wajt, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: |62.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> TestProgram::NumRowsWithNulls [GOOD] |62.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> YdbOlapStore::DuplicateRows [GOOD] |62.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_erase_rows/ydb-core-tx-datashard-ut_erase_rows >> AnalyzeColumnshard::AnalyzeServerless [GOOD] |62.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_object_storage_listing/ydb-core-tx-datashard-ut_object_storage_listing >> YdbOlapStore::LogCountByResource |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/table_creator/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchSize [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:05.163934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:05.163966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:05.163972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:05.163979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:05.164006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:05.164011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:05.164022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:05.164056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:05.164200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:05.164272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:05.181635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:05.181668Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:05.187291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:05.187456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:05.187504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:05.189873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:05.189946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:05.190090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.190169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:05.190859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.190904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:05.191300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.191311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.191323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:05.191332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.191339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:05.191361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.192847Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:05.218520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-03T10:27:05.218622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.218701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:05.218756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:05.218771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.219723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.219758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:05.219865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.219878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:05.219885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:05.219892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:05.221204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.221224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:05.221231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:05.221791Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.221804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.221811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.221819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:05.222736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-03T10:27:05.223234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:05.223347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:05.223599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.223631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.223639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.223721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:05.223730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.223785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:05.223801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:05.224338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.224349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.224411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ard__init.cpp:2162: TTxInit for Shards, read: 72057594046678944:2, tabletId: 72075186233409547, PathId: [OwnerId: 72057594046678944, LocalPathId: 3], TabletType: DataShard, at schemeshard: 72057594046678944 2025-06-03T10:27:15.549734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:27:15.549761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:27:15.549813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.549872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 6, at schemeshard: 72057594046678944 2025-06-03T10:27:15.549933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.549950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:15.550293Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 
2025-06-03T10:27:15.565699Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:15.565811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:15.566306Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435083, Sender [1:1131:3061], Recipient [1:1131:3061]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:15.566326Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4945: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:15.566748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:15.566773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:15.566846Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [1:1131:3061], Recipient [1:1131:3061]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:15.566854Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:15.567053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:15.567070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:15.567080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:15.567084Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:15.567557Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274399233, Sender [1:1167:3061], Recipient [1:1131:3061]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:15.567572Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:15.567577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1131:3061] sender: [1:1187:2058] recipient: [1:15:2062] 2025-06-03T10:27:15.606359Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [1:1186:3105], Recipient [1:1131:3061]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-03T10:27:15.606382Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:27:15.606423Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:27:15.606532Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 91us result status StatusSuccess 2025-06-03T10:27:15.606803Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 1 MinPartitionsCount: 20 MaxPartitionsCount: 20 } } TableSchemaVersion: 
2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 13984 RowCount: 100 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 5931 Memory: 141224 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13984 DataSize: 13984 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |62.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |62.7%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_minikql/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TColumnEngineTestLogs::IndexWriteLoadReadStrPK [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=3912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=3912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=column_engine.h:144;event=RegisterTable;path_id=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=column_engine.h:144;event=RegisterTable;path_id=2; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:49;event=insert_to_cache;key=uint64::0;records=1;size=8; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64::0;records=1;count=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=152;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=152;columns=1; FALLBACK_ACTOR_LOGGING;priority=INFO;component=2100;fline=simple_arrays_cache.h:65;event=slice_from_cache;key=uint64::0;records=1;count=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=152;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=152;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=3912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=3912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=_yql_plan_step;fline=native.cpp:71;event=parsing;size=944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=_yql_plan_step;fline=native.cpp:110;event=serialize;size=944;columns=1; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=timestamp;fline=native.cpp:71;event=parsing;size=944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=timestamp;fline=native.cpp:110;event=serialize;size=944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=_yql_tx_id;fline=native.cpp:71;event=parsing;size=944;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=_yql_tx_id;fline=native.cpp:110;event=serialize;size=944;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=resource_type;fline=native.cpp:71;event=parsing;size=1072;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=resource_type;fline=native.cpp:110;event=serialize;size=1072;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=resource_id;fline=native.cpp:71;event=parsing;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=resource_id;fline=native.cpp:110;event=serialize;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=uid;fline=native.cpp:71;event=parsing;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=uid;fline=native.cpp:110;event=serialize;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;field_name=message;fline=native.cpp:71;event=parsing;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;field_name=message;fline=native.cpp:110;event=serialize;size=760;columns=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=constructor_meta.cpp:54;memory_size=94;data_size=66;sum=94;count=1; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=constructor_meta.cpp:75;memory_size=254;data_size=242;sum=254;count=2;size_of_meta=144; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=constructor_portion.cpp:40;memory_size=326;data_size=314;sum=326;count=1;size_of_portion=216; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=432;columns=4; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=432;columns=4; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=abstract.cpp:53;event=WriteIndexComplete;type=CS::INDEXATION;success=1; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=332;fline=granule.cpp:17;event=upsert_portion;portion=(portion_id:1;path_id:1;records_count:100;schema_version:1;level:0;;column_size:6184;index_size:0;meta:((produced=INSERTED;)););path_id=1; FALLBACK_ACTOR_LOGGING;priority=WARN;component=332;fline=abstract.cpp:105;event=AbortEmergency;reason=testing;prev_reason=; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=332;fline=portion_info.h:380;event=IsVisible;analyze_portion=(portion_id:1;path_id:1;records_count:100;schema_version:1;level:0;;column_size:6184;index_size:0;meta:((produced=INSERTED;)););visible=0;snapshot=plan_step=1;tx_id=0;; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=332;fline=portion_info.h:380;event=IsVisible;analyze_portion=(portion_id:1;path_id:1;records_count:100;schema_version:1;level:0;;column_size:6184;index_size:0;meta:((produced=INSERTED;)););visible=0;snapshot=plan_step=1;tx_id=2;; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=332;fline=portion_info.h:380;event=IsVisible;analyze_portion=(portion_id:1;path_id:1;records_count:100;schema_version:1;level:0;;column_size:6184;index_size:0;meta:((produced=INSERTED;)););visible=1;snapshot=plan_step=2;tx_id=1;; 
FALLBACK_ACTOR_LOGGING;priority=TRACE;component=334;fline=column_engine_logs.cpp:457;event=portion_selected;pathId=1;portion=(portion_id:1;path_id:1;records_count:100;schema_version:1;level:0;;column_size:6184;index_size:0;meta:((produced=INSERTED;));); >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::NumRowsWithNulls [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 10001 } Function { Id: 7 Arguments { Id: 2 } } } } Command { Filter { Predicate { Id: 10001 } } } Command { GroupBy { Aggregates { Column { Id: 10002 } Function { Id: 2 } } } } Command { Projection { Columns { Id: 10002 } } } ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N3(15):{\"i\":\"2\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10001\",\"t\":\"Calculation\"}\nREMOVE:2"]; N2 -> N0[label="1"]; N1[shape=box, label="N1(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"2\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N6 -> N1[label="1"]; N2[shape=box, label="N2(7):{\"i\":\"2\",\"p\":{\"address\":{\"name\":\"uid\",\"id\":2}},\"o\":\"2\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N1 -> N2[label="1"]; N3[shape=box, label="N4(15):{\"i\":\"10001\",\"t\":\"Filter\"}\nREMOVE:10001",style=filled,color="#FFAAAA"]; N0 -> N3[label="1"]; N4[shape=box, label="N5(8):{\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"10002\",\"t\":\"Calculation\"}\n"]; N5[shape=box, label="N6(8):{\"i\":\"10002\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N4 -> N5[label="1"]; N6[shape=box, label="N0(0):{\"p\":{\"data\":[{\"name\":\"uid\",\"id\":2}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N6->N1->N2->N0->N3->N4->N5[color=red]; }; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":6}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[{"from":0}]},{"owner_id":4,"inputs":[]},{"owner_id":5,"inputs":[{"from":4}]},{"owner_id":6,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"uid","id":2}]},"o":"2","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"i":"10001","t":"Filter"},"w":15,"id":3},"2":{"p":{"i":"2","p":{"address":{"name":"uid","id":2}},"o":"2","t":"AssembleOriginalData"},"w":7,"id":2},"6":{"p":{"p":{"data":[{"name":"uid","id":2}]},"o":"0","t":"ReserveMemory"},"w":0,"id":6},"5":{"p":{"i":"10002","t":"Projection"},"w":8,"id":5},"4":{"p":{"p":{"kernel":{"class_name":"SIMPLE"}},"o":"10002","t":"Calculation"},"w":8,"id":4},"0":{"p":{"i":"2","p":{"kernel":{"class_name":"SIMPLE"}},"o":"10001","t":"Calculation"},"w":15,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10StringTypeE; 
FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10UInt64TypeE; |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeServerless [GOOD] Test command err: 2025-06-03T10:24:19.677397Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:24:19.677440Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:24:19.677453Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002a52/r3tmp/tmpn6Kx35/pdisk_1.dat 2025-06-03T10:24:20.067176Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6633, node 1 2025-06-03T10:24:20.364180Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:24:20.364202Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:24:20.364208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:24:20.364302Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:24:20.365595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:24:20.454324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:20.454359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:20.477708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4003 2025-06-03T10:24:21.221782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:24:22.703577Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:24:22.722331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:22.722387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:22.794321Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:24:22.798146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:22.984767Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.984967Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985111Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:24:22.985156Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985210Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985231Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985249Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985269Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:22.985329Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:24:23.154056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:24:23.154111Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:24:23.170526Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:24:23.226064Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:23.248356Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:24:23.248401Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:24:23.257727Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:24:23.257775Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:24:23.257816Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:24:23.257825Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:24:23.257832Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:24:23.257837Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:24:23.257851Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:24:23.257860Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:24:23.258004Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:24:23.272589Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:23.272626Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:24:23.274527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:24:23.275672Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:24:23.275828Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:24:23.279664Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:24:23.286649Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:24:23.286674Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:24:23.286688Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:24:23.292441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:24:23.294646Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:24:23.294697Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:24:23.417400Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:24:23.515245Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:24:23.558499Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:24:24.119377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:24:24.560519Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:24:24.640948Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:24:24.640972Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:24.640985Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2567:2933], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:24:24.641483Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2569:2935] 2025-06-03T10:24:24.641671Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2569:2935], schemeshard id = 72075186224037899 2025-06-03T10:24:25.509081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2691:3233], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.509138Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:24:25.514245Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037899 2025-06-03T10:24:25.568634Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2779:3031];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:24:25.568742Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2779:3031];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:24:25.568810Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2779:3031];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;d ... ner_id AND local_path_id = $local_path_id; 2025-06-03T10:27:11.713614Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8325:6117], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:11.713667Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:8335:6122], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:11.713705Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:11.726296Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:27:11.770118Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:8339:6125], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:27:11.983818Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:8439:6174] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:12.026683Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:8468:6189]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:12.026805Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:27:12.026823Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:8470:6191] 2025-06-03T10:27:12.026840Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:8470:6191] 2025-06-03T10:27:12.026993Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:8471:6192] 2025-06-03T10:27:12.027049Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8470:6191], server id = [2:8471:6192], tablet id = 72075186224037894, status = OK 2025-06-03T10:27:12.027065Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:8471:6192], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:27:12.027084Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:27:12.027134Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:27:12.027154Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:8468:6189], StatRequests.size() = 1 2025-06-03T10:27:12.053964Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZDVhZjU3MzUtMTM5OTNhYjMtOWVlN2U4NzItNzE2YTgxMQ==, TxId: 2025-06-03T10:27:12.053998Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZDVhZjU3MzUtMTM5OTNhYjMtOWVlN2U4NzItNzE2YTgxMQ==, TxId: 2025-06-03T10:27:12.054176Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:27:12.070887Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:27:12.070918Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:27:12.129400Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:27:12.129442Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:27:12.209720Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:8470:6191], schemeshard count = 1 2025-06-03T10:27:12.529721Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-03T10:27:12.529756Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 239.000000s, at schemeshard: 72075186224037899 2025-06-03T10:27:12.529853Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 28 2025-06-03T10:27:12.546509Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:27:13.533898Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:27:13.533943Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 2025-06-03T10:27:13.535170Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:27:13.550862Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:27:13.551059Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:27:13.551074Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:51: [72075186224037894] TTxResolve::ExecuteAnalyze. Table OperationId operationId, PathId [OwnerId: 72075186224037899, LocalPathId: 2], AnalyzedShards 1 2025-06-03T10:27:13.578743Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:27:13.601815Z node 2 :STATISTICS DEBUG: tx_analyze_table_request.cpp:56: [72075186224037894] TTxAnalyzeTableRequest::Complete. Send 1 events. 2025-06-03T10:27:13.602179Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:21: [72075186224037894] TTxAnalyzeTableResponse::Execute 2025-06-03T10:27:13.602212Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:52: [72075186224037894] TTxAnalyzeTableResponse::Execute. All shards are analyzed 2025-06-03T10:27:13.614908Z node 2 :STATISTICS DEBUG: tx_analyze_table_response.cpp:57: [72075186224037894] TTxAnalyzeTableResponse::Complete. 2025-06-03T10:27:15.109722Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:27:15.109811Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
2025-06-03T10:27:15.109820Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start force traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:27:15.110132Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:27:15.126066Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:27:15.126263Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:27:15.126286Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:27:15.126672Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:27:15.161974Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:27:15.162078Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:27:15.162326Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:8599:6274], server id = [2:8600:6275], tablet id = 72075186224037905, status = OK 2025-06-03T10:27:15.162362Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:8599:6274], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:27:15.163772Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-03T10:27:15.163800Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:27:15.163898Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:8599:6274], server id = [2:8600:6275], tablet id = 72075186224037905 2025-06-03T10:27:15.163906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:27:15.163933Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:27:15.163972Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:27:15.164086Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:27:15.165155Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:27:15.190010Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:8620:6294]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:27:15.190146Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:27:15.190157Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:8620:6294], StatRequests.size() = 1 2025-06-03T10:27:15.256476Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjY2OGUzNzEtYjEwZDEzNDUtOWMzNTdjZjItNzc1NTUzYw==, TxId: 2025-06-03T10:27:15.256509Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjY2OGUzNzEtYjEwZDEzNDUtOWMzNTdjZjItNzc1NTUzYw==, TxId: 2025-06-03T10:27:15.256698Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:27:15.274020Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:27:15.274054Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=operationId, ActorId=[1:3638:3510] |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestYmqHttpProxy::TestCreateQueue >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable |62.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |62.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut |62.8%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ydb-core-persqueue-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/engines/ut/unittest >> TestProgram::JsonExistsBinary [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:33;event=parse_program;program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? 
?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:102;parse_proto_program=Command { Assign { Column { Id: 15 } Constant { Text: "$.key" } } } Command { Assign { Column { Id: 16 } Function { Id: 8 Arguments { Id: 6 } Arguments { Id: 15 } FunctionType: YQL_KERNEL KernelIdx: 0 } } } Command { Projection { Columns { Id: 16 } } } Kernels: "O\016\020JsonNode\006Arg\020JsonPath\006UDF\006Udf\014Apply2\030BlockAsTuple\t\211\004\235\213\004\213\004\207\203\021H\203\001H\213\002\207\203\014\001\235?\006\001\235?\014\001\032\000\t\211\004?\022\235?\002\001\235?\004\000\032\000\t\251\000?\030\006\000\t\251\000?\032\006\000\000\t\211\002?\024\235?\n\001\032\000\t\211\n?(?\030?\032?\002?\004?\n,ScalarApply\000? ?$\t\251\000?\002\006\000\t\251\000?\004\006\000\t\211\010?\n?\002?\000\207?\010?6$IfPresent\000?0\t\251\000?\000\006\000\t\211\022?6\211\010?6\207\203\021H\214\n\210\203\001H\214\002?6\016\000\203\004\203\005@\203\004\203\004\207?\000\214\n\210\203\001H\214\002\207\203\014\026\000\t\211\010?J\203\005@\200\203\005@\202\022\000\003?d6Json2.JsonDocumentSqlExists\202\003?f\000\002\017\003?L\000\003?N\000\003?P\000\003?R\000\027?T?<\t\211\014?V\211\002?V\203\001H\016\000\203\004\203\005@\203\004\203\004?\004\026\000\t\211\010?|\203\005@\200\203\005@\202\022\000\003?\210\"Json2.CompilePath\202\003?\212\000\002\017\003?~\000\003?\200\000\003?\202\000\003?\204\000?4\036\010\000?\\7?`\003?^\000\276\001\'?6\010\000\000\000/" ; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2101;fline=graph_execute.cpp:162;graph_constructed=digraph program {N0[shape=box, label="N0(0):{\"p\":{\"v\":\"$.key\"},\"o\":\"15\",\"t\":\"Const\"}\n"]; N1[shape=box, label="N4(15):{\"i\":\"6,15\",\"p\":{\"kernel\":{\"class_name\":\"SIMPLE\"}},\"o\":\"16\",\"t\":\"Calculation\"}\nREMOVE:15,6"]; N0 -> N1[label="1"]; N3 -> N1[label="2"]; N2[shape=box, label="N2(2):{\"i\":\"0\",\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"6\",\"t\":\"FetchOriginalData\"}\n",style=filled,color="#FFFF88"]; N5 -> N2[label="1"]; N3[shape=box, label="N3(7):{\"i\":\"6\",\"p\":{\"address\":{\"name\":\"json_binary\",\"id\":6}},\"o\":\"6\",\"t\":\"AssembleOriginalData\"}\n",style=filled,color="#FFFF88"]; N2 -> N3[label="1"]; N4[shape=box, label="N5(15):{\"i\":\"16\",\"t\":\"Projection\"}\n",style=filled,color="#FFAAAA"]; N1 -> N4[label="1"]; N5[shape=box, label="N1(0):{\"p\":{\"data\":[{\"name\":\"json_binary\",\"id\":6}]},\"o\":\"0\",\"t\":\"ReserveMemory\"}\n"]; N0->N5->N2->N3->N1->N4[color=red]; }; 
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=332;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[]},{"owner_id":1,"inputs":[{"from":0},{"from":3}]},{"owner_id":2,"inputs":[{"from":5}]},{"owner_id":3,"inputs":[{"from":2}]},{"owner_id":4,"inputs":[{"from":1}]},{"owner_id":5,"inputs":[]}],"nodes":{"1":{"p":{"i":"6,15","p":{"kernel":{"class_name":"SIMPLE"}},"o":"16","t":"Calculation"},"w":15,"id":1},"3":{"p":{"i":"6","p":{"address":{"name":"json_binary","id":6}},"o":"6","t":"AssembleOriginalData"},"w":7,"id":3},"2":{"p":{"i":"0","p":{"data":[{"name":"json_binary","id":6}]},"o":"6","t":"FetchOriginalData"},"w":2,"id":2},"5":{"p":{"p":{"data":[{"name":"json_binary","id":6}]},"o":"0","t":"ReserveMemory"},"w":0,"id":5},"4":{"p":{"i":"16","t":"Projection"},"w":15,"id":4},"0":{"p":{"p":{"v":"$.key"},"o":"15","t":"Const"},"w":0,"id":0}}}; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow10BinaryTypeE; json_binary: [ 7B226B6579223A2276616C7565227D, 5B5D ] json_binary: [ 010200002100000014000000030300000200000000040000C00400006B65790076616C756500, 010100000000000000000000 ] FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; FALLBACK_ACTOR_LOGGING;priority=NOTICE;component=332;fline=columnshard_ut_common.h:513;T=N5arrow9UInt8TypeE; |62.8%| [TA] $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpIndexes::CheckUpsertNonEquatableType+NotNull >> KqpAcl::AclForOltpAndOlap+isOlap [GOOD] >> KqpAcl::AclForOltpAndOlap-isOlap >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink >> KqpUniqueIndex::UpsertImplicitNullInComplexFk >> KqpVectorIndexes::VectorIndexIsNotUpdatable >> KqpUniqueIndex::UpdateFkAlreadyExist >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity >> KqpIndexes::SecondaryIndexOrderBy2 >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate >> KqpIndexes::SelectFromAsyncIndexedTable >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NewlineDelimited [GOOD] >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter >> EraseRowsTests::ConditionalEraseRowsShouldNotErase >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] >> KqpAcl::AclForOltpAndOlap-isOlap [GOOD] >> KqpAcl::AclDml-UseSink-IsOlap >> KqpIndexes::CheckUpsertNonEquatableType+NotNull [GOOD] >> KqpIndexes::CheckUpsertNonEquatableType-NotNull >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx4 [GOOD] >> TestKinesisHttpProxy::UnauthorizedGetShardIteratorRequest [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_login/unittest >> TSchemeShardLoginTest::CheckThatLockedOutParametersIsRestoredFromLocalDb [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:52.108913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:52.108944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:52.108950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:52.108956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:52.108973Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:52.108977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:52.108986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:52.109001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:52.109113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:52.109180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:52.131871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:52.131902Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:52.136135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:52.136249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:52.136286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:52.138499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:52.138558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:52.138686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.138742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:52.139397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.139451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 
2025-06-03T10:26:52.139745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.139757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:52.139767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:52.139776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.139782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:52.139802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.141181Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:52.163098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:26:52.163171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.163237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:52.163283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:52.163295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.163910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.163936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:52.163979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.163989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:52.163996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do 
next state 2025-06-03T10:26:52.164001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:52.164434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.164446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:52.164451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:52.164801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.164811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:52.164817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.164824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:52.165528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:52.166105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:52.166147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:52.166346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:52.166374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:52.166384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.166469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:52.166478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:52.166512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: 
IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:52.166524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:52.166946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:52.166957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:52.166989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... Table profiles were not loaded 2025-06-03T10:27:17.604084Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:17.604184Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604205Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604216Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604284Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604298Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:27:17.604344Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604356Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604367Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604381Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604391Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604409Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604445Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604461Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604506Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for 
KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604514Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604534Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604546Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604557Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604598Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604609Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604629Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604662Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604672Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604691Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604698Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.604705Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:17.636915Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:17.637624Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:17.637649Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:17.637797Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:17.637814Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:17.637823Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:17.648018Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [5:369:2337] sender: [5:427:2058] recipient: [5:15:2062] 2025-06-03T10:27:17.700015Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:27:17.700044Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:46: TTxLogin RotateKeys at schemeshard: 72057594046678944 2025-06-03T10:27:17.822412Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Error: "User user1 login denied: too many failed password attempts", at schemeshard: 72057594046678944 2025-06-03T10:27:17.822488Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:17.822500Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:17.822571Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:17.822580Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:418:2375], at schemeshard: 72057594046678944, txId: 0, path id: 1 2025-06-03T10:27:17.822752Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 0 2025-06-03T10:27:19.825578Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:43: TTxLogin Execute at schemeshard: 72057594046678944 2025-06-03T10:27:19.845824Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__login.cpp:85: TTxLogin Complete, result: Token: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjM5LCJpYXQiOjE3NDg5NDY0MzksInN1YiI6InVzZXIxIn0.LzvYPrGza0lonASjHKfnMtQEZhfhM1dxoq1cV-0UPgeL_WbPGZwSeD6H4_tAt0J7NRi01vE0frbxc-qjYAFX6JK-8JwI61XytU1EnQ-rzO3Cpe66OmEpX7Gfytjrvw7DpUgusKgyi3Xl1KEtMCF6d2Qz1aPj7bPpTVwteUlU-GF6zHHvcZC8TAZgkjNIkOG8q-ddKafvPW7x9-vboKOE8t24Tc8sZRp3QKb8HV81HEoAE2fsdWIxyZEg9TcMq50VRHzUzmwhoTU_Y6t059dLRFFGa1iHp7WtcEmD911KFXSmEEgrUHlYFVVvcazQpN_u-xNjyCPIYZFFIDVTj_N13A" SanitizedToken: "eyJhbGciOiJQUzI1NiIsImtpZCI6IjMifQ.eyJhdWQiOlsiXC9NeVJvb3QiXSwiZXhwIjoxNzQ4OTg5NjM5LCJpYXQiOjE3NDg5NDY0MzksInN1YiI6InVzZXIxIn0.**" IsAdmin: true, at schemeshard: 72057594046678944 2025-06-03T10:27:19.846009Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:19.846078Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 66us result status StatusSuccess 2025-06-03T10:27:19.846206Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 4 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { PublicKeys { KeyId: 1 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApyyHGS7SZRc1vdPcMJXw\n5h9ccbj/5SC+CdLEOXbANnskCDMPkbYoR6AxYNu5r0esMhx8cbu7sIC+rddwbLLH\n9xsewyhqBYfRq5Tv5EUz+4rxrE1l8DoY5KnoLU5UyeO6PIx7eGgGVzz6Pe03UhPj\nP4irCjBwauvy9ILr1K0olzJlrwB9MkVr65ji33WccDVhGgiiyY98edRrXEwhPvqd\noP5WkdNr8W1quY8L/OXDCxiK6iFqLRy2ha4S1dxtDsKyJ1Y8hDsotOqJPiYCSkoF\nQ12FZ9p2X0pmaj/cCnDAu7XG5yTHGY1ejJoBA2Vsz4PLDbTZVuUvGCCa2jcDcqkV\nTwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032835174 } PublicKeys { KeyId: 2 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyy+zcXHLZM6IeJCIiCq2\nD0H8AsGRvGRBLm4k/OCWLtDvqQMgFq42J9r74cMOJAjTQ+0SYunKPxd+SsQdLsiZ\nW2nxi8uO4NzT3grt580DsFJ8D2YJtiDqXHgKpaAk/BSBK6D1NOIj5AwwnY2ItEVj\n58sECvbwivuEzr/RUsAaQh5hw/1nZA+5I6L2/54XpX9wu6oAXy8xVnWJjj/2y+FG\nULEU4RbixgZx/qPPzym0nJbsOrPSuAnNgPIkrKZtFoX8It2Y06A69QEuXZLwN6b8\nhJwKZM7bwuDlzPDdZ9mftMkeT/JTixzgD2QFuc1KJmCB2uX5LgNHyDkfKt3hJ7ln\nnwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032835576 } PublicKeys { KeyId: 3 KeyDataPEM: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3a/R2Cuh7nFnHGo8mydC\nJO7sw46fYr5KOjApVVlVk+XDiP+xUZNzW8ICTLtCpnWsKfObgm9tdaKYk9nKN8vK\npPYGgVq4t3pSrGxLKWm0Kqd+QKdS6SjjIoCU5Hj41r4Q8ISRWzCKwrTmSlGSIS+h\nEeZdAAjSDFCT5MaHUHYaaJdIDuyhLWkfxXlUKAlZVzu50KzjsXeRZ3QQNUXR3Tsm\nld/sygNl3NRGgvrjehyvwJc+xATypAXonydLziC8s8jattqcuaY+u7WX0A/A4LyB\nsZlgtw72JcsvxzTp4uDfq3kOYCfpFp96kpBeaWAQIpCqFbDnaOOqnDixCwFTf87S\nLwIDAQAB\n-----END PUBLIC KEY-----\n" ExpiresAt: 1749032837817 } Sids { Name: "user1" Type: USER } Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TestKinesisHttpProxy::TestRequestWithWrongRegion >> TestYmqHttpProxy::TestCreateQueue [GOOD] >> KqpVectorIndexes::VectorIndexIsNotUpdatable [GOOD] >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx4 [GOOD] Test command err: iteration# 4 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 10 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 16 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 22 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 28 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 34 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 40 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 46 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 
blobsUnwritten# 0 iteration# 52 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 58 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 64 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 70 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 76 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 82 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 88 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 94 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 100 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 106 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 112 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 118 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 124 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 130 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 136 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 142 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 148 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 154 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 160 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 166 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 172 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 178 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 184 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 190 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 196 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 202 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 208 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 214 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 220 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 226 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 232 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 238 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 244 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 250 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 256 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 262 BlobsWritten# 490 
blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 268 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 274 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 280 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 286 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 292 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 298 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 304 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 310 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 316 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 322 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 328 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 334 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 340 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 346 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 352 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 358 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 364 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 370 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 376 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 382 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 388 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 394 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 400 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 406 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 412 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 418 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 424 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 430 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 436 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 442 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 448 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 454 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 460 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 466 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 472 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 
blobsUnwritten# 0 iteration# 478 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 484 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx1 [GOOD] >> KqpUniqueIndex::UpsertImplicitNullInComplexFk [GOOD] >> KqpVectorIndexes::BuildIndexTimesAndUser >> KqpIndexes::UpsertWithoutExtraNullDelete+UseSink [GOOD] >> KqpIndexes::UpsertWithNullKeysSimple |62.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> KqpIndexes::SelectFromAsyncIndexedTable [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout |62.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut |62.8%| [TA] {RESULT} $(B)/ydb/core/statistics/database/ut/test-results/unittest/{meta.json ... results_accumulator.log} |62.8%| [LD] {RESULT} $(B)/ydb/core/security/certificate_check/ut/ydb-core-security-certificate_check-ut >> EraseRowsTests::ConditionalEraseRowsShouldNotErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors >> KqpIndexes::SecondaryIndexOrderBy2 [GOOD] >> KqpIndexes::SecondaryIndexReplace+UseSink >> DistributedEraseTests::DistributedEraseTxShouldFailOnVariousErrors [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldErase |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx1 [GOOD] Test command err: iteration# 1 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 7 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 13 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 19 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 25 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 31 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 37 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 43 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 49 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 55 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 61 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 67 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 73 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 79 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 85 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 91 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 97 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 103 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 
99 blobsUnwritten# 0 iteration# 109 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 ... iteration# 487 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 (counters identical for every sixth iteration from 109 through 487) >> KqpIndexes::SecondaryIndexUpsert1DeleteUpdate [GOOD] >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex >> KqpUniqueIndex::UpdateFkAlreadyExist [GOOD] >> KqpUniqueIndex::UpdateFkPkOverlap >> KqpIndexes::CheckUpsertNonEquatableType-NotNull [GOOD] >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL >> KqpAcl::AclDml-UseSink-IsOlap [GOOD] >> KqpAcl::AclDml+UseSink-IsOlap >> KqpUniqueIndex::UpdateOnHidenChanges+DataColumn [GOOD] >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn |62.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> YdbOlapStore::LogCountByResource [GOOD] |62.9%| [TM] {default-linux-x86_64, relwithdebinfo}
ydb/core/tx/datashard/ut_object_storage_listing/unittest >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] >> KqpIndexes::UpsertWithNullKeysSimple [GOOD] >> KqpIndexes::UpsertWithNullKeysComplex >> EraseRowsTests::ConditionalEraseRowsShouldErase [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks >> TestKinesisHttpProxy::TestRequestWithWrongRegion [GOOD] >> TestYmqHttpProxy::TestCreateQueueWithBadQueueName [GOOD] >> KqpIndexes::SecondaryIndexReplace+UseSink [GOOD] >> KqpIndexes::SecondaryIndexReplace-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::BuildIndexTimesAndUser [GOOD] Test command err: Trying to start YDB, gRPC: 1318, MsgBus: 19288 2025-06-03T10:27:18.469481Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667754930051723:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:18.475851Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c77/r3tmp/tmp1yPO2S/pdisk_1.dat 2025-06-03T10:27:18.622980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667754930051555:2079] 1748946438456779 != 1748946438456782 2025-06-03T10:27:18.624723Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1318, node 1 2025-06-03T10:27:18.654007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.654037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:18.661686Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:18.663378Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.663382Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.663384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.663431Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19288 TClient is connected to server localhost:19288 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:18.848478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.852209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:18.853863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.933671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.976308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.002940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.145812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759225020490:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.145842Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.284490Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.321960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.397037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.416536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.442029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.466312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.531106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.612355Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759225021161:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.612382Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.612606Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759225021166:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.613772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:19.618184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:19.618288Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667759225021168:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:19.695450Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667759225021219:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:19.882177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:21.291423Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9mtte3v36zpp98fb82sj, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWFlMjZiY2MtMzY2MDYxYzQtN2QwZDA3MzgtZDdmNmI1OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:21.293737Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NWFlMjZiY2MtMzY2MDYxYzQtN2QwZDA3MzgtZDdmNmI1OWQ=, ActorId: [1:7511667759225022215:2566], ActorState: ExecuteState, TraceId: 01jwtn9mtte3v36zpp98fb82sj, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 16878, MsgBus: 3177 2025-06-03T10:27:21.727023Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667769323901831:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:21.727353Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c77/r3tmp/tmpWyJHss/pdisk_1.dat 2025-06-03T10:27:21.791905Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:21.794230Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667769323901795:2079] 1748946441720528 != 1748946441720531 TServer::EnableGrpc on GrpcPort 16878, node 2 2025-06-03T10:27:21.815654Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:21.815667Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:21.815668Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:21.815710Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:21.862689Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:21.862716Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:21.869811Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3177 TClient is connected to server localhost:3177 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:22.041863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.046009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:22.056668Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.090491Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.166599Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.199441Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.664195Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667773618870732:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:22.664219Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:22.677821Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.699454Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.725227Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.753554Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.773799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.799875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.828482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.850748Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667773618871394:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:22.850772Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:22.850898Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667773618871399:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:22.851698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:22.855038Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:22.855122Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667773618871401:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:22.950318Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667773618871452:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:23.298190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.384610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:27:23.416039Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.450943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.491109Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710764:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.518905Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-03T10:27:23.518965Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037927 not found 2025-06-03T10:27:23.518991Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037928 not found >> TestKinesisHttpProxy::TestRequestWithIAM |62.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> TestYmqHttpProxy::TestCreateQueueWithEmptyName |62.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter >> KqpIndexes::SecondaryIndexUpdateOnUsingIndex [GOOD] >> KqpIndexes::SecondaryIndexSelectUsingScripting |62.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |62.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap |62.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_olap/ydb-core-tx-schemeshard-ut_olap >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] >> KqpIndexes::SelectFromIndexesAndFreeSpaceLogicDoesntTimeout [GOOD] >> KqpIndexes::Uint8Index >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] >> 
KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldFailOnVariousErrors [GOOD] Test command err: 2025-06-03T10:27:20.456123Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:20.456260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:20.456300Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e5e/r3tmp/tmp8pM5XH/pdisk_1.dat 2025-06-03T10:27:20.737244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.762199Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:20.763637Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946439423287 != 1748946439423291 2025-06-03T10:27:20.814247Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:20.814301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:20.825944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:20.916000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.944246Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:20.944391Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:20.972567Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:20.972631Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:20.972854Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:20.972866Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:20.972876Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:20.972966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:20.972996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:20.973013Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:20.985665Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:20.996298Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:20.996434Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast 
registration request in state WaitScheme: missing processing params 2025-06-03T10:27:20.996476Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:20.996484Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:20.996490Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:20.996498Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:20.996722Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:20.996756Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:20.996904Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:20.996916Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:20.996929Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:20.996936Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:20.996952Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:20.997000Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:27:20.997104Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:20.997131Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:21.004664Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:21.017640Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:27:21.017703Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:27:21.194156Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:27:21.195494Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:27:21.195536Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:21.195658Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037888 2025-06-03T10:27:21.195672Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:27:21.195687Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:27:21.195781Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:27:21.195834Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:27:21.196065Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:21.196084Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:27:21.196639Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:27:21.196774Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:21.197253Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:27:21.197269Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:21.197577Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:27:21.197615Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:21.197854Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:21.197866Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:21.197873Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:27:21.197895Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:27:21.197909Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:27:21.197924Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:21.199089Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:21.199480Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:27:21.199654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 
281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:27:21.199665Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:27:21.212955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:21.212998Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:21.213011Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:21.214389Z nod ... main_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-03T10:27:25.021204Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-03T10:27:25.021216Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:25.063161Z node 2 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037894 actor [2:1251:3028] 2025-06-03T10:27:25.063238Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:25.065008Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:25.065049Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:25.065229Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037894 2025-06-03T10:27:25.065240Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037894 2025-06-03T10:27:25.065248Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037894 2025-06-03T10:27:25.092038Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:25.092183Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:25.092220Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037894 persisting started state actor id [2:1267:3028] in generation 1 2025-06-03T10:27:25.115803Z node 2 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:25.115844Z node 2 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037894 2025-06-03T10:27:25.115891Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037894 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:25.115909Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037894, actorId: [2:1269:3038] 2025-06-03T10:27:25.115916Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-03T10:27:25.115922Z node 2 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037894, state: WaitScheme 2025-06-03T10:27:25.115929Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-03T10:27:25.116081Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037894 2025-06-03T10:27:25.116112Z node 2 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037894 2025-06-03T10:27:25.116135Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-03T10:27:25.116144Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 
active planned 0 immediate 0 planned 0 2025-06-03T10:27:25.116156Z node 2 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037894 TxInFly 0 2025-06-03T10:27:25.116162Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-03T10:27:25.116176Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1248:3026], serverId# [2:1258:3032], sessionId# [0:0:0] 2025-06-03T10:27:25.116324Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037894 2025-06-03T10:27:25.116399Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037894 txId 281474976715663 ssId 72057594046644480 seqNo 2:7 2025-06-03T10:27:25.116423Z node 2 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715663 at tablet 72075186224037894 2025-06-03T10:27:25.116611Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-03T10:27:25.129618Z node 2 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037894 2025-06-03T10:27:25.129701Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037894 not sending time cast registration request in state WaitScheme 2025-06-03T10:27:25.298137Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1275:3044], serverId# [2:1277:3046], sessionId# [0:0:0] 2025-06-03T10:27:25.298559Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715663 at step 4000 at tablet 72075186224037894 { Transactions { TxId: 281474976715663 AckTo { RawX1: 0 RawX2: 0 } } Step: 4000 MediatorID: 72057594046382081 TabletID: 72075186224037894 } 2025-06-03T10:27:25.298574Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-03T10:27:25.298610Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-03T10:27:25.298621Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:27:25.298633Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [4000:281474976715663] in PlanQueue unit at 72075186224037894 2025-06-03T10:27:25.298710Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037894 loaded tx from db 4000:281474976715663 keys extracted: 0 2025-06-03T10:27:25.298746Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:27:25.298929Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037894 2025-06-03T10:27:25.298946Z node 2 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037894 tableId# [OwnerId: 72057594046644480, LocalPathId: 8] schema version# 1 2025-06-03T10:27:25.299065Z node 2 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037894 mediators count is 1 coordinators count is 1 
buckets per mediator 2 2025-06-03T10:27:25.299172Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037894 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:25.299492Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037894 time 3500 2025-06-03T10:27:25.299499Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-03T10:27:25.299676Z node 2 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037894 step# 4000} 2025-06-03T10:27:25.299688Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-03T10:27:25.299971Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037894 2025-06-03T10:27:25.299982Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037894 2025-06-03T10:27:25.299989Z node 2 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037894 2025-06-03T10:27:25.300007Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [4000 : 281474976715663] from 72075186224037894 at tablet 72075186224037894 send result to client [2:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:27:25.300020Z node 2 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037894 Sending notify to schemeshard 72057594046644480 txId 281474976715663 state Ready TxInFly 0 2025-06-03T10:27:25.300060Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037894 2025-06-03T10:27:25.300143Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037894 2025-06-03T10:27:25.300162Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:27:25.300184Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:27:25.300319Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037891 2025-06-03T10:27:25.300346Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037892 2025-06-03T10:27:25.300366Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037893 2025-06-03T10:27:25.300414Z node 2 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:25.300584Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037894 coordinator 72057594046316545 last step 0 next step 4000 2025-06-03T10:27:25.300645Z node 2 :TX_DATASHARD DEBUG: 
datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715663 datashard 72075186224037894 state Ready 2025-06-03T10:27:25.300653Z node 2 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037894 Got TEvSchemaChangedResult from SS at 72075186224037894 2025-06-03T10:27:25.318794Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1304:3067], serverId# [2:1305:3068], sessionId# [0:0:0] 2025-06-03T10:27:25.318882Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1304:3067], serverId# [2:1305:3068], sessionId# [0:0:0] 2025-06-03T10:27:25.329408Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1309:3072], serverId# [2:1310:3073], sessionId# [0:0:0] 2025-06-03T10:27:25.329488Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1309:3072], serverId# [2:1310:3073], sessionId# [0:0:0] 2025-06-03T10:27:25.331735Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1314:3077], serverId# [2:1315:3078], sessionId# [0:0:0] 2025-06-03T10:27:25.331787Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037894, clientId# [2:1314:3077], serverId# [2:1315:3078], sessionId# [0:0:0] |62.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |62.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk |62.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/ydb-core-blobstorage-ut_blobstorage-ut_stop_pdisk >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::CreateTableWithExplicitAsyncIndexSQL [GOOD] Test command err: Trying to start YDB, gRPC: 25020, MsgBus: 13034 2025-06-03T10:27:17.954994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667751352682378:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:17.955201Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c7e/r3tmp/tmp3njwYY/pdisk_1.dat 2025-06-03T10:27:18.174035Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:18.177393Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667751352682182:2079] 1748946437944847 != 1748946437944850 TServer::EnableGrpc on GrpcPort 25020, node 1 2025-06-03T10:27:18.201734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.201746Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.201748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.201785Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13034 TClient is connected to server localhost:13034 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:27:18.313617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.313654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:18.324266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:18.346347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.350843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:18.358893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.394646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.438881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.465763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.075253Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759942618418:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.075287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.182567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.209482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.240645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.258945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.291374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.317811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.348616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.387063Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759942619069:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.387087Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.387195Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759942619074:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.388140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:19.396255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:27:19.396379Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667759942619076:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:27:19.471897Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667759942619127:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:19.839604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 61114, MsgBus: 18012 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c7e/r3tmp/tmpDDlpAc/pdisk_1.dat 2025-06-03T10:27:20.376049Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:20.401438Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667762408537458:2079] 1748946440347425 != 1748946440347428 2025-06-03T10:27:20.408308Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61114, node 2 2025-06-03T10:27:20.441801Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:20.441815Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:20.441817Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:20.441865Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:20.477812Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:20.477841Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:20.489914Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18012 TClient is connected to server localhost:18012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateV ... [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:21.626415Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667766703507060:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:21.627406Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:21.631329Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:21.631460Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667766703507062:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:21.700505Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667766703507113:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:21.923888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:22.198281Z node 2 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill Trying to start YDB, gRPC: 5633, MsgBus: 21591 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c7e/r3tmp/tmpxsiTly/pdisk_1.dat 2025-06-03T10:27:22.852344Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:22.859784Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:22.860278Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667772861653621:2079] 1748946442803814 != 1748946442803817 TServer::EnableGrpc on GrpcPort 5633, node 3 2025-06-03T10:27:22.878028Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:22.878045Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:22.878047Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:22.878105Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21591 2025-06-03T10:27:22.918103Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:22.918134Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:22.918541Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21591 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:22.970463Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.977744Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:22.994529Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.025559Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.056029Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.069236Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.474113Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667777156622561:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.474145Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.484720Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.499237Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.516508Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.531885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.552246Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.569969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.592514Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.631570Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667777156623215:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.631623Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.631775Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667777156623220:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.633006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:23.636451Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:23.636567Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667777156623222:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:23.692890Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667777156623273:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:23.920384Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.980605Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667777156623816:3720] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/TestTable\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484" severity: 1 } 2025-06-03T10:27:25.040218Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> KqpAcl::AclDml+UseSink-IsOlap [GOOD] >> KqpAcl::AclDml-UseSink+IsOlap >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] |62.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |62.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut |62.9%| [LD] {RESULT} $(B)/ydb/core/actorlib_impl/ut/ydb-core-actorlib_impl-ut >> KqpPrefixedVectorIndexes::PrefixedVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> KqpUniqueIndex::InsertFkAlreadyExist ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::UpsertWithNullKeysComplex [GOOD] Test command err: Trying to start YDB, gRPC: 23356, MsgBus: 63699 2025-06-03T10:27:18.339179Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667755376137428:2220];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c6e/r3tmp/tmpFAmu8j/pdisk_1.dat 2025-06-03T10:27:18.392008Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:18.448933Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667755376137219:2079] 1748946438316151 != 1748946438316154 2025-06-03T10:27:18.455745Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23356, node 1 2025-06-03T10:27:18.472964Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.472986Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.472988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.473028Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-03T10:27:18.493129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.493162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:63699 2025-06-03T10:27:18.497565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63699 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:18.658353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.669907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:18.682591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.743948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.806748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.846775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.177533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759671106153:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.177566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.294340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.324766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.347127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.390064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.422712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.443757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.466434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.506652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759671106817:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.506689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.506894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759671106822:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.507919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:19.513022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:19.513132Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667759671106824:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:19.614246Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667759671106875:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.458895Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [1:7511667755376137688:2205]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:20.458912Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:20.458923Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [1:7511667755376137688:2205], Recipient [1:7511667755376137688:2205]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:20.458926Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:20.459857Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667763966074445:3572], Recipient [1:7511667755376137688:2205]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.459868Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.459871Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:20.459880Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667763966074441:3569], Recipient [1:7511667755376137688:2205]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-03T10:27:20.459881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:20.474812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976715672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:49184" , at schemesha ... 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:27:25.353659Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:27:25.353662Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:27:25.353664Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:27:25.353667Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-03T10:27:25.353675Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7511667778321714902:2152] message: TxId: 281474976710760 2025-06-03T10:27:25.353680Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:27:25.353683Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-03T10:27:25.353685Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710760:0 2025-06-03T10:27:25.353693Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 5 2025-06-03T10:27:25.353824Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.353832Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667778321714902:2152] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710760 at schemeshard: 72057594046644480 2025-06-03T10:27:25.353850Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124998, Sender [3:7511667778321714902:2152], Recipient [3:7511667778321714902:2152]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710760 2025-06-03T10:27:25.353853Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5040: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-03T10:27:25.353855Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-03T10:27:25.353858Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710760 2025-06-03T10:27:25.353866Z node 3 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 281474976715678 2025-06-03T10:27:25.353881Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsCancellationRequested: 
0, Issue: , SubscribersCount: 0, CreateSender: [3:7511667786911652002:2553], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1748946445358, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:27:25.353891Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.353932Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.353940Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Unlocking 2025-06-03T10:27:25.353947Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Unlocking TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7511667786911652002:2553], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1748946445358, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:27:25.353948Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.353951Z node 3 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:27:25.353979Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.353985Z node 3 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Done 2025-06-03T10:27:25.353991Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715678 Done TBuildInfo{ IndexBuildId: 281474976715678, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobal, IndexName: IndexName2, IndexColumn: IndexColumn2, State: Done, 
IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [3:7511667786911652002:2553], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 1748946445358, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 2, upload bytes: 85, read rows: 2, read bytes: 85 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:27:25.353994Z node 3 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 281474976715678, subscribers count# 0 2025-06-03T10:27:25.353997Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.354002Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.354854Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274792450, Sender [3:7511667786911652157:2562], Recipient [3:7511667778321714902:2152]: NKikimrIndexBuilder.TEvGetRequest DatabaseName: "/Root" IndexBuildId: 281474976715678 2025-06-03T10:27:25.354863Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5017: StateWork, processing event TEvIndexBuilder::TEvGetRequest 2025-06-03T10:27:25.354877Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/Root" IndexBuildId: 281474976715678 2025-06-03T10:27:25.354947Z node 3 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 281474976715678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1748946445 } EndTime { seconds: 1748946445 } } 2025-06-03T10:27:25.354950Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.354959Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.354980Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667786911652157:2562] msg type: 274792451 msg: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 281474976715678 State: STATE_DONE Settings { source_path: "/Root/TestTable" index { name: "IndexName2" index_columns: "IndexColumn2" global_index { } } max_shards_in_flight: 32 ScanSettings { } } Progress: 100 StartTime { seconds: 1748946445 } EndTime { seconds: 1748946445 } } at schemeshard: 72057594046644480 2025-06-03T10:27:25.359233Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667786911652162:3832], Recipient [3:7511667778321714902:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359246Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359249Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:25.359254Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667786911652150:3823], Recipient [3:7511667778321714902:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359256Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359257Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:25.359766Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667786911652161:3831], Recipient [3:7511667778321714902:2152]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359773Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.359774Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/ut/unittest >> YdbOlapStore::LogCountByResource [GOOD] Test command err: 2025-06-03T10:26:09.518668Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667459028684084:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:09.518761Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027fe/r3tmp/tmp8dmcm0/pdisk_1.dat 2025-06-03T10:26:09.620310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:09.620341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:09.624127Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:09.626568Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15930, node 1 2025-06-03T10:26:09.640291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:09.640309Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:09.640311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:09.640358Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8429 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:26:09.681744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... self_check_result: GOOD issue_log { id: "YELLOW-f489-1231c6b1" status: YELLOW message: "Database has compute issues" location { database { name: "/Root" } } reason: "YELLOW-1ba8-1231c6b1" type: "DATABASE" level: 1 } issue_log { id: "YELLOW-1ba8-1231c6b1" status: YELLOW message: "Compute is overloaded" location { database { name: "/Root" } } reason: "YELLOW-e9e2-1231c6b1-1" reason: "YELLOW-e9e2-1231c6b1-2" reason: "YELLOW-e9e2-1231c6b1-3" type: "COMPUTE" level: 2 } issue_log { id: "YELLOW-e9e2-1231c6b1-1" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 1 host: "::1" port: 12001 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-2" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 2 host: "::1" port: 12002 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } issue_log { id: "YELLOW-e9e2-1231c6b1-3" status: YELLOW message: "LoadAverage above 100%" location { compute { node { id: 3 host: "::1" port: 12003 } } database { name: "/Root" } } type: "LOAD_AVERAGE" level: 4 } location { id: 1 host: "::1" port: 12001 } 2025-06-03T10:26:10.757312Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667465571636714:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.757421Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:26:10.760966Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667463941900852:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.761072Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:26:10.774614Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511667461858534398:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:10.774811Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027fe/r3tmp/tmp1s4QIo/pdisk_1.dat 2025-06-03T10:26:10.808436Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27975, node 4 2025-06-03T10:26:10.852782Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:10.852802Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:10.852804Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:10.852859Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:10.857611Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.857654Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.866496Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:10.875001Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.875034Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:10.876651Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 6 Cookie 6 2025-06-03T10:26:10.877831Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:10.877994Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:10.878011Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:14618 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:26:10.881372Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-03T10:26:10.882004Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting...2025-06-03T10:26:10.891168Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976725657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:15.757521Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7511667465571636714:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.757565Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:26:15.760601Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[5:7511667463941900852:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.760649Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:26:15.772851Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7511667461858534398:2143];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:15.772897Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Killing node 4 Killing node 5 2025-06-03T10:26:25.799907Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:26:25.799930Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded Killing node 6 2025-06-03T10:26:31.749080Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7511667552143924353:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:26:31.749100Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027fe/r3tmp/tmpui9rSJ/pdisk_1.dat 2025-06-03T10:26:31.772662Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 910 ... /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Sending channels info to compute actor: [47:7511667774982236942:3359], channels: 1 2025-06-03T10:27:22.662142Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7511667774982236942:3359], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { } 2025-06-03T10:27:22.662150Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. 
Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [47:7511667774982236942:3359], 2025-06-03T10:27:22.662198Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:349: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [47:7511667774982236148:3079], seqNo: 1, nRows: 1 2025-06-03T10:27:22.662226Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7511667774982236942:3359], task: 1, state: COMPUTE_STATE_EXECUTING, stats: { CpuTimeUs: 110 Tasks { TaskId: 1 CpuTimeUs: 66 FinishTimeMs: 1748946442662 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 7 BuildCpuTimeUs: 59 HostName: "ghrun-pyvh3niaay" NodeId: 47 CreateTimeMs: 1748946442661 CurrentWaitOutputTimeUs: 12 UpdateTimeMs: 1748946442662 } MaxMemoryUsage: 1048576 } 2025-06-03T10:27:22.662244Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [47:7511667774982236942:3359], 2025-06-03T10:27:22.662254Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1805: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Forwarded TEvStreamData to [47:7511667774982236146:3078] 2025-06-03T10:27:22.662258Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [47:7511667774982236942:3359], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==. TraceId : 01jwtn9nxvec6asb4fyn02zmer. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646926 2025-06-03T10:27:22.662292Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [47:7511667774982236942:3359], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==. TraceId : 01jwtn9nxvec6asb4fyn02zmer. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
Received channels info: Update { Id: 1 TransportVersion: DATA_TRANSPORT_OOB_PICKLE_1_0 SrcTaskId: 1 SrcEndpoint { ActorId { RawX1: 7511667774982236942 RawX2: 4503801490836767 } } DstEndpoint { ActorId { RawX1: 7511667774982236939 RawX2: 4503801490836487 } } InMemory: true } 2025-06-03T10:27:22.662299Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715674, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-03T10:27:22.662591Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:415: TxId: 281474976715674, send ack to channelId: 1, seqNo: 1, enough: 0, freeSpace: 8388552, to: [47:7511667774982236943:3359] 2025-06-03T10:27:22.662604Z node 47 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [47:7511667774982236942:3359], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==. TraceId : 01jwtn9nxvec6asb4fyn02zmer. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-03T10:27:22.662609Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715674, task: 1. Tasks execution finished 2025-06-03T10:27:22.662611Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [47:7511667774982236942:3359], TxId: 281474976715674, task: 1. Ctx: { SessionId : ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==. TraceId : 01jwtn9nxvec6asb4fyn02zmer. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-03T10:27:22.662632Z node 47 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715674, task: 1. pass away 2025-06-03T10:27:22.662657Z node 47 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715674;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:27:22.662722Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [47:7511667774982236942:3359], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 295 Tasks { TaskId: 1 CpuTimeUs: 66 FinishTimeMs: 1748946442662 OutputRows: 1 OutputBytes: 3 ResultRows: 1 ResultBytes: 3 ComputeCpuTimeUs: 7 BuildCpuTimeUs: 59 HostName: "ghrun-pyvh3niaay" NodeId: 47 CreateTimeMs: 1748946442661 UpdateTimeMs: 1748946442662 } MaxMemoryUsage: 1048576 } 2025-06-03T10:27:22.662729Z node 47 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [47:7511667774982236942:3359] 2025-06-03T10:27:22.662760Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:27:22.662768Z node 47 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [47:7511667774982236939:3079] TxId: 281474976715674. Ctx: { TraceId: 01jwtn9nxvec6asb4fyn02zmer, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000295s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-03T10:27:22.662784Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-03T10:27:22.662888Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, txInfo Status: Active Kind: ReadOnly TotalDuration: 0 ServerDuration: 391.834 QueriesCount: 1 2025-06-03T10:27:22.662904Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:27:22.662928Z node 47 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:27:22.662932Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, EndCleanup, isFinal: 1 2025-06-03T10:27:22.662947Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: ExecuteState, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Sent query response back to proxy, proxyRequestId: 5, proxyId: [47:7511667766392298512:2270] 2025-06-03T10:27:22.662950Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: unknown state, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Cleanup temp tables: 0 2025-06-03T10:27:22.663165Z node 47 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946442000, txId: 18446744073709551615] shutting down 2025-06-03T10:27:22.663190Z node 47 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=47&id=ZDUyYzVlMzAtOTMwOTFiYjQtYmFkYzhiYmEtZDM1NDY4YQ==, ActorId: [47:7511667774982236148:3079], ActorState: unknown state, TraceId: 01jwtn9nxvec6asb4fyn02zmer, Session actor destroyed 2025-06-03T10:27:23.151568Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037888;parent=[47:7511667766392299301:2326];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-03T10:27:23.151607Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037891;parent=[47:7511667766392299287:2324];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-03T10:27:23.151614Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: 
tablet_id=72075186224037889;parent=[47:7511667766392299290:2325];fline=actor.cpp:33;event=skip_flush_writing; 2025-06-03T10:27:23.151620Z node 47 :TX_COLUMNSHARD DEBUG: log.cpp:784: tablet_id=72075186224037890;parent=[47:7511667766392299286:2323];fline=actor.cpp:33;event=skip_flush_writing; ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateOnHidenChanges-DataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 21362, MsgBus: 5976 2025-06-03T10:27:18.981247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667754071017187:2202];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c4c/r3tmp/tmpNQVnzo/pdisk_1.dat 2025-06-03T10:27:19.101819Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:19.192429Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:19.192461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:19.192689Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667754071017022:2079] 1748946438975620 != 1748946438975623 2025-06-03T10:27:19.206768Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:19.207674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21362, node 1 2025-06-03T10:27:19.238522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:19.238535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:19.238537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:19.238587Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5976 TClient is connected to server localhost:5976 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
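Editor's note: the executer/compute-actor exchange in the trace above (Send TEvStreamData with seqNo: 1, the "send ack to channelId: 1, seqNo: 1 ... freeSpace: 8388552" reply, and the compute actor "waiting for chunk delivery" before pass away) is a flow-controlled streaming channel. Below is a minimal sketch of that seqNo/window pattern; the names are hypothetical stand-ins, not YDB's actual event types or classes.

```cpp
#include <cstdint>
#include <deque>
#include <iostream>
#include <string>

struct Chunk { uint64_t seqNo; std::string payload; };

class StreamChannel {
public:
    explicit StreamChannel(uint64_t freeSpace) : freeSpace_(freeSpace) {}

    // Producer side: enqueue a chunk only if the consumer's advertised
    // window still has room (back-pressure).
    bool Send(std::string payload) {
        if (payload.size() > freeSpace_) return false;
        freeSpace_ -= payload.size();
        inflight_.push_back({nextSeqNo_++, std::move(payload)});
        return true;
    }

    // Consumer side: ack everything up to seqNo and re-advertise the window.
    void Ack(uint64_t seqNo, uint64_t grantedSpace) {
        while (!inflight_.empty() && inflight_.front().seqNo <= seqNo)
            inflight_.pop_front();
        freeSpace_ = grantedSpace;
    }

    bool AllDelivered() const { return inflight_.empty(); }

private:
    uint64_t nextSeqNo_ = 1;
    uint64_t freeSpace_;
    std::deque<Chunk> inflight_;
};

int main() {
    StreamChannel ch(8388552);   // window size as advertised in the log
    ch.Send("row");              // seqNo 1, like nRows: 1 above
    ch.Ack(1, 8388552);          // ack for seqNo 1 returns buffer space
    std::cout << std::boolalpha << ch.AllDelivered() << "\n";  // true
}
```

The ack both confirms delivery up to a seqNo and re-advertises the consumer's buffer, which is why the executer's ack in the trace carries a freeSpace value; only after the final chunk is acked does the compute actor report "All channels and sinks finished" and pass away.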
2025-06-03T10:27:19.486270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.494434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:19.504886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.597453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.688255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.738358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:20.640539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762660953255:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.640811Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.689047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.714810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.738709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.758602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.777019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.796021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.817106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.849250Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762660953906:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.849275Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.849614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762660953911:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.850681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:20.863386Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667762660953916:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:27:20.950472Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667762660953967:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:21.578699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:22.448989Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9nry9pf4534m40fdzm8f, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjkzNzgxMC0zNmUxZjEzNi0xYTM3NDNmMy0zYjJmYTVlYQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:22.460298Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NjkzNzgxMC0zNmUxZjEzNi0xYTM3NDNmMy0zYjJmYTVlYQ==, ActorId: [1:7511667766955922274:2568], ActorState: ExecuteState, TraceId: 01jwtn9nry9pf4534m40fdzm8f, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 25472, MsgBus: 15406 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c4c/r3tmp/tmp9Wanth/pdisk_1.dat 2025-06-03T10:27:23.720128Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:23.722123Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667775892131809:2079] 1748946443660432 != 1748946443660435 2025-06-03T10:27:23.740643Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25472, node 2 2025-06-03T10:27:23.773989Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:23.774004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:23.774007Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:23.774063Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:23.786074Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:23.786105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:23.787596Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15406 TClient is connected to server localhost:15406 WaitRootIsUp 'Root'... 
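Editor's note: the warnings above show a common bootstrap race. The workload service fetches the default resource pool, gets NOT_FOUND, creates it, tolerates a concurrent creator ("path exist, request accepts it"), and then doublechecks with a scheduled retry. A sketch of that ensure-then-recheck pattern follows, assuming hypothetical Status and callback types rather than the actual YDB interfaces.

```cpp
#include <functional>
#include <iostream>
#include <optional>
#include <string>

enum class Status { Ok, NotFound, AlreadyExists };

std::optional<std::string> EnsureDefaultPool(
        std::function<Status()> fetch,
        std::function<Status()> create,
        int maxRetries = 3) {
    for (int attempt = 0; attempt < maxRetries; ++attempt) {
        if (fetch() == Status::Ok)
            return "default";                // pool is visible: done
        Status st = create();
        // A concurrent creator winning the race is fine: the request
        // "accepts it", and we simply doublecheck by fetching again.
        if (st == Status::Ok || st == Status::AlreadyExists)
            continue;
        return std::nullopt;                 // hard error
    }
    return std::nullopt;                     // never became visible
}

int main() {
    bool created = false;
    auto fetch  = [&] { return created ? Status::Ok : Status::NotFound; };
    auto create = [&] { created = true; return Status::Ok; };
    std::cout << (EnsureDefaultPool(fetch, create) ? "pool ready" : "failed")
              << "\n";
}
```

Treating AlreadyExists the same as Ok is what makes the create idempotent under concurrency; the retry loop covers the window where the pool is committed but not yet visible, which is exactly the "completed, doublechecking" retry logged above.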
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:23.971080Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.977662Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:23.987142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:24.014688Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:24.087135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.128118Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:24.677855Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667780187100747:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.677880Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.693120Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.729208Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.750139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.774386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.802469Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.850596Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.927852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.957992Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667780187101411:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.958021Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.958129Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667780187101416:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.959091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:24.963050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:24.963121Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667780187101418:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:25.030361Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667784482068765:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:25.275822Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:25.692460Z node 2 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9s7a5nsbq0t4dp7m2xf8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ODY0YThjMWItZWU0NjVhN2QtMjNiYTg1NDUtOWM1MTJlN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:25.692560Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=ODY0YThjMWItZWU0NjVhN2QtMjNiYTg1NDUtOWM1MTJlN2Y=, ActorId: [2:7511667784482069762:2566], ActorState: ExecuteState, TraceId: 01jwtn9s7a5nsbq0t4dp7m2xf8, Create QueryResponse for error on request, msg: |62.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test >> TPQTabletTests::DropTablet ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_erase_rows/unittest >> EraseRowsTests::ConditionalEraseRowsShouldBreakLocks [GOOD] Test command err: 2025-06-03T10:27:20.979333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:20.979447Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:20.979485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e5b/r3tmp/tmpzUpNe3/pdisk_1.dat 2025-06-03T10:27:21.222315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:21.251622Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:21.252996Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946439774868 != 1748946439774872 2025-06-03T10:27:21.300765Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:21.300821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:21.313897Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:21.399840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:21.446917Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:686:2584] 2025-06-03T10:27:21.447012Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:21.482540Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:21.482621Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:21.482858Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:21.482871Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:21.482880Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:21.482961Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:21.482990Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:21.483006Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:710:2584] in generation 1 2025-06-03T10:27:21.483571Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:689:2586] 2025-06-03T10:27:21.483624Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:21.498071Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
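Editor's note: several subsystems above (METADATA_PROVIDER, TX_COLUMNSHARD) log through log.cpp:784 in a compact semicolon-separated key=value form, e.g. `fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=...;error=...`. A minimal parser for that shape, for illustration only:

```cpp
#include <iostream>
#include <map>
#include <sstream>
#include <string>

std::map<std::string, std::string> ParseKv(const std::string& line) {
    std::map<std::string, std::string> out;
    std::stringstream ss(line);
    std::string field;
    while (std::getline(ss, field, ';')) {
        auto eq = field.find('=');
        if (eq != std::string::npos)
            out[field.substr(0, eq)] = field.substr(eq + 1);
    }
    return out;
}

int main() {
    auto kv = ParseKv("fline=actor.cpp:33;event=skip_flush_writing;");
    std::cout << kv["event"] << "\n";  // prints: skip_flush_writing
}
```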
2025-06-03T10:27:21.498127Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:21.498314Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:27:21.498325Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:27:21.498333Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:27:21.498386Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:21.498418Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:21.498431Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:731:2586] in generation 1 2025-06-03T10:27:21.498579Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037890 actor [1:691:2588] 2025-06-03T10:27:21.498622Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:21.500312Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:21.500357Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:21.500562Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037890 2025-06-03T10:27:21.500572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037890 2025-06-03T10:27:21.500580Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037890 2025-06-03T10:27:21.500629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:21.500648Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:21.500662Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037890 persisting started state actor id [1:737:2588] in generation 1 2025-06-03T10:27:21.513650Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:21.519614Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:21.519726Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:21.519760Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:741:2615] 2025-06-03T10:27:21.519767Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:21.519773Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:21.519780Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:21.519911Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:21.519921Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:27:21.519933Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending 
time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:21.519941Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:742:2616] 2025-06-03T10:27:21.519946Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:27:21.519950Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:27:21.519954Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:27:21.519966Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:21.519972Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037890 2025-06-03T10:27:21.519981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037890 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:21.519989Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037890, actorId: [1:743:2617] 2025-06-03T10:27:21.519993Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-03T10:27:21.519997Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037890, state: WaitScheme 2025-06-03T10:27:21.520000Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:27:21.520125Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:21.520154Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:21.520202Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:21.520210Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:21.520219Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:21.520226Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:21.520233Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:27:21.520243Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:27:21.520251Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037890 2025-06-03T10:27:21.520259Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037890 2025-06-03T10:27:21.520376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:676:2579], serverId# [1:704:2594], sessionId# [0:0:0] 2025-06-03T10:27:21.520384Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 
2025-06-03T10:27:21.520388Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:21.520392Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:27:21.520397Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:27:21.520404Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-03T10:27:21.520408Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:21.520412Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037890 TxInFly 0 2025-06-03T10:27:21.520417Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:27:21.520459Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03 ... node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:27:25.415880Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:25.416148Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:25.416159Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:25.416165Z node 3 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:27:25.416186Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [3:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:27:25.416199Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:27:25.416213Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.416534Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:25.416881Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:27:25.416898Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:27:25.417050Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:27:25.435133Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:25.435170Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:25.435182Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:25.436418Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:27:25.458343Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:25.619575Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:25.620288Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:27:25.658653Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:25.699964Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtn9s3t6peb05v7qaxwmjew, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=M2FmNDEwMmEtNjM1NTgzNzgtZTU3OTViZGMtMjE1OGI4OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:25.700853Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:846:2677], serverId# [3:847:2678], sessionId# [0:0:0] 2025-06-03T10:27:25.701037Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-06-03T10:27:25.701108Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=3 2025-06-03T10:27:25.713688Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.818144Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtn9scx2g9xjah9vgctyax8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTg5ZWVmMmMtZWVjN2Y2ZjUtYWY0NmU3ZS03OWI0YWY5Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:27:25.818998Z node 3 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 281474976715661, counter# 0 for [OwnerId: 72057594046644480, LocalPathId: 2] { items { uint64_value: 0 } } 2025-06-03T10:27:25.827534Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:885:2708], serverId# [3:886:2709], sessionId# [0:0:0] 2025-06-03T10:27:25.827966Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:26: TTxDirectBase(48) Execute: at tablet# 72075186224037888 2025-06-03T10:27:25.841670Z node 3 :TX_DATASHARD INFO: datashard__op_rows.cpp:80: TTxDirectBase(48) Complete: at tablet# 72075186224037888 2025-06-03T10:27:25.841719Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.841741Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2560: Waiting for PlanStep# 1501 from mediator time cast 2025-06-03T10:27:25.842022Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 1501 at tablet 72075186224037888 2025-06-03T10:27:25.842036Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.842110Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-03T10:27:25.842121Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:4487: Conditional erase complete: cookie: 4, at: 72075186224037888 2025-06-03T10:27:25.842186Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:25.842198Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:25.842211Z node 3 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:25.842222Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:25.842270Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [3:885:2708], serverId# [3:886:2709], sessionId# [0:0:0] 2025-06-03T10:27:25.886578Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtn9sgj8j0194g4m88mmsjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTg5ZWVmMmMtZWVjN2Y2ZjUtYWY0NmU3ZS03OWI0YWY5Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:25.887673Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-06-03T10:27:25.887744Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=6; 2025-06-03T10:27:25.889433Z node 3 :TX_DATASHARD INFO: datashard_write_operation.cpp:684: Write transaction 6 at 72075186224037888 has an error: Operation is aborting because locks are not valid 2025-06-03T10:27:25.889557Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. 
txid 6 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:27:25.889663Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037888 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because locks are not valid" issue_code: 2001 severity: 1 } 2025-06-03T10:27:25.889690Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.889788Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [3:907:2683], Table: `/Root/table-1` ([72057594046644480:2:1]), SessionActorId: [3:853:2683]Got LOCKS BROKEN for table `/Root/table-1`. ShardID=72075186224037888, Sink=[3:907:2683].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:27:25.889934Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:900:2683], SessionActorId: [3:853:2683], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[3:853:2683]. isRollback=0 2025-06-03T10:27:25.890077Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=MTg5ZWVmMmMtZWVjN2Y2ZjUtYWY0NmU3ZS03OWI0YWY5Yw==, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jwtn9sgj8j0194g4m88mmsjb, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [3:901:2683] from: [3:900:2683] 2025-06-03T10:27:25.890142Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:901:2683] TxId: 281474976715662. Ctx: { TraceId: 01jwtn9sgj8j0194g4m88mmsjb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTg5ZWVmMmMtZWVjN2Y2ZjUtYWY0NmU3ZS03OWI0YWY5Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table-1`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:27:25.890289Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:7] at 72075186224037888 2025-06-03T10:27:25.890311Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip empty write operation for [0:7] at 72075186224037888 2025-06-03T10:27:25.890362Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:25.890399Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=MTg5ZWVmMmMtZWVjN2Y2ZjUtYWY0NmU3ZS03OWI0YWY5Yw==, ActorId: [3:853:2683], ActorState: ExecuteState, TraceId: 01jwtn9sgj8j0194g4m88mmsjb, Create QueryResponse for error on request, msg: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 30799, MsgBus: 20225 2025-06-03T10:27:18.853421Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667753802313969:2220];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c62/r3tmp/tmpJ1Y7gf/pdisk_1.dat 2025-06-03T10:27:18.904343Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:18.956495Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:18.957568Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667753802313762:2079] 1748946438832511 != 1748946438832514 TServer::EnableGrpc on GrpcPort 30799, node 1 2025-06-03T10:27:19.004884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:19.004928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:19.013494Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:19.025260Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:19.025274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:19.025277Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:19.025340Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20225 TClient is connected to server localhost:20225 WaitRootIsUp 'Root'... 
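Editor's note: the STATUS_LOCKS_BROKEN sequence in the erase-rows test above is optimistic locking at work. The read at "Acquired lock# 281474976715661, counter# 0" records a row version, the conditional erase then modifies the row and invalidates that lock, and the writer's commit-time validation fails with "Transaction locks invalidated ... locks are not valid", so the session aborts the transaction. A toy version of that version-counter check is sketched below; the types are hypothetical, not the datashard implementation.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

struct Row { std::string value; uint64_t version = 0; };

class Table {
public:
    // Read returns the value plus the version it was observed at;
    // the (key, version) pair plays the role of the acquired lock.
    std::pair<std::string, uint64_t> Read(const std::string& key) {
        const Row& r = rows_[key];
        return {r.value, r.version};
    }
    void Write(const std::string& key, std::string value) {
        Row& r = rows_[key];
        r.value = std::move(value);
        ++r.version;                 // any write breaks outstanding locks
    }
    bool Validate(const std::string& key, uint64_t lockedVersion) {
        return rows_[key].version == lockedVersion;
    }
private:
    std::unordered_map<std::string, Row> rows_;
};

int main() {
    Table t;
    t.Write("k", "v0");                       // version 1
    auto [value, lock] = t.Read("k");         // tx A locks at version 1
    t.Write("k", "v1");                       // tx B commits, version 2
    if (!t.Validate("k", lock))               // tx A's commit-time check
        std::cout << "ABORTED: Transaction locks invalidated (read saw "
                  << value << ")\n";
}
```

Aborting the whole transaction on a broken lock, rather than silently re-reading, is what gives serializable semantics; the client is expected to retry, as the "Create QueryResponse for error on request" line above reflects.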
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:19.213359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.240674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.310251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.385043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.423518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.709323Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667758097282704:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.709349Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.858447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.882826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.909867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.932020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.960373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.981995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.006096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.035096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762392250660:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.035122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.035311Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762392250665:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.036354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:20.042704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:20.042835Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667762392250667:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:20.110081Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667762392250718:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.477808Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667762392250988:3571], Recipient [1:7511667753802314190:2189]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.477824Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.477840Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:20.477850Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667762392250984:3568], Recipient [1:7511667753802314190:2189]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-03T10:27:20.477852Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:20.518044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "id" Type: "Uint64" NotNull: false } Columns { Name: "customer" Type: "Utf8" NotNull: false } Columns { Name: "created" Type: "Datetime" NotNull: false } Columns { Name: "processed" Type: "String" NotNull: false } KeyColumnNames: "id" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "ix_cust" KeyColumnNames: "customer" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } IndexDescription { Name: "ix_cust2" KeyColumnNames: "customer" KeyColumnNames: "created" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } IndexDescription { Name: "ix_cust3" KeyColumnNames: "customer" KeyColumnNames: "created" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } DataColumnNames: "processed" } } } TxId: 281474976715672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:54654" , at schemeshard: 72057594046644480 2025-06-03T10:27:20.518186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 4 GetShardsInside: 34 MaxShards: 200000 2025-06-03T10:27:20.518332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.518366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: scheme ... 
wX2: 4503612512274887 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-03T10:27:25.811407Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvSchemaChanged at tablet: 72057594046644480 2025-06-03T10:27:25.811415Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 281474976715672:2 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7511667784571207153 RawX2: 4503612512274887 } Origin: 72075186224037923 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-03T10:27:25.811419Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715672:2, shardIdx: 72057594046644480:36, datashard: 72075186224037923, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811421Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811423Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976715672:2, datashard: 72075186224037923, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811425Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715672:2 129 -> 240 2025-06-03T10:27:25.811440Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.811468Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811470Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811485Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811486Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811494Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811495Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811497Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-06-03T10:27:25.811505Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667784571207150:2502] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:25.811522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811523Z node 3 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811525Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-06-03T10:27:25.811529Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667784571207153:2503] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:25.811543Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:7511667780276237608:2147], Recipient [3:7511667780276237608:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:27:25.811545Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:27:25.811549Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811553Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-03T10:27:25.811561Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.811564Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-03T10:27:25.811566Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-03T10:27:25.811569Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 2/3 2025-06-03T10:27:25.811570Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-03T10:27:25.811573Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-03T10:27:25.811594Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:7511667780276237608:2147], Recipient [3:7511667780276237608:2147]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:27:25.811596Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:27:25.811598Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:25.811600Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-03T10:27:25.811606Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:25.811608Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-03T10:27:25.811610Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 
2025-06-03T10:27:25.811613Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 3/3 2025-06-03T10:27:25.811615Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-03T10:27:25.811617Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-03T10:27:25.811625Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7511667784571207123:2500] message: TxId: 281474976715672 2025-06-03T10:27:25.811628Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-03T10:27:25.811633Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-03T10:27:25.811635Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:0 2025-06-03T10:27:25.811669Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-03T10:27:25.811672Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-03T10:27:25.811674Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:1 2025-06-03T10:27:25.811678Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-03T10:27:25.811680Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-03T10:27:25.811682Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:2 2025-06-03T10:27:25.811688Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-03T10:27:25.811757Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811784Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:25.811791Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667784571207123:2500] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:25.812142Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667784571207138:3575], Recipient [3:7511667780276237608:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.812147Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.812149Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is 
reset, at schemeshard: 72057594046644480 2025-06-03T10:27:25.813509Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667784571207223:3634], Recipient [3:7511667780276237608:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.813524Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.813527Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:25.813532Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667784571207222:3633], Recipient [3:7511667780276237608:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.813533Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:25.813535Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 >> ObjectStorageListingTest::ListingNoFilter [GOOD] |62.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |62.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl |62.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ttl/ydb-core-tx-schemeshard-ut_ttl ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::SecondaryIndexSelectUsingScripting [GOOD] Test command err: Trying to start YDB, gRPC: 25245, MsgBus: 3199 2025-06-03T10:27:19.325513Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667760562519530:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:19.325865Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c54/r3tmp/tmpSq4BDE/pdisk_1.dat 2025-06-03T10:27:19.450671Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:19.452687Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667760562519452:2079] 1748946439304067 != 1748946439304070 TServer::EnableGrpc on GrpcPort 25245, node 1 2025-06-03T10:27:19.481566Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:19.481579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:19.481582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:19.481661Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:19.509749Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:19.509780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:19.511975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3199 TClient is connected to server localhost:3199 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:19.670903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.677645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:19.686528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.747715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.853047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.900986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:20.311381Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764857488378:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.311412Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.388229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.403760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.423848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.451487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.465958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.483806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.503558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.532950Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764857489042:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.532976Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.533112Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764857489047:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.534253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:20.541725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:20.541852Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667764857489049:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:20.633327Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667764857489100:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.938202Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667764857489369:3568], Recipient [1:7511667760562519856:2171]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.938224Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.938227Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:20.938238Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667764857489365:3565], Recipient [1:7511667760562519856:2171]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-03T10:27:20.938240Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:20.957789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: false } Columns { Name: "Value" Type: "String" NotNull: false } KeyColumnNames: "Key" PartitionConfig { ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Temporary: false } IndexDescription { Name: "Index" KeyColumnNames: "Index2" Type: EIndexTypeGlobal IndexImplTableDescriptions { PartitionConfig { } } } } } TxId: 281474976715672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:51670" , at schemeshard: 72057594046644480 2025-06-03T10:27:20.957923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /Root/TestTable domain path id: [OwnerId: 72057594046644480, LocalPathId: 1] domain path: /Root shardsToCreate: 2 GetShardsInside: 34 MaxShards: 200000 2025-06-03T10:27:20.958001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.958026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, schema: Name: "TestTable" Columns { Name: "Key" Type: "String" NotNull: false } Columns { Name: "Index2" Type: "String" NotNull: f ... 
7922 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-03T10:27:26.243859Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 281474976715672:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480 2025-06-03T10:27:26.243867Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 281474976715672:0 HandleReply TEvSchemaChanged at tablet: 72057594046644480 message: Source { RawX1: 7511667790056212317 RawX2: 4503612512274899 } Origin: 72075186224037922 State: 2 TxId: 281474976715672 Step: 0 Generation: 1 2025-06-03T10:27:26.243873Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715672:0, shardIdx: 72057594046644480:35, datashard: 72075186224037922, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-03T10:27:26.243875Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.243878Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976715672:0, datashard: 72075186224037922, at schemeshard: 72057594046644480 2025-06-03T10:27:26.243880Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715672:0 129 -> 240 2025-06-03T10:27:26.243911Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:26.243985Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:26.243986Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:26.243989Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:2 2025-06-03T10:27:26.244000Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667790056212313:2514] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:26.244023Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.244025Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:26.244028Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976715672:0 2025-06-03T10:27:26.244033Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667790056212317:2515] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:26.244045Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:7511667785761242701:2143], Recipient [3:7511667785761242701:2143]: 
NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:27:26.244048Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:27:26.244055Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:2, at schemeshard: 72057594046644480 2025-06-03T10:27:26.244060Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976715672:2 ProgressState 2025-06-03T10:27:26.244069Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:26.244075Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-03T10:27:26.244077Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-03T10:27:26.244080Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:2 progress is 2/3 2025-06-03T10:27:26.244082Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 2/3 2025-06-03T10:27:26.244087Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 2/3, is published: true 2025-06-03T10:27:26.244111Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [3:7511667785761242701:2143], Recipient [3:7511667785761242701:2143]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:27:26.244113Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:27:26.244117Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.244119Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976715672:0 ProgressState 2025-06-03T10:27:26.244126Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:26.244128Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-03T10:27:26.244129Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-03T10:27:26.244133Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715672:0 progress is 3/3 2025-06-03T10:27:26.244135Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-03T10:27:26.244137Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715672, ready parts: 3/3, is published: true 2025-06-03T10:27:26.244148Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [3:7511667790056212281:2512] message: TxId: 
281474976715672 2025-06-03T10:27:26.244153Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715672 ready parts: 3/3 2025-06-03T10:27:26.244160Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:0 2025-06-03T10:27:26.244163Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:0 2025-06-03T10:27:26.244205Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 17] was 4 2025-06-03T10:27:26.244209Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:1 2025-06-03T10:27:26.244210Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:1 2025-06-03T10:27:26.244215Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 3 2025-06-03T10:27:26.244218Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715672:2 2025-06-03T10:27:26.244220Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715672:2 2025-06-03T10:27:26.244228Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 19] was 3 2025-06-03T10:27:26.244349Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:26.244370Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:26.244379Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [3:7511667790056212281:2512] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715672 at schemeshard: 72057594046644480 2025-06-03T10:27:26.247808Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667790056212296:3583], Recipient [3:7511667785761242701:2143]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.247822Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.247826Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:26.254591Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [3:7511667790056212385:3644], Recipient [3:7511667785761242701:2143]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.254607Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.254612Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:26.254618Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 
269877764, Sender [3:7511667790056212386:3645], Recipient [3:7511667785761242701:2143]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.254621Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:26.254623Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:26.303620Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:7511667785761242701:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:26.303643Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:26.303657Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [3:7511667785761242701:2143], Recipient [3:7511667785761242701:2143]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:26.303660Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TPQTest::TestPQPartialRead >> TPQTabletTests::DropTablet [GOOD] >> TestKinesisHttpProxy::TestRequestWithIAM [GOOD] >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::UpdateFkPkOverlap [GOOD] Test command err: Trying to start YDB, gRPC: 65023, MsgBus: 5923 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c50/r3tmp/tmpLYmHLZ/pdisk_1.dat 2025-06-03T10:27:18.667398Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:18.761493Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:18.770101Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.777383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 65023, node 1 2025-06-03T10:27:18.789042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:18.797514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.797529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.797533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.797582Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5923 TClient is connected to server localhost:5923 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:18.993595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.000837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:19.009475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.070163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.195704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.227949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.733118Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667758266055148:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.733172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.839268Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.859639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.885477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.910435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.923504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.946119Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.978904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.038842Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762561023095:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.038866Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.038951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667762561023100:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.040008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:20.043090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:27:20.043149Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667762561023102:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:27:20.134591Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667762561023153:3405] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.390258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:20.971088Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9mfb9yxyjrnzqr42aqe0, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:20.973144Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, ActorId: [1:7511667762561024159:2566], ActorState: ExecuteState, TraceId: 01jwtn9mfb9yxyjrnzqr42aqe0, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.201033Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9mrh0srekswd3q31cqa6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:21.201162Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, ActorId: [1:7511667762561024159:2566], ActorState: ExecuteState, TraceId: 01jwtn9mrh0srekswd3q31cqa6, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.228895Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9mzt22cgqh1mdghfj375, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 2025-06-03T10:27:21.229011Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, ActorId: [1:7511667762561024159:2566], ActorState: ExecuteState, TraceId: 01jwtn9mzt22cgqh1mdghfj375, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.414775Z node 1 :KQP_EXECUTER ERROR: kqp_literal_executer.cpp:107: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtn9n0j9nww1vdw637cvqqc, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, TKqpEnsure failed. 
2025-06-03T10:27:21.414895Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTViZjY2NS1lZmE1OWE2OC1lZjU2YjMtYzYzNzM3N2E=, ActorId: [1:7511667762561024159:2566], ActorState: ExecuteState, TraceId: 01jwtn9n0j9nww1vdw637cvqqc, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 19551, MsgBus: 31435 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c50/r3tmp/tmpnnBYs0/pdisk_1.dat 2025-06-03T10:27:22.683724Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667771710573214:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:22.701941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:22.793552Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:22.795345Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667771710573032:2079] 1748946442655907 != 1748946442655910 2025-06-03T10:27:22.812419Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:22.812454Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:22.821842Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19551, node 2 2025-06-03T10:27:22.839058Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:22.839070Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:22.839073Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:22.839124Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31435 TClient is connected to server localhost:31435 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:27:23.018769Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.026902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:23.038443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.090951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.146682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.183288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:23.858839Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667776005541970:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.858866Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:23.887638Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.918490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.934032Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.951071Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.970519Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:23.998845Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.023415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:24.050400Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667780300509918:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.050425Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.050607Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667780300509923:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:24.051621Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:24.058623Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667780300509925:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:24.145235Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667780300509976:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:24.564320Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_object_storage_listing/unittest >> ObjectStorageListingTest::ListingNoFilter [GOOD] Test command err: 2025-06-03T10:27:26.253991Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:26.254122Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:26.254167Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018d9/r3tmp/tmpppynDR/pdisk_1.dat 2025-06-03T10:27:26.562474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.584781Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:26.586083Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946445300495 != 1748946445300499 2025-06-03T10:27:26.639818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:26.639863Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:26.651867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:26.739128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.761215Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:26.761341Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:26.773098Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:26.773152Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:26.773493Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:26.773510Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:26.773519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:26.773607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:26.773645Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:26.773664Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:26.784347Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:26.790205Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:26.790306Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast 
registration request in state WaitScheme: missing processing params 2025-06-03T10:27:26.790343Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:26.790350Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:26.790356Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:26.790363Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:26.790569Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:26.790606Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:26.790725Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:26.790736Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:26.790748Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:26.790756Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:26.790772Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:26.790812Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:27:26.790871Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:26.790894Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:26.791285Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:26.802133Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:27:26.802191Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:27:26.978380Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:27:26.979482Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:27:26.979513Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:26.979605Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037888 2025-06-03T10:27:26.979616Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:27:26.979630Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:27:26.979708Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:27:26.979749Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:27:26.979948Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:26.979967Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:27:26.980465Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:27:26.980576Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:26.980965Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:27:26.980977Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:26.981192Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:27:26.981206Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:26.981421Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:26.981432Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:26.981439Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:27:26.981456Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:27:26.981467Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:27:26.981480Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:26.982517Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:26.982838Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:27:26.982968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 
281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:27:26.982979Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:27:26.991808Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:731:2613], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.991859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:741:2618], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.991871Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.993086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:27:27.004312Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:27.171395Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:27:27.173681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:745:2621], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:27:27.227829Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:815:2660] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:27.350985Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtn9tmf2qzmykncb8y0854p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzdjYzgzNTktODU2NGYxNy1lN2JiNmVkZC0yMDlmMjU4Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:27.352650Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:846:2677], serverId# [1:847:2678], sessionId# [0:0:0] 2025-06-03T10:27:27.361440Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037888 2025-06-03T10:27:27.361531Z node 1 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037888, row count=5 2025-06-03T10:27:27.372032Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:27.378218Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:854:2684], serverId# [1:855:2685], sessionId# [0:0:0] 2025-06-03T10:27:27.378289Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test/")), end at key ((type:2, value:"d\0\0\0") (type:4608, value:"/test0")) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-03T10:27:27.378345Z node 1 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 3 common prefixes: 2 2025-06-03T10:27:27.378382Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037888, clientId# [1:854:2684], serverId# [1:855:2685], sessionId# [0:0:0] >> KqpIndexes::Uint8Index [GOOD] >> TPQTabletTests::DropTablet_And_PlannedConfigTransaction [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_stats/unittest >> TSchemeshardStatsBatchingTest::ShouldPersistByBatchTimeout [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:05.388622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:05.388650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 
2025-06-03T10:27:05.388657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:05.388664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:05.388693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:05.388698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:05.388709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:05.388723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:05.388848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:05.388923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:05.421698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:05.421735Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:05.433452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:05.433657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:05.433721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:05.443900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:05.443998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:05.444148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.444237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:05.450382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.450531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:05.451033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.451051Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:05.451064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-03T10:27:05.451079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.451086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:05.451116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.458858Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:05.510615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:05.510728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.510810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:05.510875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:05.510889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.511976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.512018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:05.512113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.512130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:05.512136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:05.512146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:05.513516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.513541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts 
operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:05.513550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:05.514050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.514066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:05.514073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.514081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:05.514878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:05.516034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:05.516099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:05.516366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:05.516408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:05.516418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.516509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:05.516519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:05.516567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:05.516585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:05.517190Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:05.517203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:05.517266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... TRACE: schemeshard__init.cpp:2162: TTxInit for Shards, read: 72057594046678944:1, tabletId: 72075186233409546, PathId: [OwnerId: 72057594046678944, LocalPathId: 2], TabletType: DataShard, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:27:27.232465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:27:27.232986Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:27:27.238981Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:27.239074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:27.239665Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435083, Sender [1:1750:3671], Recipient [1:1750:3671]: NKikimr::NSchemeShard::TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:27.239681Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4945: StateWork, processing event TEvPrivate::TEvServerlessStorageBilling 2025-06-03T10:27:27.239933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:27.239946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:27.240056Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [1:1750:3671], Recipient [1:1750:3671]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:27.240067Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:27.240225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:27.240240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:27.240251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:27.240255Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:27:27.241133Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274399233, Sender [1:1788:3671], Recipient [1:1750:3671]: NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:27.241148Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5036: StateWork, processing event TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:27:27.241154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:1750:3671] sender: [1:1808:2058] recipient: [1:15:2062] 2025-06-03T10:27:27.283755Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender 
[1:1807:3717], Recipient [1:1750:3671]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-03T10:27:27.283781Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:27:27.283817Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Simple" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:27:27.283938Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Simple" took 106us result status StatusSuccess 2025-06-03T10:27:27.284198Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Simple" PathDescription { Self { Name: "Simple" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1001 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Simple" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 
83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 MaxPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 27456 RowCount: 200 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 5182 Memory: 156584 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 27456 DataSize: 27456 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpScheme::AlterCompressionLevelInColumnFamily >> TPQTabletTests::Cancel_Tx >> TestKinesisHttpProxy::TestRequestNoAuthorization >> TestYmqHttpProxy::TestCreateQueueWithEmptyName [GOOD] >> TPQTabletTests::Cancel_Tx [GOOD] >> TPQTabletTests::Config_TEvTxCommit_After_Restart >> KqpScheme::CreateTableWithReadReplicasUncompat >> TTopicReaderTests::TestRun_ReadMoreMessagesThanLimit_Without_Wait_NoDelimiter [GOOD] >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpIndexes::Uint8Index [GOOD] Test command err: Trying to start YDB, gRPC: 11200, MsgBus: 24431 2025-06-03T10:27:19.176661Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667760574996546:2215];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c58/r3tmp/tmpa6Cygm/pdisk_1.dat 2025-06-03T10:27:19.407376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:19.530299Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:19.531455Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667760574996342:2079] 1748946439163367 != 1748946439163370 TServer::EnableGrpc on GrpcPort 11200, node 1 2025-06-03T10:27:19.595446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:19.595462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:19.595464Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:19.595510Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:19.625913Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:19.625959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:19.634118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24431 TClient is connected to server localhost:24431 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:19.790108Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.798174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:19.814377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.867196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.938657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:20.026325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:20.712305Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764869965293:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.712331Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.771903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.785323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.797836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.816791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.837700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.856658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.875993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.897951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764869965944:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.897975Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.898103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667764869965949:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:20.899065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:20.905701Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667764869965951:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:27:20.966167Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667764869966002:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:21.340420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:21.438073Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [1:7511667769164933749:2508] TxId: 281474976710673. Ctx: { TraceId: 01jwtn9n6p75jtygdat0q2809a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-06-03T10:27:21.440185Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, ActorId: [1:7511667769164933562:2508], ActorState: ExecuteState, TraceId: 01jwtn9n6p75jtygdat0q2809a, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.447235Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [1:7511667769164933762:2508] TxId: 281474976710675. Ctx: { TraceId: 01jwtn9n757aq5bd97k4cne784, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-06-03T10:27:21.447303Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, ActorId: [1:7511667769164933562:2508], ActorState: ExecuteState, TraceId: 01jwtn9n757aq5bd97k4cne784, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.449302Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [1:7511667769164933771:2508] TxId: 281474976710677. Ctx: { TraceId: 01jwtn9n78fb3c3q9tt48me6fd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Read operation can be performed on async index table: [72057594046644480:19:1] only with StaleRO isolation level 2025-06-03T10:27:21.449349Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MzBmMWU3ZDEtNzNmZjQxM2MtYzZmMWNkYTQtODJiMWQyOTM=, ActorId: [1:7511667769164933562:2508], ActorState: ExecuteState, TraceId: 01jwtn9n78fb3c3q9tt48me6fd, Create QueryResponse for error on request, msg: 2025-06-03T10:27:21.451625Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2019: ActorId: [1:7511667769164933780:2508] TxId: 281474976710679. Ctx: { TraceId: 01jwtn9n7a8ba53qx42kh1z3kx, Database: /Root, Database ... 
ient [2:7511667769552914849:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:24.587908Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:24.587913Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:24.587918Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [2:7511667782437819353:3808], Recipient [2:7511667769552914849:2147]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:24.587921Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:24.587923Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:25.042079Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [2:7511667769552914849:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:25.042096Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:27:25.042107Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [2:7511667769552914849:2147], Recipient [2:7511667769552914849:2147]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:27:25.042109Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime Trying to start YDB, gRPC: 16693, MsgBus: 16794 2025-06-03T10:27:25.939449Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667787023691184:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c58/r3tmp/tmpSf6wW0/pdisk_1.dat 2025-06-03T10:27:25.946518Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:25.998589Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:26.001633Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667787023691002:2079] 1748946445917835 != 1748946445917838 TServer::EnableGrpc on GrpcPort 16693, node 3 2025-06-03T10:27:26.033952Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:26.033970Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:26.033972Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:26.034022Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:26.049725Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:26.049761Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-03T10:27:26.050926Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16794 TClient is connected to server localhost:16794 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:26.189368Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:26.194712Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:26.208824Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:26.239413Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:26.274067Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.339758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:26.768257Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667791318659908:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.768310Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.802142Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.822833Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.843269Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.866942Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.882757Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.910645Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.942790Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:26.978406Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667791318660595:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.978435Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.978588Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667791318660600:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:26.979608Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:26.983330Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:26.983446Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667791318660602:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:27.042736Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667795613627949:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:27.315648Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:27.387258Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:27:27.418505Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:27:27.463911Z node 3 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> TPQTabletTests::Config_TEvTxCommit_After_Restart [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test >> KqpUniqueIndex::InsertFkAlreadyExist [GOOD] >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate >> TPQTabletTests::All_New_Partitions_In_Another_Tablet |63.0%| [TA] $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... results_accumulator.log} >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes >> KqpConstraints::DropCreateSerial |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TPQTabletTests::All_New_Partitions_In_Another_Tablet [GOOD] >> KqpVectorIndexes::OrderByCosineLevel1+Nullable+UseSimilarity [GOOD] >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] |63.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |63.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut >> KqpScheme::AlterCompressionLevelInColumnFamily [GOOD] >> KqpScheme::AlterIndexImplTable+VectorIndex |63.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_stats/test-results/unittest/{meta.json ... 
results_accumulator.log} |63.0%| [LD] {RESULT} $(B)/ydb/services/ydb/backup_ut/ydb-services-ydb-backup_ut |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::ClientCertAuthorizationParamsMatch [GOOD] |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TOlap::CreateStore >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> KqpScheme::CreateTableWithReadReplicasUncompat [GOOD] >> KqpScheme::CreateTableWithReadReplicasCompat >> TInterconnectTest::TestManyEvents ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQTabletTests::After_Restarting_The_Tablet_Sends_A_TEvReadSet_For_Transactions_In_The_EXECUTED_State [GOOD] Test command err: 2025-06-03T10:27:27.851898Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:27:27.853094Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:27:27.853207Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-03T10:27:27.853230Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:27:27.853236Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-03T10:27:27.853243Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:27:27.853254Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:27.853267Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:27:27.857979Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037927937] server connected, pipe [1:179:2193], now have 1 active actors on pipe 2025-06-03T10:27:27.858017Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:27:27.860828Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037927937] Config update version 1(current 0) received from actor [1:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-03T10:27:27.861868Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true 
Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-03T10:27:27.861936Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:27.862136Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 1 actor [1:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 TopicName: "topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } ReadRuleGenerations: 1 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } 2025-06-03T10:27:27.862188Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:27:27.862302Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:27:27.862412Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:187:2199] 2025-06-03T10:27:27.862622Z node 1 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-03T10:27:27.862651Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 2 [1:187:2199] 2025-06-03T10:27:27.862661Z node 1 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:27:27.862778Z node 1 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-03T10:27:27.862802Z node 1 :PERSQUEUE DEBUG: partition.cpp:3155: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit request with generation 1 2025-06-03T10:27:27.862808Z node 1 :PERSQUEUE DEBUG: partition.cpp:3224: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user reinit with generation 1 done 2025-06-03T10:27:27.862859Z node 1 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72057594037927937, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:27:27.862864Z node 1 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:27:27.862868Z node 1 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:27:27.862873Z node 1 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72057594037927937, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:27:27.862876Z node 1 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000cuser 2025-06-03T10:27:27.862880Z node 1 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72057594037927937, Partition: 0, State: StateIdle] m0000000000uuser 2025-06-03T10:27:27.862883Z node 1 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72057594037927937, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:27:27.862887Z node 1 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72057594037927937, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:27:27.862905Z node 1 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:27:27.862955Z node 1 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:27:27.863858Z node 1 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72057594037927937, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:27:27.863998Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037927937] server connected, pipe [1:194:2204], now have 1 active actors on pipe 2025-06-03T10:27:27.864122Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037927937] server connected, pipe [1:197:2206], now have 1 active actors on pipe 2025-06-03T10:27:27.864143Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:1711: [PQ: 72057594037927937] Handle TEvPersQueue::TEvDropTablet 2025-06-03T10:27:28.303672Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:27:28.304761Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037927937] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:27:28.304875Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72057594037927937] doesn't have tx info 2025-06-03T10:27:28.304885Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:27:28.304891Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72057594037927937] no config, start with empty partitions and default config 2025-06-03T10:27:28.304899Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037927937] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:27:28.304910Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.304923Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:27:28.315115Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72057594037927937] server connected, pipe [2:179:2193], now have 1 active actors on pipe 2025-06-03T10:27:28.315163Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1460: [PQ: 72057594037927937] Handle TEvPersQueue::TEvUpdateConfig 2025-06-03T10:27:28.315262Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:1646: [PQ: 72057594037927937] Config update version 2(current 0) received from actor [2:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-03T10:27:28.316048Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:590: [PQ: 72057594037927937] Apply new config CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } 2025-06-03T10:27:28.316080Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.316330Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: 
"/topic" YcCloudId: "somecloud" YcFolderId: "somefolder" YdbDatabaseId: "PQ" YdbDatabasePath: "/Root/PQ" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 FederationAccount: "federationAccount" MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { ... 2025-06-03T10:27:29.818041Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:27:29.820334Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:27:29.820363Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-03T10:27:29.820369Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-03T10:27:29.820382Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-03T10:27:29.820388Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-03T10:27:29.820394Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-03T10:27:29.820406Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3968: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-03T10:27:29.820414Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3978: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-03T10:27:29.820430Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4459: [PQ: 72057594037927937] HaveParticipantsDecision 0 2025-06-03T10:27:29.826452Z node 6 :PERSQUEUE DEBUG: pqtablet_mock.cpp:87: Client pipe to tablet 72057594037927937 from 22222 is reset 2025-06-03T10:27:29.844469Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72057594037927937] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:27:29.845381Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72057594037927937] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:27:29.845821Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:741: [PQ: 72057594037927937] has a tx info 2025-06-03T10:27:29.845842Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72057594037927937] PlanStep 110, PlanTxId 67891, ExecStep 110, ExecTxId 67891 2025-06-03T10:27:29.845889Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:997: [PQ: 72057594037927937] ReadRange pair. Key tx_00000000000000067890, Status 0 2025-06-03T10:27:29.845918Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1006: [PQ: 72057594037927937] Restore Tx. TxId: 67890, Step: 100, State: EXECUTED, WriteId: 2025-06-03T10:27:29.845939Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:997: [PQ: 72057594037927937] ReadRange pair. Key tx_00000000000000067891, Status 0 2025-06-03T10:27:29.845946Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1006: [PQ: 72057594037927937] Restore Tx. 
TxId: 67891, Step: 110, State: CALCULATED, WriteId: 2025-06-03T10:27:29.845951Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1009: [PQ: 72057594037927937] Fix tx state 2025-06-03T10:27:29.845961Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72057594037927937] Txs.size=2, PlannedTxs.size=2 2025-06-03T10:27:29.845969Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4895: [PQ: 72057594037927937] top tx queue (100, 67890) 2025-06-03T10:27:29.845976Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4915: [PQ: 72057594037927937] TxsOrder: 67890 EXECUTED 0 2025-06-03T10:27:29.845982Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4915: [PQ: 72057594037927937] TxsOrder: 67891 PLANNED 0 2025-06-03T10:27:29.846119Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:29.846128Z node 6 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72057594037927937] has a tx writes info 2025-06-03T10:27:29.846160Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:27:29.846255Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:27:29.846313Z node 6 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [6:361:2338] 2025-06-03T10:27:29.846516Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDiskStatusStep 2025-06-03T10:27:29.846783Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitMetaStep 2025-06-03T10:27:29.846842Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitInfoRangeStep 2025-06-03T10:27:29.846964Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataRangeStep 2025-06-03T10:27:29.847033Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitDataStep 2025-06-03T10:27:29.847038Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-03T10:27:29.847044Z node 6 :PERSQUEUE INFO: partition_init.cpp:774: [topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:27:29.847049Z node 6 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic:0:Initializer] Initializing completed. 2025-06-03T10:27:29.847057Z node 6 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'topic' partition 0 generation 3 [6:361:2338] 2025-06-03T10:27:29.847067Z node 6 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72057594037927937, Partition: 0, State: StateInit] SYNC INIT topic topic partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:27:29.847077Z node 6 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Process pending events. Count 0 2025-06-03T10:27:29.847099Z node 6 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Topic 'topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 6 2025-06-03T10:27:29.847140Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3968: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. 
Wait TEvTxProcessing::TEvReadSet from 1 senders. 2025-06-03T10:27:29.847146Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3978: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-03T10:27:29.847173Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72057594037927937] Try execute txs with state EXECUTED 2025-06-03T10:27:29.847179Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72057594037927937] TxId 67890, State EXECUTED 2025-06-03T10:27:29.847184Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72057594037927937] TxId 67890 State EXECUTED FrontTxId 67890 2025-06-03T10:27:29.847191Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3987: [PQ: 72057594037927937] TPersQueue::SendEvReadSetAckToSenders 2025-06-03T10:27:29.847197Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72057594037927937] TxId 67890, NewState WAIT_RS_ACKS 2025-06-03T10:27:29.847203Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72057594037927937] TxId 67890 moved from EXECUTED to WAIT_RS_ACKS 2025-06-03T10:27:29.847210Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-03T10:27:29.847214Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4537: [PQ: 72057594037927937] HaveAllRecipientsReceive 0, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-03T10:27:29.847218Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 67890] PredicateAcks: 0/1 2025-06-03T10:27:29.847225Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72057594037927937] Try execute txs with state PLANNED 2025-06-03T10:27:29.847229Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72057594037927937] TxId 67891, State PLANNED 2025-06-03T10:27:29.847233Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72057594037927937] TxId 67891 State PLANNED FrontTxId 67891 2025-06-03T10:27:29.847238Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4404: [PQ: 72057594037927937] TxQueue.size 1 2025-06-03T10:27:29.847242Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:836: [PQ: 72057594037927937] New ExecStep 110, ExecTxId 67891 2025-06-03T10:27:29.847253Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72057594037927937] TxId 67891, NewState CALCULATING 2025-06-03T10:27:29.847258Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72057594037927937] TxId 67891 moved from PLANNED to CALCULATING 2025-06-03T10:27:29.847315Z node 6 :PERSQUEUE DEBUG: partition.cpp:1127: [PQ: 72057594037927937, Partition: 0, State: StateIdle] Handle TEvPQ::TEvTxCalcPredicate Step 110, TxId 67891 2025-06-03T10:27:29.847457Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3487: [PQ: 72057594037927937] Handle TEvPQ::TEvTxCalcPredicateResult Step 110, TxId 67891, Partition 0, Predicate 1 2025-06-03T10:27:29.847464Z node 6 :PERSQUEUE DEBUG: transaction.cpp:218: [TxId: 67891] Handle TEvTxCalcPredicateResult 2025-06-03T10:27:29.847470Z node 6 :PERSQUEUE DEBUG: transaction.cpp:267: [TxId: 67891] Partition responses 1/1 2025-06-03T10:27:29.847474Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72057594037927937] Try execute txs with state CALCULATING 2025-06-03T10:27:29.847478Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72057594037927937] TxId 67891, State CALCULATING 2025-06-03T10:27:29.847483Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72057594037927937] TxId 67891 State CALCULATING FrontTxId 67891 2025-06-03T10:27:29.847489Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4420: [PQ: 72057594037927937] Received 1, Expected 1 2025-06-03T10:27:29.847495Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72057594037927937] TxId 67891, NewState CALCULATED 2025-06-03T10:27:29.847500Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: 
[PQ: 72057594037927937] TxId 67891 moved from CALCULATING to CALCULATED 2025-06-03T10:27:29.847506Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3816: [PQ: 72057594037927937] write key for TxId 67891 2025-06-03T10:27:29.847564Z node 6 :PERSQUEUE DEBUG: transaction.cpp:374: [TxId: 67891] save tx TxId: 67891 State: CALCULATED MinStep: 152 MaxStep: 30152 PredicatesReceived { TabletId: 22222 } PredicateRecipients: 22222 Operations { PartitionId: 0 CommitOffsetsBegin: 0 CommitOffsetsEnd: 0 Consumer: "user" Path: "/topic" } Step: 110 Predicate: true Kind: KIND_DATA SourceActor { RawX1: 176 RawX2: 25769805966 } Partitions { } 2025-06-03T10:27:29.847583Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72057594037927937] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:27:29.847599Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2913: [PQ: 72057594037927937] Handle TEvTabletPipe::TEvClientConnected 2025-06-03T10:27:29.847605Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2918: [PQ: 72057594037927937] Connected to tablet 22222 2025-06-03T10:27:29.854530Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72057594037927937] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:27:29.854560Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72057594037927937] Try execute txs with state CALCULATED 2025-06-03T10:27:29.854566Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72057594037927937] TxId 67891, State CALCULATED 2025-06-03T10:27:29.854573Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72057594037927937] TxId 67891 State CALCULATED FrontTxId 67891 2025-06-03T10:27:29.854579Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72057594037927937] TxId 67891, NewState WAIT_RS 2025-06-03T10:27:29.854584Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72057594037927937] TxId 67891 moved from CALCULATED to WAIT_RS 2025-06-03T10:27:29.854594Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3968: [PQ: 72057594037927937] Send TEvTxProcessing::TEvReadSet to 1 receivers. Wait TEvTxProcessing::TEvReadSet from 1 senders. 
2025-06-03T10:27:29.854599Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3978: [PQ: 72057594037927937] Send TEvReadSet to tablet 22222 2025-06-03T10:27:29.854613Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4459: [PQ: 72057594037927937] HaveParticipantsDecision 0 >> TInterconnectTest::TestNotifyUndelivered |63.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |63.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |63.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_extsubdomain_reboots/ydb-core-tx-schemeshard-ut_extsubdomain_reboots |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> TCertificateAuthUtilsTest::GenerateAndVerifyCertificates [GOOD] |63.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/certificate_check/ut/unittest >> KqpConstraints::DropCreateSerial [GOOD] >> KqpConstraints::DefaultsAndDeleteAndUpdate >> TOlap::CreateStore [GOOD] >> TOlap::CreateDropTable >> TInterconnectTest::TestNotifyUndelivered [GOOD] >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor |63.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |63.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo |63.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/rbo/ydb-core-kqp-ut-rbo >> BSCStopPDisk::PDiskStop >> KqpAcl::AclDml-UseSink+IsOlap [GOOD] >> KqpAcl::AclDml+UseSink+IsOlap >> TestKinesisHttpProxy::TestRequestNoAuthorization [GOOD] >> KqpScheme::CreateTableWithReadReplicasCompat [GOOD] >> KqpScheme::CreateTableWithTtlOnIntColumn >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] >> TInterconnectTest::TestManyEvents [GOOD] >> TInterconnectTest::TestCrossConnect >> TInterconnectTest::TestNotifyUndeliveredOnMissedActor [GOOD] >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes >> KqpScheme::AlterIndexImplTable+VectorIndex [GOOD] >> KqpScheme::AlterDatabaseChangeOwner+EnableAlterDatabase >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] >> TOlap::CreateDropTable [GOOD] >> TOlap::CreateDropStandaloneTableDefaultSharding |63.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |63.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut |63.1%| [LD] {RESULT} $(B)/ydb/core/tx/conveyor_composite/ut/ydb-core-tx-conveyor_composite-ut >> TOlapReboots::CreateDropStore [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords >> TInterconnectTest::TestPreSerializedBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestPingPongThroughSubChannel >> BSCStopPDisk::PDiskStop [GOOD] >> KqpConstraints::DefaultsAndDeleteAndUpdate [GOOD] >> KqpConstraints::DefaultValuesForTableNegative4 >> TestProtocols::TestResolveProtocol >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::OrderByCosineDistanceNotNullableLevel3 [GOOD] Test command err: Trying to start YDB, gRPC: 1370, MsgBus: 20757 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c5d/r3tmp/tmp8QJDZP/pdisk_1.dat 2025-06-03T10:27:18.849541Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667754992394742:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:18.851971Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:18.899260Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:18.899654Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667754992394560:2079] 1748946438752013 != 1748946438752016 TServer::EnableGrpc on GrpcPort 1370, node 1 2025-06-03T10:27:18.925528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.925541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.925543Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.925585Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:18.961699Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.961721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:18.965543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20757 TClient is connected to server localhost:20757 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:19.142383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.153899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:19.169191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:19.256606Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.351769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.418558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.496461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759287363495:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.496498Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.666742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.689968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.710541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.724508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.742866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.764869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.785237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.860946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759287364160:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.860983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.861327Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759287364165:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.862373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:19.866875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:19.869167Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667759287364167:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:19.933357Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667759287364218:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.238147Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667763582331783:3570], Recipient [1:7511667754992394997:2192]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.238161Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:20.238165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:20.238177Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667763582331779:3567], Recipient [1:7511667754992394997:2192]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-03T10:27:20.238188Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:20.269142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 4 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 6 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976715672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:35104" , at schemeshard: 72057594046644480 2025-06-03T10:27:20.269408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.270608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: false } Columns { Name: "emb" Type: "String" NotNull: false } Columns { Name: "data" Type: "String" NotNull: false } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitB ... 
abletReply 2025-06-03T10:27:31.075026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 45 ShardOwnerId: 72057594046644480 ShardLocalIdx: 45, at schemeshard: 72057594046644480 2025-06-03T10:27:31.075051Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 4 2025-06-03T10:27:31.075076Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 268698118, Sender [2:7511667802333051389:2140], Recipient [2:7511667802333051388:2139]: NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 43 ShardOwnerId: 72057594046644480 ShardLocalIdx: 43 2025-06-03T10:27:31.075078Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4908: StateWork, processing event TEvHive::TEvDeleteTabletReply 2025-06-03T10:27:31.075083Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 43 ShardOwnerId: 72057594046644480 ShardLocalIdx: 43, at schemeshard: 72057594046644480 2025-06-03T10:27:31.075103Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 3 2025-06-03T10:27:31.075124Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 268698118, Sender [2:7511667802333051389:2140], Recipient [2:7511667802333051388:2139]: NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 44 ShardOwnerId: 72057594046644480 ShardLocalIdx: 44 2025-06-03T10:27:31.075126Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4908: StateWork, processing event TEvHive::TEvDeleteTabletReply 2025-06-03T10:27:31.075131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 44 ShardOwnerId: 72057594046644480 ShardLocalIdx: 44, at schemeshard: 72057594046644480 2025-06-03T10:27:31.075151Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 2 2025-06-03T10:27:31.075172Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 268698118, Sender [2:7511667802333051389:2140], Recipient [2:7511667802333051388:2139]: NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037968897 TxId_Deprecated: 42 ShardOwnerId: 72057594046644480 ShardLocalIdx: 42 2025-06-03T10:27:31.075173Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4908: StateWork, processing event TEvHive::TEvDeleteTabletReply 2025-06-03T10:27:31.075178Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 42 ShardOwnerId: 72057594046644480 ShardLocalIdx: 42, at schemeshard: 72057594046644480 2025-06-03T10:27:31.075198Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 22] was 1 2025-06-03T10:27:31.075218Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435084, Sender [2:7511667802333051388:2139], Recipient [2:7511667802333051388:2139]: 
NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-03T10:27:31.075222Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5054: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-03T10:27:31.075228Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-03T10:27:31.075232Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 23], at schemeshard: 72057594046644480 2025-06-03T10:27:31.075246Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 5 2025-06-03T10:27:31.075252Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 22], at schemeshard: 72057594046644480 2025-06-03T10:27:31.075257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 18] was 4 2025-06-03T10:27:31.075721Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:46 2025-06-03T10:27:31.075729Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:46 tabletId 72075186224037933 2025-06-03T10:27:31.075743Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:45 2025-06-03T10:27:31.075745Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:45 tabletId 72075186224037932 2025-06-03T10:27:31.075749Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:43 2025-06-03T10:27:31.075751Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:43 tabletId 72075186224037930 2025-06-03T10:27:31.075754Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:44 2025-06-03T10:27:31.075756Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:44 tabletId 72075186224037931 2025-06-03T10:27:31.075760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:42 2025-06-03T10:27:31.075764Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:42 tabletId 72075186224037929 2025-06-03T10:27:31.075772Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 2025-06-03T10:27:31.076050Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667810922989408:4415], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72057594037968897 ClientId: [2:7511667810922989408:4415] ServerId: [2:7511667810922989409:4416] } 2025-06-03T10:27:31.076053Z node 2 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076057Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72057594037968897, from:72057594046644480 is reset 2025-06-03T10:27:31.076596Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667806628021786:4145], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037933 ClientId: [2:7511667806628021786:4145] ServerId: [2:7511667806628021792:4149] } 2025-06-03T10:27:31.076604Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076607Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72075186224037933, from:72057594046644480 is reset 2025-06-03T10:27:31.076620Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667806628021524:3957], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037932 ClientId: [2:7511667806628021524:3957] ServerId: [2:7511667806628021548:3964] } 2025-06-03T10:27:31.076622Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076623Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72075186224037932, from:72057594046644480 is reset 2025-06-03T10:27:31.076630Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667806628021522:3955], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037930 ClientId: [2:7511667806628021522:3955] ServerId: [2:7511667806628021544:3961] } 2025-06-03T10:27:31.076632Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076633Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72075186224037930, from:72057594046644480 is reset 2025-06-03T10:27:31.076640Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667806628021523:3956], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037931 ClientId: [2:7511667806628021523:3956] ServerId: [2:7511667806628021545:3962] } 2025-06-03T10:27:31.076642Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076643Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: Client pipe, to tablet: 72075186224037931, from:72057594046644480 is reset 2025-06-03T10:27:31.076650Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877763, Sender [2:7511667806628021521:3954], Recipient [2:7511667802333051388:2139]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186224037929 ClientId: [2:7511667806628021521:3954] ServerId: [2:7511667806628021560:3973] } 2025-06-03T10:27:31.076652Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4978: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:27:31.076653Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5768: 
Client pipe, to tablet: 72075186224037929, from:72057594046644480 is reset 2025-06-03T10:27:31.076676Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037933 not found 2025-06-03T10:27:31.076679Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037932 not found 2025-06-03T10:27:31.076682Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037930 not found 2025-06-03T10:27:31.076685Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037931 not found 2025-06-03T10:27:31.076688Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037929 not found 2025-06-03T10:27:31.082969Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [2:7511667810922989445:4446], Recipient [2:7511667802333051388:2139]: NKikimrSchemeOp.TDescribePath Path: "/Root/TestTable" Options { ShowPrivateTable: false } 2025-06-03T10:27:31.082989Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate [GOOD] Test command err: Trying to start YDB, gRPC: 27560, MsgBus: 22209 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c88/r3tmp/tmpH3qweT/pdisk_1.dat 2025-06-03T10:27:17.677513Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:17.697799Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667752903683169:2079] 1748946437523039 != 1748946437523042 2025-06-03T10:27:17.699556Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27560, node 1 2025-06-03T10:27:17.731485Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:17.739347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:17.740563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:17.751697Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:17.751711Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:17.751715Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:17.751762Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22209 TClient is connected to server localhost:22209 WaitRootIsUp 'Root'... 
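The NET_CLASSIFIER warnings in the startup block above are a fallback chain: the classifier first tries the distributable config, then a local file, and continues without a classifier when both are empty. A compact sketch of that kind of layered lookup follows; the function and source names are illustrative, not the actual net_classifier.cpp code:

#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// Layered config lookup: try each source in order, warn on each miss,
// and fall back to "no classifier" instead of failing the test server.
std::optional<std::string> LoadNetClassifierConfig() {
    std::vector<std::pair<std::string, std::function<std::optional<std::string>()>>> sources = {
        {"distributable config", [] { return std::optional<std::string>{}; }},  // empty in tests
        {"file",                 [] { return std::optional<std::string>{}; }},  // "(empty maybe)"
    };
    for (const auto& [name, load] : sources) {
        if (auto cfg = load()) {
            return cfg;
        }
        std::cerr << "WARN: failed to initialize from " << name << "\n";
    }
    std::cerr << "ERROR: got bad distributable configuration\n";
    return std::nullopt;  // non-fatal: the server keeps starting without it
}

int main() {
    LoadNetClassifierConfig();
}

The point is that every miss is logged but none is fatal, which matches the test server coming up despite the trailing ERROR line.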
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:17.910361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.914838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.942350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.031734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.114608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:18.195122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.305971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667757198652103:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.306006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.389265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.405618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.424809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.442136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.500986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.514296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.542548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:18.568561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667757198652754:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.568581Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.568833Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667757198652762:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.569886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:18.579942Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667757198652764:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:18.677144Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667757198652824:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:18.975191Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667757198653094:3573], Recipient [1:7511667752903683627:2199]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:18.975208Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:18.975211Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:18.975221Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667757198653090:3570], Recipient [1:7511667752903683627:2199]: {TEvModifySchemeTransaction txid# 281474976715672 TabletId# 72057594046644480} 2025-06-03T10:27:18.975224Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:19.021890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "user" Type: "String" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 40 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 60 } } } } Temporary: false } CreateIndexedTable { } } TxId: 281474976715672 TabletId: 72057594046644480 PeerName: "ipv6:[::1]:49284" , at schemeshard: 72057594046644480 2025-06-03T10:27:19.022027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.022061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /Root/TestTable, opId: 281474976715672:0, schema: Name: "TestTable" Columns { Name: "pk" Type: "Int64" NotNull: true } Columns { Name: "user" Type: "String" NotNull: true } Columns { Name: "emb" Type: "String" NotNull: true } Columns { Name: "data" Type: "String" NotNull: true } KeyColumnNames: "pk" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 3 } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 40 } } } } SplitBoundary { KeyPrefix { Tuple { Optional { Int64: 60 } } } } Temporary: false, at schemeshard: 72057594046644480 
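The "Scheduled retry for error: Transaction ... completed, doublechecking" line, followed by TX_PROXY reporting "path exist, request accepts it", is the workload-service pool creator racing other sessions to create /Root/.metadata/workload_manager/pools/default: an ambiguous first reply triggers a doublecheck pass, and losing the race to a concurrent creator counts as success. A minimal sketch of that create-if-absent pattern, assuming hypothetical ProposeCreatePool/PathExists helpers rather than the real scheme-op round trips:

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

enum class EStatus { Ok, AlreadyExists, Retryable };

// Stubs standing in for the real round trips (hypothetical behavior):
// the first reply is ambiguous, but the pool does come into existence.
static bool g_created = false;
EStatus ProposeCreatePool(const std::string&) {
    if (g_created) return EStatus::AlreadyExists;
    g_created = true;           // pretend a concurrent creator won the race
    return EStatus::Retryable;  // ambiguous reply -> schedule a doublecheck
}
bool PathExists(const std::string&) { return g_created; }

// Create-if-absent with a doublecheck pass: "path exist" is accepted as
// success, mirroring the TX_PROXY message above.
bool EnsureDefaultPool(const std::string& path, int maxRetries = 5) {
    using namespace std::chrono_literals;
    auto backoff = 50ms;
    for (int attempt = 0; attempt < maxRetries; ++attempt) {
        switch (ProposeCreatePool(path)) {
            case EStatus::Ok:
            case EStatus::AlreadyExists:
                return true;            // someone else creating it first is fine
            case EStatus::Retryable:
                if (PathExists(path)) { // the "doublechecking" pass
                    return true;
                }
                std::this_thread::sleep_for(backoff);
                backoff *= 2;
                break;
        }
    }
    return false;
}

int main() {
    std::cout << EnsureDefaultPool("/Root/.metadata/workload_manager/pools/default") << "\n";
}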
2025-06-03T10:27:19.022200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only ... atabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:27.998896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:28.003596Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667795881546262:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:28.102662Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667800176513618:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:28.466393Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:28.861825Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7511667800176514821:2607], TxId: 281474976715677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=MWU1NGE3ZWMtYjNkYTVkYWQtZjU5ZTczZGQtOTkzNTViMjA=. TraceId : 01jwtn9wc3bya18zj6wek4x8kz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-03T10:27:28.861938Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7511667800176514822:2608], TxId: 281474976715677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=MWU1NGE3ZWMtYjNkYTVkYWQtZjU5ZTczZGQtOTkzNTViMjA=. TraceId : 01jwtn9wc3bya18zj6wek4x8kz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [2:7511667800176514818:2566], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:27:28.862010Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=MWU1NGE3ZWMtYjNkYTVkYWQtZjU5ZTczZGQtOTkzNTViMjA=, ActorId: [2:7511667800176514629:2566], ActorState: ExecuteState, TraceId: 01jwtn9wc3bya18zj6wek4x8kz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 7344, MsgBus: 16328 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c88/r3tmp/tmpR7ZLJq/pdisk_1.dat 2025-06-03T10:27:29.265375Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:29.275939Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667804961320362:2079] 1748946449198189 != 1748946449198192 2025-06-03T10:27:29.284394Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7344, node 3 2025-06-03T10:27:29.313943Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:29.313955Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:29.313957Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:29.314000Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:29.321866Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:29.321899Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:29.322503Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16328 TClient is connected to server localhost:16328 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:29.503018Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
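The PRECONDITION_FAILED / KIKIMR_CONSTRAINT_VIOLATION pair above ("Conflict with existing key., code: 2012") is the behavior KqpUniqueIndex::InsertComplexFkPkOverlapDuplicate exercises: a write whose unique-index key collides with an existing row aborts the whole transaction rather than applying partially. A toy in-memory model of that all-or-nothing check, not the actual datashard implementation:

#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

// Toy model of a table with a unique secondary index: the whole batch is
// validated first, and a single conflict aborts it with nothing applied.
struct TRow { long pk; std::string user; std::string emb; };

class TTableModel {
    std::map<long, TRow> Rows;                // primary key -> row
    std::map<std::string, long> UniqueIndex;  // indexed column -> pk
public:
    // Returns std::nullopt on success, or the conflicting key (code 2012).
    std::optional<std::string> InsertTx(const std::vector<TRow>& batch) {
        for (const auto& r : batch) {
            auto it = UniqueIndex.find(r.user);
            if (it != UniqueIndex.end() && it->second != r.pk) {
                return r.user;                // "Conflict with existing key."
            }
        }
        for (const auto& r : batch) {         // validation passed: apply all
            Rows[r.pk] = r;
            UniqueIndex[r.user] = r.pk;
        }
        return std::nullopt;
    }
};

int main() {
    TTableModel t;
    t.InsertTx({{1, "alice", "e1"}});
    if (auto key = t.InsertTx({{2, "alice", "e2"}})) {
        std::cout << "PRECONDITION_FAILED: conflict on '" << *key << "'\n";
    }
}

In the log the abort then propagates outward: task 2 receives the abort from task 1, and the session actor turns it into the error QueryResponse.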
2025-06-03T10:27:29.505181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:29.519031Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.554448Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.608945Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.647422Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.782154Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667804961322002:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.782191Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.795355Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.821947Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.850562Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.882666Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.910345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.934484Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.004206Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.072718Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667809256289956:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.072789Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.073594Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511667809256289964:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.074885Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:30.081699Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511667809256289966:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:30.176961Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511667809256290017:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:30.666034Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... >> TestProtocols::TestResolveProtocol [GOOD] >> TestProtocols::TestHTTPCollectedVerySlow >> TInterconnectTest::TestBlobEvent >> TInterconnectTest::TestBlobEvent220BytesPreSerialized >> TestYmqHttpProxy::TestCreateQueueWithAllAttributes [GOOD] >> TInterconnectTest::TestBlobEvent [GOOD] >> TInterconnectTest::TestBlobEvent220Bytes >> TInterconnectTest::TestConnectAndDisconnect ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_stop_pdisk/unittest >> BSCStopPDisk::PDiskStop [GOOD] Test command err: RandomSeed# 15597974271646031631 |63.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestPingPongThroughSubChannel [GOOD] >> TInterconnectTest::TestBlobEvent220BytesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizes >> TestYmqHttpProxy::BillingRecordsForJsonApi >> TInterconnectTest::TestBlobEvent220Bytes [GOOD] >> TInterconnectTest::TestAddressResolve >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable >> KqpScheme::AlterDatabaseChangeOwner+EnableAlterDatabase [GOOD] >> KqpScheme::AlterDatabaseChangeOwner-EnableAlterDatabase >> TInterconnectTest::TestConnectAndDisconnect [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized >> TInterconnectTest::TestBlobEventDifferentSizes [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] >> TInterconnectTest::TestBlobEventPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventUpToMebibytes >> KqpConstraints::DefaultValuesForTableNegative4 [GOOD] >> KqpConstraints::IndexedTableAndNotNullColumn >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn >> TInterconnectTest::TestAddressResolve [GOOD] >> TInterconnectTest::OldNbs >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerialized [GOOD] >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw >> TInterconnectTest::TestBlobEventUpToMebibytes [GOOD] >> TInterconnectTest::TestBlobEventsThroughSubChannels ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap_reboots/unittest >> TOlapReboots::CreateDropStore [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: 
[1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:25:50.497599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:25:50.497635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.497641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:25:50.497647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:25:50.497663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:25:50.497668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:25:50.497679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:25:50.497696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:25:50.497836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:25:50.497921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:25:50.516210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:25:50.516241Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:25:50.516349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.526036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:25:50.526238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:25:50.526275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:25:50.532312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:25:50.532363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:25:50.532469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.532540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:25:50.533056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.533114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:25:50.533421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:25:50.533433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:25:50.533447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:25:50.533454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:25:50.533459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:25:50.533494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:25:50.535185Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:25:50.557255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:25:50.557383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.557456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:25:50.557508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:25:50.557522Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.558456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.558488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:25:50.558545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.558557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:25:50.558563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:25:50.558569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:25:50.559293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.559309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:25:50.559317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:25:50.559742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.559751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:25:50.559756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.559762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:25:50.560454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:25:50.560887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:25:50.560933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: 
advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:25:50.561117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:25:50.561141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:25:50.561149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:25:50.561216Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... nd, to populator: [193:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-03T10:27:31.741998Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [193:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-03T10:27:31.742062Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:27:31.742075Z node 193 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:182: TDropOlapStore TProposedWaitParts operationId# 1003:0 ProgressState at schemeshard: 72057594046678944 2025-06-03T10:27:31.742088Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: drop_store.cpp:202: TDropOlapStore TProposedWaitParts operationId# 1003:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409546 2025-06-03T10:27:31.742310Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.742327Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.742333Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:27:31.742341Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:27:31.742349Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:27:31.742479Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.742492Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 
Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.742500Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:27:31.742505Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:27:31.742510Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:27:31.742521Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 0/1, is published: true 2025-06-03T10:27:31.743783Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275382275 2025-06-03T10:27:31.743830Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72075186233409546 2025-06-03T10:27:31.743890Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.743963Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-03T10:27:31.743970Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409546, partId: 0 2025-06-03T10:27:31.743984Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 1003 2025-06-03T10:27:31.743995Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 129 -> 130 2025-06-03T10:27:31.744175Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:27:31.744642Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:27:31.744681Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:27:31.744688Z node 193 :FLAT_TX_SCHEMESHARD INFO: drop_store.cpp:235: TDropOlapStore TProposedDeleteParts operationId# 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:31.744710Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:27:31.744745Z node 193 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:27:31.744751Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:27:31.744757Z node 193 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:27:31.744760Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:27:31.744771Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-03T10:27:31.744776Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:27:31.744781Z node 193 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-06-03T10:27:31.744786Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1003:0 2025-06-03T10:27:31.744819Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:27:31.745383Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:27:31.745483Z node 193 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-03T10:27:31.745624Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:31.745837Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:27:31.745947Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:27:31.745955Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:27:31.745971Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:27:31.746091Z node 193 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186233409546;self_id=[193:334:2320];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:31.749199Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:27:31.749226Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:27:31.749358Z node 193 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1003 2025-06-03T10:27:31.749409Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send 
EvNotifyTxCompletion 2025-06-03T10:27:31.749420Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:27:31.749493Z node 193 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:27:31.749520Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:27:31.749539Z node 193 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [193:449:2419] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:27:31.749626Z node 193 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/OlapStore" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:31.749671Z node 193 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/OlapStore" took 65us result status StatusPathDoesNotExist 2025-06-03T10:27:31.749721Z node 193 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/OlapStore\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/OlapStore" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSucceedOnIndexedTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:33.328743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:33.328774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:33.328787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:33.328792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 
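Across these traces the schemeshard drives every sub-operation through the same numbered states ("Change state for txid N:0 2 -> 3", "3 -> 128", "128 -> 240"): create shards, configure them, propose to the coordinator, then done. A stripped-down model of that progression; the state numbers are copied from the log, everything else is illustrative:

#include <iostream>

// Sub-operation states as they appear in the trace; the real set is larger.
enum class EOpState { CreateParts = 2, ConfigureParts = 3, Propose = 128, Done = 240 };

EOpState Next(EOpState s) {
    switch (s) {
        case EOpState::CreateParts:    return EOpState::ConfigureParts; // 2 -> 3
        case EOpState::ConfigureParts: return EOpState::Propose;        // 3 -> 128
        case EOpState::Propose:        return EOpState::Done;           // 128 -> 240
        case EOpState::Done:           return EOpState::Done;
    }
    return EOpState::Done;
}

int main() {
    for (EOpState s = EOpState::CreateParts; s != EOpState::Done; s = Next(s)) {
        std::cout << "Change state " << static_cast<int>(s)
                  << " -> " << static_cast<int>(Next(s)) << "\n";
    }
}

Each TTxOperationProgress record in the log corresponds to one such transition being executed and persisted.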
2025-06-03T10:27:33.328797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:33.328801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:33.328809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:33.328825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:33.328912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:33.328981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:33.340000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:33.340029Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:33.343908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:33.344010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:33.344042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:33.346299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:33.346376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:33.346510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:33.346568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:33.347199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:33.347255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:33.347596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:33.347608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:33.347622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:33.347634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:33.347641Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:33.347664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.349118Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:33.371725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:33.371817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.371901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:33.371955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:33.371969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.373092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:33.373123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:33.373199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.373212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:33.373218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:33.373224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:33.373752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.373765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:33.373772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:33.374090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-03T10:27:33.374101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.374108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:33.374117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:33.374968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:33.378121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:33.378198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:33.378469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:33.378521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:33.378533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:33.378635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:33.378645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:33.378698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:33.378714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:33.379420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:33.379430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:27:33.379489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... d.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:33.532199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:27:33.532214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.532223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:27:33.532484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:27:33.532504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:27:33.532510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:27:33.532518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:27:33.532526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:27:33.532547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-03T10:27:33.533186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:27:33.554824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 319 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:27:33.554860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-03T10:27:33.554898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 319 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:27:33.554921Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 319 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:27:33.555204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 331 RawX2: 4294969608 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:33.555212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-03T10:27:33.555230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 331 RawX2: 4294969608 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:33.555237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:27:33.555246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 331 RawX2: 4294969608 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:33.555260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:33.555264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.555269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:27:33.555278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:27:33.555941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.556868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.556973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:33.556983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:27:33.557003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 
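[Editor's aside] The describe result that follows reports /MyRoot/TTLEnabledTable with TTLSettings { ColumnName: "modified_at", ExpireAfterSeconds: 3600 }. For orientation, a minimal YQL sketch that would yield an equivalent schema; the table name, column names, and the 3600-second TTL are taken from the log, while the DDL itself is an assumed illustration, not the test's own code:

CREATE TABLE TTLEnabledTable (
    key Uint64,
    modified_at Timestamp,  -- the TTL reference column named in the describe result
    PRIMARY KEY (key)
);
-- 3600 seconds = PT1H, matching ExpireAfterSeconds: 3600 below
ALTER TABLE TTLEnabledTable SET (TTL = Interval("PT1H") ON modified_at);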
2025-06-03T10:27:33.557013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:33.557019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:27:33.557022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:33.557028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:27:33.557049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:381:2348] message: TxId: 102 2025-06-03T10:27:33.557058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:33.557065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:27:33.557069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:27:33.557099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:27:33.557800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:27:33.557818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:464:2424] TestWaitNotification: OK eventTxId 102 2025-06-03T10:27:33.557968Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:33.558049Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 92us result status StatusSuccess 2025-06-03T10:27:33.558216Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "UserDefinedIndexByExpireAt" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "modified_at" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } 
TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/indexes/unittest >> KqpVectorIndexes::SimpleVectorIndexOrderByCosineDistanceWithCover-Nullable [GOOD] Test command err: Trying to start YDB, gRPC: 11290, MsgBus: 22895 2025-06-03T10:27:18.450987Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667755058858360:2213];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c67/r3tmp/tmphuqClk/pdisk_1.dat 2025-06-03T10:27:18.547700Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:18.620546Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667755058858167:2079] 1748946438430139 != 1748946438430142 2025-06-03T10:27:18.620821Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11290, node 1 2025-06-03T10:27:18.641709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:18.641720Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:18.641722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:18.641768Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:18.642162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:18.642184Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-03T10:27:18.645724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22895 TClient is connected to server localhost:22895 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:18.857069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.873688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:18.882539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.949356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.012420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.052041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:19.188088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759353827101:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.188125Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.320762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.356147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.385547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.411761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.447509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.470766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.499167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:19.533977Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759353827749:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.534040Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.534240Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667759353827757:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:19.535194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:19.538758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:19.538853Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667759353827759:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:19.638668Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667759353827819:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:20.061475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:20.137962Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 281474976715674 DatabaseName: "/Root" Settings { source_path: "/Root/TestTable" index { name: "index1" index_columns: "emb" global_vector_kmeans_tree_index { vector_settings { settings { metric: SIMILARITY_COSINE vector_type: VECTOR_TYPE_UINT8 vector_dimension: 2 } clusters: 2 levels: 2 } } } } UserSID: "" 2025-06-03T10:27:20.138338Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Locking 2025-06-03T10:27:20.138364Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 281474976715674 Locking TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [OwnerId: 72057594046644480, LocalPathId: 17], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: emb, State: Locking, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:7511667763648795576:2532], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:27:20.138366Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:186: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 281474976715674 2025-06-03T10:27:20.138407Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2616: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, BuildIndexId: 281474976715674, txId# 281474976710757 2025-06-03T10:27:20.138414Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2623: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, buildInfo: TBuildInfo{ IndexBuildId: 281474976715674, Uid: , DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1], TablePathId: [O ... 
PartOwners: 72075186224037925 NodeId: 2 StartTime: 1748946442862 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-03T10:27:32.878045Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:27:32.878060Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] state 'Ready' dataSize 294 rowCount 6 cpuUsage 0 2025-06-03T10:27:32.878079Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037925 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 19] raw table stats: DataSize: 294 RowCount: 6 IndexSize: 0 InMemSize: 0 LastAccessTime: 1748946452341 LastUpdateTime: 1748946442989 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 6 RowDeletes: 0 RowReads: 0 RangeReads: 46 PartCount: 1 RangeReadRows: 92 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 294 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:27:32.878110Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553162, Sender [2:7511667771185390719:2532], Recipient [2:7511667766890420951:2150]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186224037926 TableLocalId: 20 Generation: 1 Round: 0 TableStats { DataSize: 470 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1748946452342 LastUpdateTime: 1748946442999 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 29 PartCount: 1 RangeReadRows: 109 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 470 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { Memory: 82344 } ShardState: 2 UserTablePartOwners: 72075186224037926 NodeId: 2 StartTime: 1748946442862 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-03T10:27:32.878112Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:27:32.878115Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] state 'Ready' dataSize 470 rowCount 10 cpuUsage 0 2025-06-03T10:27:32.878130Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037926 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 20] raw table stats: DataSize: 470 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1748946452342 LastUpdateTime: 1748946442999 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 29 PartCount: 1 RangeReadRows: 109 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 470 IndexSize: 0 } 
ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:27:32.883800Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [2:7511667766890420951:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:27:32.883825Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:27:32.883830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 5 2025-06-03T10:27:32.883859Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 5 2025-06-03T10:27:32.883867Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 5 2025-06-03T10:27:32.883897Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:35 data size 896 row count 4 2025-06-03T10:27:32.883939Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037922 maps to shardIdx: 72057594046644480:35 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 4, DataSize 896 2025-06-03T10:27:32.883944Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037922, followerId 0 2025-06-03T10:27:32.883973Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:35 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-03T10:27:32.884006Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037922 2025-06-03T10:27:32.884021Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:37 data size 896 row count 4 2025-06-03T10:27:32.884029Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037924 maps to shardIdx: 72057594046644480:37 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 4, DataSize 896 2025-06-03T10:27:32.884031Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037924, followerId 0 2025-06-03T10:27:32.884038Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:37 with partCount# 0, rowCount# 4, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-03T10:27:32.884042Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037924 2025-06-03T10:27:32.884047Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 17 shard idx 72057594046644480:36 data size 704 row count 2 2025-06-03T10:27:32.884054Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037923 maps to shardIdx: 72057594046644480:36 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 17], pathId map=TestTable, is column=0, is olap=0, RowCount 2, DataSize 704 2025-06-03T10:27:32.884056Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037923, followerId 0 2025-06-03T10:27:32.884103Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:36 with partCount# 0, rowCount# 2, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-03T10:27:32.884109Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037923 2025-06-03T10:27:32.884116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 19 shard idx 72057594046644480:38 data size 294 row count 6 2025-06-03T10:27:32.884126Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037925 maps to shardIdx: 72057594046644480:38 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 19], pathId map=indexImplLevelTable, is column=0, is olap=0, RowCount 6, DataSize 294 2025-06-03T10:27:32.884128Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037925, followerId 0 2025-06-03T10:27:32.884138Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:38 with partCount# 1, rowCount# 6, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-03T10:27:32.884149Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037925 2025-06-03T10:27:32.884157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 20 shard idx 72057594046644480:39 data size 470 row count 10 2025-06-03T10:27:32.884167Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186224037926 maps to shardIdx: 72057594046644480:39 followerId=0, pathId: [OwnerId: 72057594046644480, LocalPathId: 20], pathId map=indexImplPostingTable, is column=0, is olap=0, RowCount 10, DataSize 470 2025-06-03T10:27:32.884169Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186224037926, followerId 0 2025-06-03T10:27:32.884176Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:235: [BackgroundCompaction] [Update] Skipped shard# 72057594046644480:39 with partCount# 1, rowCount# 10, searchHeight# 0, lastFullCompaction# 1970-01-01T00:00:00.000000Z at schemeshard 72057594046644480 2025-06-03T10:27:32.884180Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186224037926 2025-06-03T10:27:32.884208Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:32.884498Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, 
received event# 2146435092, Sender [0:0:0], Recipient [2:7511667766890420951:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:27:32.884508Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:27:32.884510Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-03T10:27:32.925439Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435077, Sender [0:0:0], Recipient [2:7511667766890420951:2150]: NKikimr::NSchemeShard::TEvPrivate::TEvIndexBuildingMakeABill 2025-06-03T10:27:32.925464Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5022: StateWork, processing event TEvPrivate::TEvIndexBuildingMakeABill 2025-06-03T10:27:32.925556Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:1430: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TTxBilling, id# 281474976710674 2025-06-03T10:27:32.925563Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:32.925597Z node 2 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TInterconnectTest::OldNbs [GOOD] >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL >> KqpScheme::AlterDatabaseChangeOwner-EnableAlterDatabase [GOOD] >> KqpScheme::AlterGroup >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL >> TestProtocols::TestHTTPCollectedVerySlow [GOOD] >> TestProtocols::TestHTTPRequest |63.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |63.1%| [LD] {RESULT} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |63.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut/ydb-core-load_test-ut |63.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |63.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |63.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_compaction/ydb-core-tx-schemeshard-ut_compaction |63.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventDifferentSizesPreSerializedAndRaw [GOOD] |63.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::OldNbs [GOOD] >> TestProtocols::TestHTTPRequest [GOOD] |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardColumnTableTTL::CreateColumnTableNegative_UnknownColumn [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:33.966442Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:33.966474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:33.966488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:33.966495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:33.966502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:33.966507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:33.966517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:33.966530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:33.966644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:33.966731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:33.983593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:33.983619Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:33.994302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:33.994448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:33.994484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:33.996330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:33.996385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:33.999017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:33.999143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:34.000035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.000085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:34.000406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
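[Editor's aside] This test (CreateColumnTableNegative_UnknownColumn) later proposes a column table whose TtlSettings reference created_at, a column absent from the schema (only key and modified_at are declared); the propose further below is rejected with StatusSchemeError "Incorrect ttl column - not found in scheme". A hypothetical YQL equivalent of that invalid DDL, with names taken from the log and the column-table TTL syntax assumed:

CREATE TABLE TTLEnabledTable (
    key Uint64 NOT NULL,
    modified_at Timestamp,
    PRIMARY KEY (key)
)
WITH (
    STORE = COLUMN,                        -- column table, as in ESchemeOpCreateColumnTable
    TTL = Interval("PT0S") ON created_at   -- created_at is not a schema column, so the propose fails
);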
2025-06-03T10:27:34.000421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.000431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:34.000456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.000463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:34.000485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.003561Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:34.043300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:34.043377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.043447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:34.043499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:34.043511Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.044327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.044352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:34.044418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.044429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:34.044436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:34.044443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:34.044855Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.044867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:34.044874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:34.045186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.045194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.045201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.045210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.046079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:34.046603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:34.046648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:34.046847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.046875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:34.046884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.046957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:34.046966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.047000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:34.047014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:34.047405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:34.047413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.047471Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.047478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:27:34.047557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.047565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:27:34.047579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:27:34.047584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.047590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:27:34.047594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.047600Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:27:34.047610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.047616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:27:34.047621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:27:34.047632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:27:34.047639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:27:34.047644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:27:34.048027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:27:34.048042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:27:34.048048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:27:34.048054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:27:34.048059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:34.048073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:27:34.049004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:27:34.049117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-03T10:27:34.050165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateColumnTable CreateColumnTable { Name: "TTLEnabledTable" Schema { Columns { Name: "key" Type: "Uint64" NotNull: true } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" } TtlSettings { Enabled { ColumnName: "created_at" } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:34.050235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: create_table.cpp:593: TCreateColumnTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.050348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: Incorrect ttl column - not found in scheme, at schemeshard: 72057594046678944 2025-06-03T10:27:34.050448Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2259] Bootstrap 2025-06-03T10:27:34.052608Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2259] Become StateWork (SchemeCache [1:274:2264]) 2025-06-03T10:27:34.052927Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:34.053631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "Incorrect ttl column - not found in scheme" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:34.053667Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: Incorrect ttl column - not found in scheme, operation: CREATE COLUMN TABLE, path: /MyRoot/ 2025-06-03T10:27:34.053804Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> 
TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] >> TestKinesisHttpProxy::TestUnauthorizedPutRecords [GOOD] >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestBlobEventsThroughSubChannels [GOOD] >> TestKinesisHttpProxy::TestWrongStream >> EncryptedBackupParamsValidationTest::NoSourcePrefix |63.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |63.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |63.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume_reboots/ydb-core-tx-schemeshard-ut_bsvolume_reboots |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TestProtocols::TestHTTPRequest [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_OldPartitionExists_NotBoundary_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::AlterTableShouldSuccessOnSimultaneousAddColumnAndEnableTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:34.860661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:34.860691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:34.860705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:34.860711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:34.860718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:34.860723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:34.860734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:34.860749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:34.860873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , 
AvailableExternalDataSources# 2025-06-03T10:27:34.860945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:34.882692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:34.882725Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:34.893508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:34.893693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:34.893731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:34.911556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:34.911635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:34.911774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.911834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:34.912484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.912535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:34.912862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:34.912873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.912884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:34.912892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.912899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:34.912922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.914493Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:34.940289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:34.940372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.940443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:34.940495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:34.940507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.941790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.941828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:34.941900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.941914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:34.941922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:34.941929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:34.942689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.942701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:34.942708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:34.943056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.943066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.943073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.943081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.943895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:34.944318Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:34.944357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:34.944559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.944587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:34.944599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.944674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:34.944682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.944717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:34.944730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:34.945129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:34.945137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.945183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
TxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:27:35.076572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:35.076581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:27:35.076869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.076881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 102:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:27:35.077028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:27:35.077043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:27:35.077050Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:27:35.077059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:27:35.077067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:27:35.077088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-03T10:27:35.077409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 275 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:27:35.077425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:27:35.077452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 275 } } CommitVersion { Step: 5000003 TxId: 102 } 2025-06-03T10:27:35.077473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, 
ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 102 Step: 5000003 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 275 } } CommitVersion { Step: 5000003 TxId: 102 } FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:27:35.077905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:35.077919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:27:35.077940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:35.077948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:27:35.077958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:27:35.077973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:35.077978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.077984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:27:35.077992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:27:35.078447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:27:35.078754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.078786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.078856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.078865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:27:35.078883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done 
id#102:0 progress is 1/1 2025-06-03T10:27:35.078889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:35.078895Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:27:35.078899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:35.078905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:27:35.078922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:27:35.078933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:27:35.078940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:27:35.078946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:27:35.078982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:27:35.079347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:27:35.079357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:393:2364] TestWaitNotification: OK eventTxId 102 2025-06-03T10:27:35.079482Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLEnabledTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:35.079540Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLEnabledTable" took 67us result status StatusSuccess 2025-06-03T10:27:35.079672Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLEnabledTable" PathDescription { Self { Name: "TTLEnabledTable" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "Timestamp" TypeId: 50 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 
IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KqpScheme::AlterGroup [GOOD] >> KqpAcl::AclDml+UseSink+IsOlap [GOOD] >> KqpAcl::AclRevoke+UseSink+IsOlap >> KqpScheme::AlterColumnTableTtl >> KqpConstraints::IndexedTableAndNotNullColumn [GOOD] >> KqpConstraints::IndexAutoChooseAndNonReadyIndex >> BackupRestore::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldFailOnBeforeEpochTTL [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:35.155511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:35.155539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:35.155545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:35.155551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:35.155558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:35.155563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:35.155572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:35.155586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:35.155690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:35.155758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:35.203579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:35.203607Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:35.218603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:35.218779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:35.218824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:35.227523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:35.227627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:35.227752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:35.227810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:35.228518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:35.228577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:35.228897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:35.228911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:35.228920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:35.228933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:35.228939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:35.228964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.230508Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:35.298061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:35.298146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.298225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:35.298285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:35.298297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.307048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:35.307104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:35.307198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.307213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:35.307220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:35.307227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:35.307986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.308002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:35.308010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:35.308402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.308414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.308421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:35.308430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:27:35.309330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:35.313754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:35.313830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:35.314065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:35.314111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:35.314121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:35.314216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:35.314226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:35.314270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:35.314285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:35.317054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:35.317076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:35.317141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:35.317148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:27:35.317241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:27:35.317251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:27:35.317272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:27:35.317277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:35.317283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:27:35.317286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:35.317309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:27:35.317321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:27:35.317327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:27:35.317331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:27:35.317361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:27:35.317368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:27:35.317372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:27:35.317875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:27:35.317903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:27:35.317910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:27:35.317916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:27:35.317924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:35.317946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:27:35.328724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:27:35.328893Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR W0000 00:00:1748946455.329211 192788 text_format.cc:398] Warning parsing text-format NKikimrSchemeOp.TTableDescription: 9:35: text format contains deprecated field "ExpireAfterSeconds" TestModificationResults wait txId: 101 2025-06-03T10:27:35.330116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTable CreateTable { Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:35.330205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:35.330235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/TTLEnabledTable, opId: 101:0, schema: Name: "TTLEnabledTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "modified_at" Type: "Timestamp" } KeyColumnNames: "key" TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3153600000 Tiers { ApplyAfterSeconds: 3153600000 Delete { } } } }, at schemeshard: 72057594046678944 2025-06-03T10:27:35.330343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusSchemeError, reason: TTL should be less than 1748946455 seconds (20242 days, 55 years). The ttl behaviour is undefined before 1970., at schemeshard: 72057594046678944 2025-06-03T10:27:35.330495Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2259] Bootstrap 2025-06-03T10:27:35.332583Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2259] Become StateWork (SchemeCache [1:274:2264]) 2025-06-03T10:27:35.332977Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:35.342372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusSchemeError Reason: "TTL should be less than 1748946455 seconds (20242 days, 55 years). The ttl behaviour is undefined before 1970." TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:35.342430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusSchemeError, reason: TTL should be less than 1748946455 seconds (20242 days, 55 years). 
The ttl behaviour is undefined before 1970., operation: CREATE TABLE, path: /MyRoot/TTLEnabledTable 2025-06-03T10:27:35.342613Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] >> BackupRestoreS3::RestoreTablePartitioningSettings >> KqpScheme::AlterColumnTableTtl [GOOD] >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] >> KqpScheme::CreateTableWithTtlOnIntColumn [GOOD] >> KqpScheme::CreateTableWithTtlOnDatetime64Column >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionInactive_1_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test >> EncryptedBackupParamsValidationTest::NoSourcePrefix [GOOD] |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable >> TSchemeShardTestExtSubdomainReboots::Fake [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterColumnTableTtl [GOOD] Test command err: Trying to start YDB, gRPC: 64484, MsgBus: 12798 2025-06-03T10:27:28.433372Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667800593422682:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:28.433671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00171b/r3tmp/tmp09NY8J/pdisk_1.dat 2025-06-03T10:27:28.507919Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64484, node 1 2025-06-03T10:27:28.533745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:28.533780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:28.539864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:28.541960Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:28.541971Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:28.541974Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:28.542038Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12798 TClient is connected to server localhost:12798 WaitRootIsUp 'Root'... 
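For reference, the two TTL rejections above map to DDL along the following lines — a minimal YQL sketch, not part of the captured test output; the syntax is assumed from standard YDB YQL TTL support, and only the table path and column names are taken from the log:

-- Rejected with StatusSchemeError "Incorrect ttl column - not found in scheme":
-- TtlSettings names "created_at", but the schema only defines "key" and "modified_at".
CREATE TABLE `/MyRoot/TTLEnabledTable` (
    key Uint64 NOT NULL,
    modified_at Timestamp,
    PRIMARY KEY (key)
) PARTITION BY HASH(key)
WITH (STORE = COLUMN, TTL = Interval("PT0S") ON created_at);

-- Rejected with "TTL should be less than 1748946455 seconds ... The ttl behaviour is
-- undefined before 1970.": ExpireAfterSeconds: 3153600000 (36500 days, ~100 years)
-- puts the expiry cutoff (now minus TTL) before the Unix epoch.
CREATE TABLE `/MyRoot/TTLEnabledTable` (
    key Uint64,
    modified_at Timestamp,
    PRIMARY KEY (key)
) WITH (TTL = Interval("P36500D") ON modified_at);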
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:28.715769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:28.719669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:28.724884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:28.799437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:28.862871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:27:28.882776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:28.945608Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667800593424205:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.945647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.000999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.013911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.026351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.037538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.050895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.065430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.080886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.141853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667804888392158:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.141878Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.142037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667804888392163:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.142985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:29.147170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:29.147278Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667804888392165:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:29.219727Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667804888392216:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:29.412265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 9880, MsgBus: 15311 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00171b/r3tmp/tmpYhLt0t/pdisk_1.dat 2025-06-03T10:27:29.977585Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:29.993902Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:29.997395Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667803669231283:2079] 1748946449938765 != 1748946449938768 TServer::EnableGrpc on GrpcPort 9880, node 2 2025-06-03T10:27:30.010431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:30.010447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:30.010449Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:30.010505Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15311 2025-06-03T10:27:30.067075Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:30.067113Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:30.068034Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15311 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true Crea ... 
state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.670949Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.671021Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.671102Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.671182Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.671264Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.671345Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:27:36.675664Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511667834556149687:2491], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.675689Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.680307Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:36.686804Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.686913Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.686994Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687071Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687147Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687220Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687297Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687388Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687472Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.687559Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:27:36.696180Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511667834556149756:2498], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.696241Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.697263Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:36.701806Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.701910Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.701985Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702065Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702138Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702213Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702394Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702495Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702576Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.702654Z node 8 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:27:36.709179Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[8:7511667834556148943:2339];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.710254Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037889 not found 2025-06-03T10:27:36.710290Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[8:7511667834556148944:2340];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.711395Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[8:7511667834556148936:2332];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 
2025-06-03T10:27:36.711395Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[8:7511667834556148941:2337];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.712192Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[8:7511667834556148937:2333];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.712261Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[8:7511667834556148957:2341];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.713286Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[8:7511667834556148939:2335];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.713441Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[8:7511667834556148940:2336];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.714213Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[8:7511667834556148942:2338];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.715011Z node 8 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[8:7511667834556148938:2334];ev=NKikimr::TEvTablet::TEvTabletDead;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:27:36.715959Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037896 not found 2025-06-03T10:27:36.715969Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037894 not found 2025-06-03T10:27:36.715972Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037891 not found 2025-06-03T10:27:36.715976Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037897 not found 2025-06-03T10:27:36.715979Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037888 not found 2025-06-03T10:27:36.715982Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037893 not found 2025-06-03T10:27:36.715986Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037890 not found 2025-06-03T10:27:36.715988Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037895 not found 2025-06-03T10:27:36.715991Z node 8 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 8, TabletId: 72075186224037892 not found ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/lib/ydb_cli/topic/ut/unittest >> TTopicReaderTests::TestRun_ReadMessages_Output_Base64 [GOOD] Test command err: === Starting PQ server === Server->StartServer(false); 2025-06-03T10:27:09.537690Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667715530071535:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:09.537996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:09.578352Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667717788229930:2222];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f9f/r3tmp/tmpoan51R/pdisk_1.dat 2025-06-03T10:27:09.689761Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:27:09.701399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:09.701640Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:27:09.807789Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:09.819325Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:09.819355Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 5989, node 1 2025-06-03T10:27:09.824999Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:09.827848Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:09.910809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/001f9f/r3tmp/yandexnuhIa9.tmp 2025-06-03T10:27:09.910824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/001f9f/r3tmp/yandexnuhIa9.tmp 2025-06-03T10:27:09.910904Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/001f9f/r3tmp/yandexnuhIa9.tmp 2025-06-03T10:27:09.910955Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:09.913591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:09.913620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:09.921953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:09.926537Z INFO: TTestServer started on Port 7121 GrpcPort 5989 TClient is connected to server localhost:7121 PQClient connected to localhost:5989 === TenantModeEnabled() = 0 === Init PQ - start server on port 5989 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:10.055530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 281474976710657 TabletId: 72057594046644480 PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:27:10.055616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.055693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:27:10.055738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:27:10.055748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.060863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710657, response: Status: StatusAccepted TxId: 281474976710657 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:27:10.060909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710657, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:27:10.060964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.060978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710657:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:27:10.060980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 281474976710657:0 ProgressState no shards to create, do next state 2025-06-03T10:27:10.060984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 2 -> 3 waiting... 
2025-06-03T10:27:10.061925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:10.061937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710657, ready parts: 0/1, is published: true 2025-06-03T10:27:10.061942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:27:10.069787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.069803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 281474976710657:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:27:10.069811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 3 -> 128 2025-06-03T10:27:10.074916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.074928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:10.074933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:27:10.074941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976710657 ready parts: 1/1 2025-06-03T10:27:10.075836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976710657 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:10.079825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710657:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976710657 msg type: 269090816 2025-06-03T10:27:10.079887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710657, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:27:10.080765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946430126, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:27:10.080811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946430126 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:27:10.080819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:27:10.080893Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710657:0 128 -> 240 2025-06-03T10:27:10.080899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 281474976710657:0, at tablet# 72057594046644480 2025-06-03T10:27:10.080938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:27:10.080949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:27:10.081422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:27:10.081430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976710657, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:27:10.081479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:27:10.081484Z node 1 :FLAT_TX_SCHEME ... EUE_READ_BALANCER INFO: read_balancer__balancing.cpp:545: [72075186224037898][rt3.dc1--topic1] consumer cli family 1 status Active partitions [0] session "shared/cli_5_1_17655176915639680221_v1" sender [5:7511667826944766114:2620] lock partition 0 for ReadingSession "shared/cli_5_1_17655176915639680221_v1" (Sender=[5:7511667826944766114:2620], Pipe=[5:7511667826944766117:2620], Partitions=[], ActiveFamilyCount=1) generation 1 step 1 2025-06-03T10:27:35.705008Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1322: [72075186224037898][rt3.dc1--topic1] consumer cli start rebalancing. 
familyCount=1, sessionCount=1, desiredFamilyCount=1, allowPlusOne=0 2025-06-03T10:27:35.705013Z node 6 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1399: [72075186224037898][rt3.dc1--topic1] consumer cli balancing duration: 0.000028s 2025-06-03T10:27:35.705327Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:1315: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 assign: record# { Partition: 0 TabletId: 72075186224037897 Topic: "rt3.dc1--topic1" Generation: 1 Step: 1 Session: "shared/cli_5_1_17655176915639680221_v1" ClientId: "cli" PipeClient { RawX1: 7511667826944766117 RawX2: 4503621102209596 } Path: "/Root/PQ/rt3.dc1--topic1" } 2025-06-03T10:27:35.705342Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1132: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 INITING TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) 2025-06-03T10:27:35.705550Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:972: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) pipe restart attempt 0 pipe creation result: OK TabletId: 72075186224037897 Generation: 1, pipe: [5:7511667826944766119:2623] 2025-06-03T10:27:35.705817Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: shared/cli_5_1_17655176915639680221_v1:1 with generation 1 2025-06-03T10:27:35.714593Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 0 event { CmdGetClientOffsetResult { Offset: 0 EndOffset: 3 SizeLag: 409 WriteTimestampEstimateMS: 1748946455699 ClientHasAnyCommits: false } Cookie: 18446744073709551615 } 2025-06-03T10:27:35.714622Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:683: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 INIT DONE TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 2025-06-03T10:27:35.714650Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1413: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 sending to client partition status 2025-06-03T10:27:35.715328Z :INFO: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. 
Read offset: (NULL) 2025-06-03T10:27:35.719176Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 } } 2025-06-03T10:27:35.719273Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, commitOffset# (empty maybe) 2025-06-03T10:27:35.719300Z node 5 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 readOffset 0 committedOffset 0 clientCommitOffset (empty maybe) clientReadOffset 0 2025-06-03T10:27:35.719307Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 0 endOffset 3 2025-06-03T10:27:35.719333Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 0, endOffset# 3, WTime# 0, sizeLag# 409 2025-06-03T10:27:35.719337Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1TEvPartitionReady. Aval parts: 1 2025-06-03T10:27:35.719357Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 performing read request: guid# b14eca84-348df872-e3fe89cd-36c155a, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 3, size# 490, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-03T10:27:35.719416Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 3 maxSize 490 maxTimeLagMs 0 readTimestampMs 0 readOffset 0 EndOffset 3 ClientCommitOffset 0 committedOffset 0 Guid b14eca84-348df872-e3fe89cd-36c155a 2025-06-03T10:27:35.719946Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 3 Result { Offset: 0 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 1 WriteTimestampMS: 1748946455596 CreateTimestampMS: 1748946455597 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 1 Data: "... 79 bytes ..." SourceId: "\000source1" SeqNo: 2 WriteTimestampMS: 1748946455600 CreateTimestampMS: 1748946455597 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } Result { Offset: 2 Data: "... 79 bytes ..." 
SourceId: "\000source1" SeqNo: 3 WriteTimestampMS: 1748946455600 CreateTimestampMS: 1748946455597 UncompressedSize: 8 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 43 RealReadOffset: 2 WaitQuotaTimeMs: 0 EndOffset: 3 StartOffset: 0 } Cookie: 0 } 2025-06-03T10:27:35.719999Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset3 2025-06-03T10:27:35.720010Z node 5 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid b14eca84-348df872-e3fe89cd-36c155a has messages 1 2025-06-03T10:27:35.720052Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 read done: guid# b14eca84-348df872-e3fe89cd-36c155a, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 371 2025-06-03T10:27:35.720069Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 response to read: guid# b14eca84-348df872-e3fe89cd-36c155a 2025-06-03T10:27:35.720207Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 Process answer. Aval parts: 0 2025-06-03T10:27:35.720456Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] Got ReadResponse, serverBytesSize = 371, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-06-03T10:27:35.720511Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428429 2025-06-03T10:27:35.720609Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (0-2) 2025-06-03T10:27:35.720618Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] Returning serverBytesSize = 371 to budget 2025-06-03T10:27:35.720624Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] In ContinueReadingDataImpl, ReadSizeBudget = 371, ReadSizeServerDelta = 52428429 2025-06-03T10:27:35.720682Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-03T10:27:35.720780Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (0-0) 2025-06-03T10:27:35.720807Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (1-1) 2025-06-03T10:27:35.720815Z :DEBUG: [] Take Data. Partition 0. Read: {1, 1} (2-2) 2025-06-03T10:27:35.720829Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] The application data is transferred to the client. Number of messages 3, size 24 bytes 2025-06-03T10:27:35.720844Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] Returning serverBytesSize = 0 to budget 2025-06-03T10:27:35.720894Z :INFO: [] [] [8fce3486-59775551-15b548fb-d0e43fb] Closing read session. 
Close timeout: 0.000000s 2025-06-03T10:27:35.720904Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:2:0 2025-06-03T10:27:35.720913Z :INFO: [] [] [8fce3486-59775551-15b548fb-d0e43fb] Counters: { Errors: 0 CurrentSessionLifetimeMs: 21 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:27:35.720930Z :NOTICE: [] [] [8fce3486-59775551-15b548fb-d0e43fb] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:27:35.720937Z :DEBUG: [] [] [8fce3486-59775551-15b548fb-d0e43fb] [] Abort session to cluster 2025-06-03T10:27:35.721101Z :NOTICE: [] [] [8fce3486-59775551-15b548fb-d0e43fb] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:27:35.722053Z node 5 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 grpc read done: success# 1, data# { read_request { bytes_size: 371 } } 2025-06-03T10:27:35.722084Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 grpc closed 2025-06-03T10:27:35.722096Z node 5 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/cli session shared/cli_5_1_17655176915639680221_v1 is DEAD 2025-06-03T10:27:35.723648Z node 5 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/cli_5_1_17655176915639680221_v1 2025-06-03T10:27:35.723756Z node 6 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037898][rt3.dc1--topic1] pipe [5:7511667826944766117:2620] disconnected; active server actors: 1 2025-06-03T10:27:35.723772Z node 6 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037898][rt3.dc1--topic1] pipe [5:7511667826944766117:2620] client cli disconnected session shared/cli_5_1_17655176915639680221_v1 |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformScopesDistribution >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::Fake [GOOD] >> KqpScheme::CreateTableWithTtlOnDatetime64Column [GOOD] >> KqpScheme::CreateTableWithStoreExternalBlobs |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_olap/unittest >> TOlap::CreateDropStandaloneTableDefaultSharding [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:30.587200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:30.587233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:30.587240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:30.587246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:30.587253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:30.587257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, 
limit 10000 2025-06-03T10:27:30.587269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:30.587290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:30.587410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:30.587517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:30.605252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:30.605285Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:30.610347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:30.610518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:30.610571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:30.613618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:30.613691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:30.613811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:30.613876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:30.614576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:30.614630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:30.614985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:30.614998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:30.615008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:30.615018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:30.615025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:30.615047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.616627Z 
node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:30.642665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:30.642757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.642837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:30.642888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:30.642899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.643988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:30.644025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:30.644100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.644112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:30.644119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:30.644126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:30.644745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.644759Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:30.644766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:30.645152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.645163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:30.645170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:30.645178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:30.645993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:30.646580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:30.646633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:30.646883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:30.646919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:30.646932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:30.647016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:30.647026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:30.647066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:30.647081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:30.647650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:30.647663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:30.647740Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
shard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:27:37.126299Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:27:37.126316Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:27:37.126322Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:27:37.126343Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:27:37.126347Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:27:37.126364Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-03T10:27:37.126369Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-03T10:27:37.126387Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:21 2025-06-03T10:27:37.126391Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:21 tabletId 72075186233409566 2025-06-03T10:27:37.126668Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-03T10:27:37.126675Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-03T10:27:37.126691Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:17 2025-06-03T10:27:37.126695Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:17 tabletId 72075186233409562 2025-06-03T10:27:37.126713Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-03T10:27:37.126717Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-03T10:27:37.126767Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:13 2025-06-03T10:27:37.126772Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:13 tabletId 72075186233409558 2025-06-03T10:27:37.126918Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-03T10:27:37.126923Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-03T10:27:37.127045Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:9 2025-06-03T10:27:37.127051Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:9 tabletId 72075186233409554 2025-06-03T10:27:37.127069Z node 3 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 2025-06-03T10:27:37.127074Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-06-03T10:27:37.127337Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:38 2025-06-03T10:27:37.127344Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:38 tabletId 72075186233409583 2025-06-03T10:27:37.127818Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:40 2025-06-03T10:27:37.127828Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:40 tabletId 72075186233409585 2025-06-03T10:27:37.127899Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-03T10:27:37.127905Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-03T10:27:37.128113Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:34 2025-06-03T10:27:37.128121Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:34 tabletId 72075186233409579 2025-06-03T10:27:37.128144Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-03T10:27:37.128149Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-03T10:27:37.128173Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:30 2025-06-03T10:27:37.128178Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:30 tabletId 72075186233409575 2025-06-03T10:27:37.128195Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:26 2025-06-03T10:27:37.128200Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:26 tabletId 72075186233409571 2025-06-03T10:27:37.128245Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-03T10:27:37.128250Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-03T10:27:37.128290Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:57 2025-06-03T10:27:37.128294Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:57 tabletId 72075186233409602 2025-06-03T10:27:37.128332Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:53 2025-06-03T10:27:37.128337Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:53 tabletId 
72075186233409598 2025-06-03T10:27:37.128354Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:55 2025-06-03T10:27:37.128358Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:55 tabletId 72075186233409600 2025-06-03T10:27:37.129714Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:49 2025-06-03T10:27:37.129730Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:49 tabletId 72075186233409594 2025-06-03T10:27:37.129753Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:47 2025-06-03T10:27:37.129758Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:47 tabletId 72075186233409592 2025-06-03T10:27:37.129773Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:51 2025-06-03T10:27:37.129777Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:51 tabletId 72075186233409596 2025-06-03T10:27:37.129796Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:45 2025-06-03T10:27:37.129801Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:45 tabletId 72075186233409590 2025-06-03T10:27:37.129813Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:43 2025-06-03T10:27:37.129817Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:43 tabletId 72075186233409588 2025-06-03T10:27:37.129837Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:41 2025-06-03T10:27:37.129847Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:41 tabletId 72075186233409586 2025-06-03T10:27:37.129920Z node 3 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 2025-06-03T10:27:37.130127Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyDir/ColumnTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:37.130193Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyDir/ColumnTable" took 80us result status StatusPathDoesNotExist 2025-06-03T10:27:37.130239Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyDir/ColumnTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/MyDir\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: 
ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/MyDir/ColumnTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/MyDir" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "MyDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:27:37.130349Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72057594046678944 Options { }, at schemeshard: 72057594046678944 2025-06-03T10:27:37.130361Z node 3 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 4 took 12us result status StatusPathDoesNotExist 2025-06-03T10:27:37.130370Z node 3 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'\', error: path is empty, source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "" PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> BackupRestoreS3::RestoreTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTablePartitioningSettings >> TestKinesisHttpProxy::TestWrongStream [GOOD] >> TestKinesisHttpProxy::TestWrongStream2 >> TSchemeshardCompactionQueueTest::EnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] >> BackupRestore::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestore::TestAllPrimitiveTypes-PRIMITIVE_TYPE_ID_UNSPECIFIED [GOOD] >> BackupRestore::TestAllPrimitiveTypes-BOOL >> KqpAcl::AclRevoke+UseSink+IsOlap [GOOD] >> KqpScheme::CreateTableWithStoreExternalBlobs [GOOD] >> KqpScheme::CreateTableWithPgColumn >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTable [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSubDomain [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeRtmrVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSolomonVolume [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex >> EncryptedBackupParamsValidationTest::NoSourcePrefixEncrypted [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit |63.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |63.2%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |63.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/ut_blobstorage-ut_check_integrity |63.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::EnqueueSinglePartedShardWhenEnabled [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::CreateTableWithPgColumn [GOOD] 
|63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTableIndex [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpAcl::AclRevoke+UseSink+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 14972, MsgBus: 30274 2025-06-03T10:27:13.279059Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667735462281268:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:13.279179Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00172f/r3tmp/tmpm0Ly52/pdisk_1.dat 2025-06-03T10:27:13.429572Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667735462281111:2079] 1748946433275380 != 1748946433275383 2025-06-03T10:27:13.433046Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:13.441705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:13.441742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:13.442110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14972, node 1 2025-06-03T10:27:13.498756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:13.498774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:13.498777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:13.498837Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30274 TClient is connected to server localhost:30274 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:27:13.694975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:13.702266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:13.719738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:13.793112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:13.831713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:13.870657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:14.025648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667739757250045:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:14.025719Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:14.092102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.107312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.128129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.146709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.163056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.181264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.197238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.239860Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667739757250699:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:14.239888Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:14.239966Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667739757250704:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:14.240922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:14.243639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:14.243713Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667739757250706:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:14.338684Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667739757250766:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:14.695566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:14.918326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:27:14.918411Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:27:14.918550Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:27:14.918582Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:27:14.918611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:27:14.918635Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:27:14.918658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:27:14.918689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:27:14.918715Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037981;self_id=[1:7511667739757251295:2529];tablet_id=72075186224037981;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normali ... 
2], for# user0@builtin, access# DescribeSchema 2025-06-03T10:27:38.426631Z node 7 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [7:7511667842478098926:5362], for# user0@builtin, access# DescribeSchema 2025-06-03T10:27:38.427150Z node 7 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [7:7511667842478098923:3031], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Cannot find table 'db.[/Root/test_acl]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:38.428329Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=7&id=NzVhY2Q2Ni1jNzEzN2JlOC00ZWRmZDQ0My1hNzc1NDA2OA==, ActorId: [7:7511667842478098446:2966], ActorState: ExecuteState, TraceId: 01jwtna5smbxwymfz53zexvb2a, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:27:38.456464Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715706:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.512728Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715707. Ctx: { TraceId: 01jwtna5w24a0ym4q1wc2ts21y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.516888Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715708. Ctx: { TraceId: 01jwtna5w24a0ym4q1wc2ts21y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.520124Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715708;tx_id=281474976715708;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715708; 2025-06-03T10:27:38.527800Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715709. Ctx: { TraceId: 01jwtna5wy10xkp0zv46q7kazh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.532132Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715710. Ctx: { TraceId: 01jwtna5wy10xkp0zv46q7kazh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.535580Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715710;tx_id=281474976715710;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715710; 2025-06-03T10:27:38.545355Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715711. Ctx: { TraceId: 01jwtna5xg6eyp2qbt3bhrp0ck, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.548367Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715712. Ctx: { TraceId: 01jwtna5xg6eyp2qbt3bhrp0ck, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:27:38.551437Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715712;tx_id=281474976715712;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715712; 2025-06-03T10:27:38.565120Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715713. Ctx: { TraceId: 01jwtna5y2amhp66m70dd4jt1w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.578198Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715714. Ctx: { TraceId: 01jwtna5y2amhp66m70dd4jt1w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.581071Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715714;tx_id=281474976715714;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715714; 2025-06-03T10:27:38.592338Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715715. Ctx: { TraceId: 01jwtna5yw2f0vr255ywp1061k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.598591Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715716. Ctx: { TraceId: 01jwtna5yw2f0vr255ywp1061k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.613789Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715716;tx_id=281474976715716;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715716; 2025-06-03T10:27:38.628196Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715717. Ctx: { TraceId: 01jwtna603c8ydnrse1haexav8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.634468Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715718. Ctx: { TraceId: 01jwtna603c8ydnrse1haexav8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.645982Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715718;tx_id=281474976715718;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715718; 2025-06-03T10:27:38.653575Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715719. Ctx: { TraceId: 01jwtna60w0zk6pqmd6rax2yem, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:27:38.662761Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715720. Ctx: { TraceId: 01jwtna60w0zk6pqmd6rax2yem, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.674388Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715720;tx_id=281474976715720;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715720; 2025-06-03T10:27:38.705574Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715721. Ctx: { TraceId: 01jwtna620dwf94j3jzc7kk46m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.714653Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715722. Ctx: { TraceId: 01jwtna620dwf94j3jzc7kk46m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.727032Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715722;tx_id=281474976715722;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715722; 2025-06-03T10:27:38.730294Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715723. Ctx: { TraceId: 01jwtna639a7swp1dzwest1zeb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.739202Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715724. Ctx: { TraceId: 01jwtna639a7swp1dzwest1zeb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.754911Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715724;tx_id=281474976715724;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715724; 2025-06-03T10:27:38.764565Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715725. Ctx: { TraceId: 01jwtna64b54xwsc57hb52xqwd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:38.769125Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715726. Ctx: { TraceId: 01jwtna64b54xwsc57hb52xqwd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:27:38.782994Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715726;tx_id=281474976715726;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715726; 2025-06-03T10:27:38.787125Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715727:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.798350Z node 7 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [7:7511667842478099229:5507], for# user0@builtin, access# UpdateRow 2025-06-03T10:27:38.798485Z node 7 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976715728. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 17] Access: 2 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-03T10:27:38.798568Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=7&id=MjEzOGFhMjktZDllOTUyNWQtYzM3ZTdkMmQtMzdmYmRkY2M=, ActorId: [7:7511667842478098950:3042], ActorState: ExecuteState, TraceId: 01jwtna65a0mefwr0z1gwv3erg, Create QueryResponse for error on request, msg: >> TestYmqHttpProxy::BillingRecordsForJsonApi [GOOD] >> TBSVWithReboots::SimultaneousCreateDropNbs >> TPQTest::TestPQPartialRead [GOOD] >> TPQTest::TestPQRead >> BackupRestoreS3::RestoreIndexTablePartitioningSettings [GOOD] >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings >> KqpConstraints::IndexAutoChooseAndNonReadyIndex [GOOD] >> KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn >> TestKinesisHttpProxy::TestWrongStream2 [GOOD] |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TestKinesisHttpProxy::TestWrongRequest >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedExport [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibility |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithPgColumn [GOOD] Test command err: Trying to start YDB, gRPC: 5688, MsgBus: 7877 2025-06-03T10:27:28.929083Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667796830916105:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:28.929531Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001720/r3tmp/tmpcWibmM/pdisk_1.dat 2025-06-03T10:27:29.034253Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5688, node 1 2025-06-03T10:27:29.077514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:29.077530Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:29.077533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-03T10:27:29.077600Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:29.080453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:29.080479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:29.081411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7877 TClient is connected to server localhost:7877 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:29.161854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.174988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.252397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.280996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.299220Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.414066Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667801125884852:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.414097Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.491075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.504645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.539007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.554759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.569434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.587184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.604615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.641921Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667801125885507:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.641949Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.642127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667801125885512:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.643013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:29.646155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:29.646284Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667801125885514:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:29.739336Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667801125885565:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:30.063311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.092085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.093328Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found Trying to start YDB, gRPC: 10764, MsgBus: 17911 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001720/r3tmp/tmphMs6h9/pdisk_1.dat 2025-06-03T10:27:30.497593Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:30.505595Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10764, node 2 2025-06-03T10:27:30.517684Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:30.517695Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:30.517697Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:30.517756Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17911 TClient is connected to server localhost:17911 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:27:30.576638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:30.576670Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:30.577529Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDi ... 
ype: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 26597, MsgBus: 23015 2025-06-03T10:27:39.491091Z node 9 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[9:7511667847484397501:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:39.491185Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001720/r3tmp/tmpTTzmEb/pdisk_1.dat 2025-06-03T10:27:39.505931Z node 9 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26597, node 9 2025-06-03T10:27:39.521938Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:39.521956Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:39.521957Z node 9 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:39.522013Z node 9 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23015 TClient is connected to server localhost:23015 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:39.590852Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:39.590890Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:39.591904Z node 9 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(9, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:39.598059Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:39.601639Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:39.610099Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.631742Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:39.654038Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:39.665984Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:39.911208Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511667847484398940:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:39.911238Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:39.921728Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.934804Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.949374Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.959601Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.970197Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:39.984659Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.000256Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.025405Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511667851779366891:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:40.025530Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:40.025759Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511667851779366896:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:40.026894Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:40.031167Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:27:40.031298Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7511667851779366898:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:40.100650Z node 9 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [9:7511667851779366949:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:40.262751Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.289103Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.310397Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.343413Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.364552Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.384872Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.410341Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.432085Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.452747Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.472548Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:27:40.503827Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480 >> BackupRestore::TestAllPrimitiveTypes-BOOL [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT8 >> TBSVWithReboots::CreateAlterNoVersion >> 
EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_Active_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless >> TSchemeshardCompactionQueueTest::ShouldNotEnqueueEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSequence [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeReplication [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx0 [GOOD] |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled >> TBSVWithReboots::CreateAssignWithVersion >> CheckIntegrityBlock42::DataOk |63.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::RemoveLastShardFromSubQueues [GOOD] >> CheckIntegrityBlock42::PlacementOkWithErrors >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePathSpecified [GOOD] |63.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |63.3%| [LD] {RESULT} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut |63.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet/ut/ydb-core-tablet-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx0 [GOOD] Test command err: iteration# 0 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 6 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 12 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 18 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 24 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 30 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 36 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 42 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 48 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 54 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 60 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 66 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 72 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 78 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 84 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 90 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 96 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 
iteration# 102 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 ... (iterations 108 through 480, in steps of 6, repeat the same counters) ... iteration# 486 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 >> CheckIntegrityBlock42::DataOk [GOOD] >> CheckIntegrityBlock42::DataOkAdditionalEqualParts >> BackupRestoreS3::RestoreIndexTableReadReplicasSettings [GOOD] >> BackupRestoreS3::RestoreTableSplitBoundaries >> KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn [GOOD] >> CheckIntegrityBlock42::PlacementOkWithErrors [GOOD] >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks |63.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |63.3%| [LD] {RESULT}
$(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table |63.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_table/ydb-core-tx-datashard-ut_read_table >> CheckIntegrityBlock42::DataOkAdditionalEqualParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken >> CheckIntegrityBlock42::PlacementWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityBlock42::PlacementStatusUnknown >> CheckIntegrityBlock42::DataErrorSixPartsTwoBroken [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] |63.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |63.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream |63.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/ydb-core-tx-schemeshard-ut_cdc_stream >> TestKinesisHttpProxy::TestWrongRequest [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpConstraints::IndexedTableAndNotNullColumnAddNotNullColumn [GOOD] Test command err: Trying to start YDB, gRPC: 4600, MsgBus: 22068 2025-06-03T10:27:29.296967Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667802368026291:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:29.301992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00171e/r3tmp/tmp46CzLR/pdisk_1.dat 2025-06-03T10:27:29.407351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:29.407383Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 4600, node 1 2025-06-03T10:27:29.408525Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:29.415324Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:29.416419Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667802368026134:2079] 1748946449295948 != 1748946449295951 2025-06-03T10:27:29.441336Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:29.441349Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:29.441352Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:29.441406Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22068 TClient is connected to server localhost:22068 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:29.522442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.529278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.608253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.649882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.682770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.849838Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667802368027769:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.849877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:29.916721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.951991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:29.963629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.026851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.039905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.103110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.136078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.162676Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667806662995723:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.162705Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.162749Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667806662995728:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.163587Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:30.168001Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667806662995730:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:30.243235Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667806662995781:3392] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:30.454684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.545169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropSequence, opId: 281474976715675:1, at schemeshard: 72057594046644480 2025-06-03T10:27:30.561726Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037923 not found 2025-06-03T10:27:30.569351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 21889, MsgBus: 23841 2025-06-03T10:27:30.940266Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667807388694753:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:30.940470Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00171e/r3tmp/tmpMQwoXP/pdisk_1.dat 2025-06-03T10:27:30.977823Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21889, node 2 2025-06-03T10:27:30.989585Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:30.989601Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:30.989605Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:30.989663Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23841 2025-06-03T10:27:31.041624Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:31.041664Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:31.044110Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23841 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 Path ... anges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:27:42.156440Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:42.156469Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:42.156959Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.159061Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:27:42.163385Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:42.183991Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.210056Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:42.236006Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:42.486686Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667857420263340:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:42.486712Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:42.497962Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.509429Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.520393Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.531267Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.545731Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.559458Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.574433Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:27:42.590708Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667857420263991:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:42.590746Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:42.591154Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511667857420263996:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:42.592417Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:27:42.600532Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511667857420263998:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:27:42.665939Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511667857420264049:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:42.839686Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.140048Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710757:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.163947Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.217447Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710762:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.258793Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710765:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.349249Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710767:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.382863Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710770:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.460417Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710772:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.489618Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710775:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.544293Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710777:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.571542Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710780:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.633275Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710782:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.663379Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710785:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.719310Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710787:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.754280Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710790:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.828968Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710792:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.858876Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710795:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.942557Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710797:0, at schemeshard: 72057594046644480 2025-06-03T10:27:43.962705Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710800:0, at schemeshard: 72057594046644480 >> BackupRestore::TestAllPrimitiveTypes-INT8 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT16 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementStatusUnknown [GOOD] Test command err: RandomSeed# 5278378300815472856 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataOkErasureFiveParts [GOOD] Test command err: RandomSeed# 14276469803308527465 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 0 ] part 2: ver0 disks [ 7 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 0 ]; part 2 disks [ 7 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeView [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeResourcePool [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeTransfer [GOOD] >> BackupRestoreS3::TestAllSchemeObjectTypes-EPathTypeSysView [GOOD] >> EncryptedBackupParamsValidationTest::BadSourcePath >> CheckIntegrityBlock42::PlacementBlobIsLost >> CheckIntegrityMirror3dc::PlacementBlobIsLost >> BackupRestoreS3::RestoreTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_SourceId_PartitionNotExists_Test [GOOD] >> TPartitionGraphTest::BuildGraph [GOOD] >> TPartitionTests::AfterRestart_1 >> CheckIntegrityBlock42::PlacementBlobIsLost [GOOD] >> CheckIntegrityBlock42::PlacementAllOnHandoff >> CheckIntegrityBlock42::PlacementWrongDisks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestKinesisHttpProxy::TestWrongRequest [GOOD] Test command err: 2025-06-03T10:27:17.112985Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667751325822941:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:17.112996Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b84/r3tmp/tmpQ80sky/pdisk_1.dat 2025-06-03T10:27:17.289278Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667751325822922:2079] 1748946437112138 != 1748946437112141 
2025-06-03T10:27:17.293588Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18415, node 1 2025-06-03T10:27:17.336128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:17.336143Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:17.336146Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:17.336201Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:17.422317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.427047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:17.438341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:17.438378Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:17.439783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28628 2025-06-03T10:27:17.510103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.519411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:27:17.526006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:17.538034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-03T10:27:17.540754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.590693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:17.620270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:17.675995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.688219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.699649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:17.746310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:27:17.778204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.806367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.834036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.928484Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667751325824312:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:17.928533Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:17.933389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667751325824324:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:17.936370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480 2025-06-03T10:27:17.949453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715673, at schemeshard: 72057594046644480 2025-06-03T10:27:17.953098Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667751325824326:2376], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-03T10:27:18.006835Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667755620791673:2853] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:18.124277Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtn9hs2dzctnz0qxdn2qrqc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzlhZGRkZjUtN2EwMzkwZjYtZWYzNTIyZmQtOWIwNWYwOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:18.144984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.185123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.209441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.238866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.307955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.339130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.369573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.398477Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.426684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but pro ... 
heme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-03T10:27:44.293862Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] Minikql data response: {"settings": [], "truncated": false} 2025-06-03T10:27:44.293877Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_USER_SETTINGS_ID) Queue [] execution duration: 3ms 2025-06-03T10:27:44.293990Z node 8 :SQS TRACE: executor.cpp:286: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] HandleResponse { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-03T10:27:44.293992Z node 8 :SQS DEBUG: executor.cpp:287: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Attempt 1 execution duration: 3ms 2025-06-03T10:27:44.294065Z node 8 :SQS TRACE: executor.cpp:325: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Sending mkql execution result: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } 
} } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-03T10:27:44.294069Z node 8 :SQS TRACE: executor.cpp:327: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] Minikql data response: {"queues": [], "truncated": false} 2025-06-03T10:27:44.294077Z node 8 :SQS DEBUG: executor.cpp:401: Request [] Query(idx=GET_QUEUES_LIST_ID) Queue [] execution duration: 3ms 2025-06-03T10:27:44.294129Z node 8 :SQS TRACE: user_settings_reader.cpp:89: Handle user settings: { Status: 48 TxId: 281474976715686 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "settings" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Name" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "Value" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-03T10:27:44.294207Z node 8 :SQS TRACE: queues_list_reader.cpp:82: Handle queues list: { Status: 48 TxId: 281474976715685 StatusCode: SUCCESS ExecutionEngineStatus: 1 ExecutionEngineResponseStatus: 2 ExecutionEngineEvaluatedResponse { Type { Kind: Struct Struct { Member { Name: "queues" Type { Kind: Optional Optional { Item { Kind: List List { Item { Kind: Struct Struct { Member { Name: "Account" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "CreatedTimestamp" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "CustomQueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "DlqName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "FifoQueue" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } Member { Name: "FolderId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "MasterTabletId" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "QueueName" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4608 } } } } } Member { Name: "QueueState" Type { Kind: 
Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "Shards" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } Member { Name: "TablesFormat" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 2 } } } } } Member { Name: "Version" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 4 } } } } } } } } } } } } Member { Name: "truncated" Type { Kind: Optional Optional { Item { Kind: Data Data { Scheme: 6 } } } } } } } Value { Struct { Optional { } } Struct { Optional { Bool: false } } } } } 2025-06-03T10:27:44.321315Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7511667868101439215:2436]: Pool not found 2025-06-03T10:27:44.321384Z node 8 :SQS DEBUG: monitoring.cpp:60: [monitoring] Report deletion queue data lag: 0.000000s, count: 0 2025-06-03T10:27:44.347739Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7511667868101439213:2435]: Pool not found 2025-06-03T10:27:44.347932Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:100: [cleanup removed queues] getting queues... 2025-06-03T10:27:44.348687Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511667868101439338:2455], DatabaseId: /Root/SQS, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:44.348706Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:602: [WorkloadService] [TDatabaseFetcherActor] ActorId: [8:7511667868101439339:2456], Database: /Root/SQS, Failed to fetch database info, UNSUPPORTED, issues: {
: Error: Invalid database path /Root/SQS, please check the correctness of the path } 2025-06-03T10:27:44.348719Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/SQS, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:44.399787Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:552: [WorkloadService] [Service] Reply cleanup error NOT_FOUND to [8:7511667868101439336:2454]: Pool not found 2025-06-03T10:27:44.400219Z node 8 :SQS DEBUG: cleanup_queue_data.cpp:138: [cleanup removed queues] there are no queues to delete 2025-06-03T10:27:45.281742Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:83: (#37,[::1]:46114) incoming connection opened 2025-06-03T10:27:45.281798Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:156: (#37,[::1]:46114) -> (POST /, 87 bytes) 2025-06-03T10:27:45.281874Z node 8 :HTTP_PROXY INFO: http_service.cpp:102: proxy service: incoming request from [9860:58fa:db30:0:8060:58fa:db30:0] request [CreateStream] url [/] database [] requestId: 4fe49e2a-92af450c-ac63fdf-20f0ae2c 2025-06-03T10:27:45.282096Z node 8 :HTTP_PROXY WARN: http_req.cpp:948: http request [CreateStream] requestId [4fe49e2a-92af450c-ac63fdf-20f0ae2c] got new request with incorrect json from [9860:58fa:db30:0:8060:58fa:db30:0] database '' 2025-06-03T10:27:45.282146Z node 8 :HTTP_PROXY INFO: http_req.cpp:1211: http request [CreateStream] requestId [4fe49e2a-92af450c-ac63fdf-20f0ae2c] reply with status: BAD_REQUEST message: ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName 2025-06-03T10:27:45.282187Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:278: (#37,[::1]:46114) <- (400 InvalidArgumentException, 135 bytes) 2025-06-03T10:27:45.282201Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:287: (#37,[::1]:46114) Request: POST / HTTP/1.1 Host: example.amazonaws.com X-Amz-Target: kinesisApi.CreateStream X-Amz-Date: 20150830T123600Z Authorization: Content-Type: application/json Connection: Close Transfer-Encoding: chunked 57 { "ShardCount":5, "StreamName":"testtopic", "WrongStreamName":"WrongStreamName" } 0 2025-06-03T10:27:45.282207Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:294: (#37,[::1]:46114) Response: HTTP/1.1 400 InvalidArgumentException Connection: close x-amzn-requestid: 4fe49e2a-92af450c-ac63fdf-20f0ae2c x-amz-crc32: 3053902336 Content-Type: application/x-amz-json-1.1 Content-Length: 135 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 2025-06-03T10:27:45.282283Z node 8 :HTTP DEBUG: http_proxy_incoming.cpp:331: (#37,[::1]:46114) connection closed Http output full {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} 400 {"__type":"InvalidArgumentException","message":"ydb/core/http_proxy/json_proto_conversion.h:400: Unexpected json key: WrongStreamName"} >> CheckIntegrityBlock42::PlacementAllOnHandoff [GOOD] >> CheckIntegrityBlock42::PlacementDisintegrated >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPrefixSpecified [GOOD] >> EncryptedBackupParamsValidationTest::BadSourcePath [GOOD] >> TPartitionTests::AfterRestart_1 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_rw/unittest >> TColumnShardTestReadWrite::WriteReadDuplicate [GOOD] Test command err: 2025-06-03T10:27:08.751463Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:27:08.760205Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:27:08.760308Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:27:08.761179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:27:08.761243Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:27:08.761290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:27:08.761570Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:27:08.761592Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:27:08.761621Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:27:08.761640Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:27:08.761674Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:27:08.761696Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:27:08.761720Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:27:08.761746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:27:08.761767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:27:08.769887Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:27:08.769982Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 
2025-06-03T10:27:08.769996Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:27:08.770041Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:27:08.770101Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:27:08.770117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:27:08.770124Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:27:08.770135Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:27:08.770147Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:27:08.770156Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:27:08.770162Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:27:08.770184Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:27:08.770195Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:27:08.770204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:27:08.770209Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:27:08.770222Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:27:08.770231Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:27:08.770240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:27:08.770245Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 
2025-06-03T10:27:08.770259Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:27:08.770267Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:27:08.770272Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:27:08.770281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:27:08.770292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:27:08.770297Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:27:08.770324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:27:08.770333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:27:08.770337Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:27:08.770360Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:27:08.770368Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:27:08.770372Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-03T10:27:08.770386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:27:08.770394Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:27:08.770398Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-03T10:27:08.770407Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:27:08.770415Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:27:08.770424Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:27:08.770429Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=18;type=RestoreV0ChunksMeta; 2025-06-03T10:27:08.770515Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;TablesLoadingTime=12; 2025-06-03T10:27:08.770528Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=common_data.cpp:29;SchemaPresetLoadingTime=7; 2025-06-03T10:27:08.770537Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute ... og.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=10;merger=0;interval_id=49; 2025-06-03T10:27:43.117218Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-03T10:27:43.117229Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117234Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=10;finished=1; 2025-06-03T10:27:43.117241Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:199;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-03T10:27:43.117615Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:27:43.117648Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:1;records_count:10;schema=timestamp: timestamp[us];);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117654Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: 
log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-03T10:27:43.117667Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:230;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=10; 2025-06-03T10:27:43.117679Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:250;stage=data_format;batch_size=80;num_rows=10;batch_columns=timestamp; 2025-06-03T10:27:43.117739Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:366;event=send_data;compute_actor_id=[1:10348:12354];bytes=80;rows=10;faults=0;finished=0;fault=0;schema=timestamp: timestamp[us]; 2025-06-03T10:27:43.117753Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:270;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117767Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117776Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117838Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:27:43.117850Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce 
result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117858Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117864Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [1:10352:12358] finished for tablet 9437184 2025-06-03T10:27:43.117954Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[1:10348:12354];stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.113},{"events":["l_bootstrap"],"t":0.252},{"events":["f_processing","f_task_result"],"t":0.253},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.671}],"full":{"a":1748946462445977,"name":"_full_task","f":1748946462445977,"d_finished":0,"c":0,"l":1748946463117879,"d":671902},"events":[{"name":"bootstrap","f":1748946462446154,"d_finished":252615,"c":1,"l":1748946462698769,"d":252615},{"a":1748946463117836,"name":"ack","f":1748946463117608,"d_finished":172,"c":1,"l":1748946463117780,"d":215},{"a":1748946463117834,"name":"processing","f":1748946462699454,"d_finished":153484,"c":1766,"l":1748946463117780,"d":153529},{"name":"ProduceResults","f":1748946462559748,"d_finished":52588,"c":1769,"l":1748946463117861,"d":52588},{"a":1748946463117861,"name":"Finish","f":1748946463117861,"d_finished":0,"c":0,"l":1748946463117879,"d":18},{"name":"task_result","f":1748946462699464,"d_finished":149130,"c":1765,"l":1748946463117263,"d":149130}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.117968Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[1:10348:12354];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:27:43.118011Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[1:10348:12354];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap"],"t":0},{"events":["f_ProduceResults"],"t":0.113},{"events":["l_bootstrap"],"t":0.252},{"events":["f_processing","f_task_result"],"t":0.253},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.671}],"full":{"a":1748946462445977,"name":"_full_task","f":1748946462445977,"d_finished":0,"c":0,"l":1748946463117974,"d":671997},"events":[{"name":"bootstrap","f":1748946462446154,"d_finished":252615,"c":1,"l":1748946462698769,"d":252615},{"a":1748946463117836,"name":"ack","f":1748946463117608,"d_finished":172,"c":1,"l":1748946463117780,"d":310},{"a":1748946463117834,"name":"processing","f":1748946462699454,"d_finished":153484,"c":1766,"l":1748946463117780,"d":153624},{"name":"ProduceResults","f":1748946462559748,"d_finished":52588,"c":1769,"l":1748946463117861,"d":52588},{"a":1748946463117861,"name":"Finish","f":1748946463117861,"d_finished":0,"c":0,"l":1748946463117974,"d":113},{"name":"task_result","f":1748946462699464,"d_finished":149130,"c":1765,"l":1748946463117263,"d":149130}],"id":"9437184::49"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:27:43.118030Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:27:42.445405Z;index_granules=0;index_portions=294;index_batches=294;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=686784;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=686784;selected_rows=0; 2025-06-03T10:27:43.118038Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:27:43.118112Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[1:10352:12358];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> CheckIntegrityBlock42::PlacementDisintegrated [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown >> CheckIntegrityMirror3dc::PlacementBlobIsLost [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated >> TPartitionTests::AfterRestart_2 >> CheckIntegrityBlock42::PlacementWrongDisks [GOOD] >> CheckIntegrityMirror3dc::DataErrorOneCopy |63.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TestYmqHttpProxy::TestChangeMessageVisibility [GOOD] >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] >> CheckIntegrityMirror3dc::PlacementDisintegrated [GOOD] >> 
CheckIntegrityMirror3dc::DataOk >> EncryptedBackupParamsValidationTest::NoDestination >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] >> TPartitionTests::AfterRestart_2 [GOOD] >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch >> CheckIntegrityMirror3of4::PlacementOk >> BackupRestore::TestAllPrimitiveTypes-INT16 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT32 >> CheckIntegrityMirror3dc::DataOk [GOOD] >> CheckIntegrityMirror3dc::DataErrorOneCopy [GOOD] >> CheckIntegrityMirror3dc::DataErrorManyCopies ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataStatusUnknown [GOOD] Test command err: RandomSeed# 4981195649799780411 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: part 2: part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: |63.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> CheckIntegrityMirror3dc::PlacementOkWithErrors >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] >> CheckIntegrityMirror3of4::PlacementOk [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts >> EncryptedBackupParamsValidationTest::NoDestination 
[GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPartitionTests::AfterRestart_2 [GOOD] Test command err: 2025-06-03T10:27:29.312540Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667804968265872:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:29.312677Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:29.326284Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667802576097089:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:29.326463Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00232f/r3tmp/tmp7Pp3ws/pdisk_1.dat 2025-06-03T10:27:29.449790Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:27:29.459895Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:27:29.543872Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511667802576096878:2071] 1748946449314577 != 1748946449314580 2025-06-03T10:27:29.554197Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:29.561975Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:29.562006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:29.562563Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:29.562590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:29.566882Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:29.566923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:29.569888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10578, node 1 2025-06-03T10:27:29.646431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00232f/r3tmp/yandexFWyjS3.tmp 2025-06-03T10:27:29.646444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00232f/r3tmp/yandexFWyjS3.tmp 2025-06-03T10:27:29.646528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00232f/r3tmp/yandexFWyjS3.tmp 2025-06-03T10:27:29.646594Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:29.679542Z INFO: TTestServer started on Port 27492 GrpcPort 10578 
TClient is connected to server localhost:27492 PQClient connected to localhost:10578 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:29.810572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.838008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:29.869609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:27:29.918499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:27:30.167861Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667806871064577:2310], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.167916Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.174656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-06-03T10:27:30.172564Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667806871064604:2313], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:30.183151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976720657, at schemeshard: 72057594046644480 2025-06-03T10:27:30.183844Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667806871064606:2314], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-03T10:27:30.289963Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667806871064634:2164] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:30.294382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.300873Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667809263234088:2341], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:30.303741Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZTY1NTQ4YjUtNDdmN2RmMDItMmFkYTg2ZWEtNDU5MmMwOTI=, ActorId: [1:7511667809263234062:2334], ActorState: ExecuteState, TraceId: 01jwtn9xsa9mjtqqatebnqfg1r, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:27:30.303853Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:27:30.302048Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511667806871064641:2318], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:30.302720Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=YTYyYjAxNzgtMmU3ZTU1MGYtM2Q5OGRkNjMtN2UzNTFlMDQ=, ActorId: [2:7511667806871064575:2309], ActorState: ExecuteState, TraceId: 01jwtn9xqpf5v0qd4820x6w9j7, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:27:30.303337Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:27:30.363833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:27:30.449489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:27:30.510210Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtn9y1d8cgwe0f4wyr4arwb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRmZDNlMzgtZjg0Y2UwMjMtYTFjZjZjNDAtZTgzZDE0Mw==, CurrentExecutionId: , Cus ... 
RE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:27:45.159558Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) GetOwnershipFast Partition=1 TabletId=1001 2025-06-03T10:27:45.159599Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7511667873544932555:3632], Recipient [3:7511667869249964494:3191]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7511667873544932554:3632] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-03T10:27:45.159626Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7511667873544932554:3632], Recipient [3:7511667869249964494:3191]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_5" 2025-06-03T10:27:45.159639Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [3:7511667869249964494:3191], Recipient [3:7511667873544932554:3632]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-03T10:27:45.159644Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) InitTable: SourceId=A_Source_5 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-03T10:27:45.159659Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7511667873544932554:3632], Recipient [3:7511667869249964494:3191]: NActors::TEvents::TEvPoison 2025-06-03T10:27:45.159825Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [3:7511667839185191496:2069], Recipient [3:7511667873544932554:3632]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-03T10:27:45.159831Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:27:45.160495Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [3:7511667839185191509:2080], Recipient [3:7511667873544932554:3632]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=3&id=ZGM0ODM4YzAtNWEzNzAzOTMtZjdlNjFkOC02MTNkNWY5NQ==" NodeId: 3 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-03T10:27:45.160506Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:27:45.188363Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [3:7511667839185191509:2080], Recipient [3:7511667873544932554:3632]: NKikimrKqp.TEvQueryResponse Response { SessionId: 
"ydb://session/3?node_id=3&id=ZGM0ODM4YzAtNWEzNzAzOTMtZjdlNjFkOC02MTNkNWY5NQ==" PreparedQuery: "c6ba9753-d7849ce0-1e015ec-32780702" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jwtnacd0fwr2gvk1qhhhcrdk" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1748946465122 } items { uint64_value: 1748946465122 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 15 2025-06-03T10:27:45.188412Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) Selected from table PartitionId=0 SeqNo=13 2025-06-03T10:27:45.188420Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) OnPartitionChosen 2025-06-03T10:27:45.188465Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [3:7511667873544932580:3632], Recipient [3:7511667869249964494:3191]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [3:7511667873544932554:3632] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-03T10:27:45.188476Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [3:7511667873544932554:3632], Recipient [3:7511667869249964494:3191]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 2025-06-03T10:27:45.188487Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:240: StateCheckPartition, received event# 271188558, Sender [3:7511667869249964494:3191], Recipient [3:7511667873544932554:3632]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-03T10:27:45.188493Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) Update the table 2025-06-03T10:27:45.188535Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [3:7511667873544932554:3632], Recipient [3:7511667869249964494:3191]: NActors::TEvents::TEvPoison Received TEvChooseResult: 1 2025-06-03T10:27:45.210758Z node 3 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:212: StateUpdate, received event# 271646721, Sender [3:7511667839185191509:2080], Recipient [3:7511667873544932554:3632]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=3&id=ZGM0ODM4YzAtNWEzNzAzOTMtZjdlNjFkOC02MTNkNWY5NQ==" PreparedQuery: "9f4d3378-9fc30a7-9abea4f6-e45b152f" QueryParameters { Name: "$AccessTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$CreateTime" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Partition" Type { Kind: 
Data Data { Scheme: 2 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SeqNo" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 9 2025-06-03T10:27:45.210777Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-03T10:27:45.210790Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) ReplyResult: Partition=1, SeqNo=13 2025-06-03T10:27:45.210795Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7511667873544932554:3632] (SourceId=A_Source_5, PreferedPartition=(NULL)) Start idle Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 11131928866524144434 AND Topic = "Root" AND ProducerId = "00415F536F757263655F35" 2025-06-03T10:27:45.234813Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715703. Ctx: { TraceId: 01jwtnacdy6tv1hesz8xt4brg8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTAwYjk2YTEtMzhlMTJlZDktZDAzN2FhMjUtNmM0Njg2MTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:45.642929Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1948: ActorId: [3:7511667873544932669:2644] TxId: 281474976715704. Ctx: { TraceId: 01jwtnactm9y69rc5wt8j9wse5, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MmZiM2JkOWEtNDQxYmMxMzgtZTUyZDA3NC1kYmQ3MGI4Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 4 2025-06-03T10:27:45.643319Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7511667873544932673:2644], TxId: 281474976715704, task: 2. Ctx: { SessionId : ydb://session/3?node_id=3&id=MmZiM2JkOWEtNDQxYmMxMzgtZTUyZDA3NC1kYmQ3MGI4Mg==. TraceId : 01jwtnactm9y69rc5wt8j9wse5. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7511667873544932669:2644], status: UNAVAILABLE, reason: {
: Error: Terminate execution } 2025-06-03T10:27:46.612183Z node 5 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:46.612221Z node 5 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:27:46.627177Z node 5 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [5:178:2192] 2025-06-03T10:27:46.627756Z node 5 :PERSQUEUE INFO: partition_init.cpp:785: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. Value 2025-06-03T10:27:46.000000Z 2025-06-03T10:27:46.627774Z node 5 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [5:178:2192] Got cmd write: CmdWrite { Key: "i0000000003" Value: "\010\000\020\n\030\000(\320\351\324\252\3632" StorageChannel: INLINE } CmdWrite { Key: "I0000000003" Value: "\010\271`\020\316\255\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003cclient" Value: "\010\004\020\000\030\000\"\007session(\0000\000@\001" StorageChannel: INLINE } CmdWrite { Key: "m0000000003uclient" Value: "\004\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000session" StorageChannel: INLINE } 2025-06-03T10:27:47.249418Z node 6 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:47.249464Z node 6 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:27:47.254193Z node 6 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 3, State: StateInit] bootstrapping 3 [6:180:2194] 2025-06-03T10:27:47.254603Z node 6 :PERSQUEUE INFO: partition_init.cpp:785: [Root/PQ/rt3.dc1--account--topic:3:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp from keys completed. 
Value 2025-06-03T10:27:47.000000Z 2025-06-03T10:27:47.254618Z node 6 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 3, State: StateInit] init complete for topic 'Root/PQ/rt3.dc1--account--topic' partition 3 generation 0 [6:180:2194] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_ttl/unittest >> TSchemeShardTTLTests::CreateTableShouldSucceed-EnableTablePgTypes-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:34.528875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:34.528915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:34.528936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:34.528944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:34.528953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:34.528959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:34.528971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:34.528989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:34.529117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:34.529198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:34.551342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:34.551383Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:34.556341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:34.556527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:34.556573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:34.558773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:34.558844Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:34.558966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.559024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:34.562482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.562583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:34.563019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:34.563035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:34.563051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:34.563062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.563068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:34.563099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.565604Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:34.593448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:34.593553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.593632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:34.593684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:34.593697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.594616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 
SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.594646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:34.594734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.594747Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:34.594753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:34.594758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:34.595218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.595236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:34.595244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:34.595635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.595645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:34.595650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.595659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:34.596292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:34.596685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:34.596730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:34.596930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:34.596955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 
1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:34.596962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.597034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:34.597040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:34.597075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:34.597086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:34.598187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:34.598202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:34.598295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
flight, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:27:47.523608Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:27:47.523617Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:27:47.524049Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:27:47.524073Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:27:47.524079Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:27:47.524086Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:27:47.524092Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:27:47.524112Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:27:47.524492Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 1 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 386 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-03T10:27:47.524500Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-03T10:27:47.524518Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 1 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 386 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-03T10:27:47.524530Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 101 Step: 5000002 OrderId: 101 ExecLatency: 1 ProposeLatency: 2 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 386 } } CommitVersion { Step: 5000002 TxId: 101 } 2025-06-03T10:27:47.524885Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 120259086581 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:27:47.524896Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-03T10:27:47.524912Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 120259086581 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:27:47.524921Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:27:47.524927Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 307 RawX2: 120259086581 } Origin: 72075186233409546 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:27:47.524937Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:47.524940Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:47.524944Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 101:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:27:47.524949Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 129 -> 240 2025-06-03T10:27:47.525033Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:27:47.525360Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:27:47.525693Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:47.525721Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:47.525768Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:27:47.525775Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:27:47.525788Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:27:47.525791Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-06-03T10:27:47.525795Z node 28 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:27:47.525797Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:27:47.525801Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-03T10:27:47.525813Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [28:333:2311] message: TxId: 101 2025-06-03T10:27:47.525819Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:27:47.525824Z node 28 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:27:47.525830Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:27:47.525848Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:27:47.526571Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:27:47.526597Z node 28 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [28:334:2312] TestWaitNotification: OK eventTxId 101 2025-06-03T10:27:47.526738Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:27:47.526813Z node 28 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" took 86us result status StatusSuccess 2025-06-03T10:27:47.526951Z node 28 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathDescription { Self { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TTLTableWithpgint8Column_UNIT_NANOSECONDS" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "modified_at" Type: "pgint8" TypeId: 12288 Id: 2 NotNull: false TypeInfo { PgTypeId: 20 } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 TTLSettings { Enabled { ColumnName: "modified_at" ExpireAfterSeconds: 3600 ColumnUnit: UNIT_NANOSECONDS Tiers { ApplyAfterSeconds: 3600 Delete { } } } } IsBackup: false IsRestore: false } TableStats 
{ DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataOk [GOOD] Test command err: RandomSeed# 863941797976319761 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Group is disintegrated or has network problems *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ] >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts >> EncryptedBackupParamsValidationTestFeatureDisabled::EncryptionParamsSpecifiedImport [GOOD] >> CheckIntegrityMirror3of4::PlacementMissingParts [GOOD] >> CheckIntegrityMirror3of4::PlacementDisintegrated >> CheckIntegrityMirror3dc::PlacementOkWithErrors [GOOD] >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks >> CheckIntegrityBlock42::DataErrorAdditionalUnequalParts [GOOD] >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken >> EncryptedBackupParamsValidationTest::NoItemDestination ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::DataErrorManyCopies [GOOD] Test command err: RandomSeed# 14810416625640565010 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** 
PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 ], ver1 disks [ 2 ] ERROR: There are unequal parts *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:1:0:0] 1: [82000000:1:2:0:0] 2: [82000000:1:0:0:0] 3: [82000000:1:1:1:0] 4: [82000000:1:2:1:0] 5: [82000000:1:0:1:0] 6: [82000000:1:1:2:0] 7: [82000000:1:2:2:0] 8: [82000000:1:0:2:0] Layout info: ver0 disks [ 0 1 2 ], ver1 disks [ 3 4 5 ] ERROR: There are unequal parts |63.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |63.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer |63.4%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/ydb-core-backup-impl-ut_table_writer >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] >> BackupRestoreS3::RestoreIndexTableSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries >> TResourceBroker::TestErrors >> CheckIntegrityBlock42::DataErrorSixPartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken >> BackupRestore::TestAllPrimitiveTypes-INT32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INT64 >> CheckIntegrityMirror3dc::PlacementOkWithErrorsOnBlobDisks [GOOD] >> CheckIntegrityMirror3of4::PlacementBlobIsLost >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified >> TResourceBroker::TestOverusage >> TResourceBroker::TestErrors [GOOD] >> TResourceBroker::TestExecutionStat >> CheckIntegrityBlock42::DataErrorFivePartsOneBroken [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] |63.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TResourceBroker::TestExecutionStat [GOOD] >> TResourceBroker::TestOverusage [GOOD] >> TResourceBroker::TestNotifyActorDied >> EncryptedBackupParamsValidationTest::NoItemDestination [GOOD] >> TFlatMetrics::TimeSeriesAvg16x60 [GOOD] >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> TResourceBroker::TestRealUsage ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementDisintegrated [GOOD] Test command err: RandomSeed# 2576452687863555119 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO 
[82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** Group is disintegrated or has network problems >> TResourceBroker::TestNotifyActorDied [GOOD] >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestExecutionStat [GOOD] Test command err: 2025-06-03T10:27:49.415711Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:49.415854Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-1 (1 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:49.415864Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:49.415874Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-1 (1 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:49.415880Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:49.415896Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 800.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:49.415914Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.415918Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.415924Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:49.415930Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [1:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.415935Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.415943Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:49.415950Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-4 (4 by [1:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.415955Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.415959Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:49.415966Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.415972Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:408: SubmitTask failed for 
task 2 to [1:100:2134]: task with the same ID has been already submitted 2025-06-03T10:27:49.415986Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:494: RemoveQueuedTask failed for task 1 to [1:100:2134]: cannot remove in-fly task 2025-06-03T10:27:49.415994Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:486: RemoveQueuedTask failed for task 5 to [1:100:2134]: cannot remove unknown task 2025-06-03T10:27:49.416002Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:486: RemoveQueuedTask failed for task 2 to [1:101:2135]: cannot remove unknown task 2025-06-03T10:27:49.416009Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:499: Removing task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:49.416015Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:100:2134]) 2025-06-03T10:27:49.416023Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:433: UpdateTask failed for task 2 to [1:100:2134]: cannot update unknown task 2025-06-03T10:27:49.416031Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:433: UpdateTask failed for task 4 to [1:101:2135]: cannot update unknown task 2025-06-03T10:27:49.416039Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-4 (4 by [1:100:2134]) (priority=4 type=compaction0 resources={250, 250} resubmit=0) 2025-06-03T10:27:49.416044Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.416051Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [1:100:2134]) 2025-06-03T10:27:49.416057Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-3 (3 by [1:100:2134]) (priority=6 type=compaction0 resources={250, 250} resubmit=0) 2025-06-03T10:27:49.416062Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.416066Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [1:100:2134]) 2025-06-03T10:27:49.416073Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:515: FinishTask failed for task 5 to [1:100:2134]: cannot finish unknown task 2025-06-03T10:27:49.416081Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:515: FinishTask failed for task 2 to [1:101:2135]: cannot finish unknown task 2025-06-03T10:27:49.416088Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:523: FinishTask failed for task 3 to [1:100:2134]: cannot finish queued task 2025-06-03T10:27:49.416096Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:100:2134]) (release resources {400, 400}) 2025-06-03T10:27:49.416103Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 0.000000 to 1600.000000 2025-06-03T10:27:49.416109Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 250} for task task-4 (4 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.416114Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.416119Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 500.000000 (insert task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:49.416125Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:630: 
Skip queue queue_compaction0 due to exceeded limits 2025-06-03T10:27:49.416131Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-4 (4 by [1:100:2134]) (release resources {250, 250}) 2025-06-03T10:27:49.416136Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 500.000000 2025-06-03T10:27:49.416140Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 250} for task task-3 (3 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.416145Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.416151Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 500.000000 to 1000.000000 (insert task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:49.854350Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:49.854448Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-1 (1 by [2:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.854457Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:49.854465Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 500} for task task-1 (1 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:49.854471Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:49.854482Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 1000.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.854492Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854497Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.854503Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854510Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854514Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.854519Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854523Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.854530Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854535Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.854539Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854543Z node 2 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.854550Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854554Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.854559Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854563Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.854571Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854575Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.854579Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854583Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.854590Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-7 (7 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.854597Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-7 (7 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.854601Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.854605Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.854616Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {500, 500}) 2025-06-03T10:27:49.854623Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated plann ... 
urce_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855508Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1150.000000 to 1207.500000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855514Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {50, 50}) 2025-06-03T10:27:49.855519Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1207.500000 to 1200.000000 (remove task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855523Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 1150.000000 to 1200.000000 2025-06-03T10:27:49.855529Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-1 (1 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855533Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855537Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-1 (1 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.855540Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855545Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1200.000000 to 1255.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855553Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {50, 50}) 2025-06-03T10:27:49.855558Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1255.000000 to 1250.000000 (remove task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855562Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 1200.000000 to 1250.000000 2025-06-03T10:27:49.855569Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-1 (1 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855573Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855577Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-1 (1 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.855581Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855585Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1250.000000 to 1302.500000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855592Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {50, 50}) 2025-06-03T10:27:49.855597Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1302.500000 to 1300.000000 (remove task task-1 (1 by [2:100:2134])) 
2025-06-03T10:27:49.855602Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 1250.000000 to 1300.000000 2025-06-03T10:27:49.855608Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-1 (1 by [2:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:49.855612Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:49.855616Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 500} for task task-1 (1 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:49.855620Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:49.855625Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 950.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855631Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855635Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855639Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855645Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855649Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855653Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:706: Updated real resource usage for queue queue_compaction0 from 300.000000 to 1300.000000 2025-06-03T10:27:49.855658Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855679Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.855685Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855689Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855693Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855696Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.855702Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855706Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855710Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855713Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 
2025-06-03T10:27:49.855719Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855724Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855728Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855731Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.855737Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-7 (7 by [2:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.855741Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-7 (7 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855745Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:49.855748Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.855755Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {500, 500}) 2025-06-03T10:27:49.855760Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 950.000000 to 0.000000 (remove task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:49.855764Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-2 (2 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.855768Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855772Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1300.000000 to 1350.000000 (insert task task-2 (2 by [2:100:2134])) 2025-06-03T10:27:49.855777Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-3 (3 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.855780Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855785Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 300.000000 to 1400.000000 (insert task task-3 (3 by [2:100:2134])) 2025-06-03T10:27:49.855791Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-4 (4 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.855795Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855799Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1350.000000 to 1400.000000 (insert task task-4 (4 by [2:100:2134])) 2025-06-03T10:27:49.855804Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-6 (6 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.855807Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-6 (6 
by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.855812Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 1400.000000 to 1450.000000 (insert task task-6 (6 by [2:100:2134])) 2025-06-03T10:27:49.855816Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-5 (5 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.855820Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-5 (5 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855824Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 1400.000000 to 1500.000000 (insert task task-5 (5 by [2:100:2134])) 2025-06-03T10:27:49.855828Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-7 (7 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.855833Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-7 (7 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.855838Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 1500.000000 to 1600.000000 (insert task task-7 (7 by [2:100:2134])) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3of4::PlacementBlobIsLost [GOOD] Test command err: RandomSeed# 10072310332410216092 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:2:0] FINISHED WITH OK *** >> TTabletPipeTest::TestShutdown >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonSourcePrefixSpecified [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveConfig_Test [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test |63.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TFlatMetrics::TimeSeriesAvg16Signed [GOOD] >> TResourceBroker::TestRealUsage [GOOD] >> TResourceBroker::TestRandomQueue >> EncryptedBackupParamsValidationTest::NoCommonDestination >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] >> TCdcStreamTests::VirtualTimestamps >> BackupRestoreS3::RestoreIndexTableDecimalSplitBoundaries [GOOD] >> BackupRestoreS3::RestoreViewQueryText ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestNotifyActorDied [GOOD] Test command err: 2025-06-03T10:27:49.846910Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:49.847031Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted 
new compaction0 task task-1 (1 by [1:100:2134]) priority=5 resources={50, 50} 2025-06-03T10:27:49.847040Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.847048Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {50, 50} for task task-1 (1 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.847054Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.847066Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 100.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:49.847076Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:100:2134]) priority=5 resources={410, 410} 2025-06-03T10:27:49.847080Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.847086Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:630: Skip queue queue_compaction0 due to exceeded limits 2025-06-03T10:27:49.847093Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [1:100:2134]) priority=5 resources={550, 550} 2025-06-03T10:27:49.847097Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.847102Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:100:2134]) 2025-06-03T10:27:49.847106Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:49.847120Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:100:2134]) (release resources {50, 50}) 2025-06-03T10:27:49.847127Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 100.000000 2025-06-03T10:27:49.847133Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {550, 550} for task task-3 (3 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:49.847137Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:49.847142Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 1100.000000 (insert task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:49.847147Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:49.847153Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-3 (3 by [1:100:2134]) (release resources {550, 550}) 2025-06-03T10:27:49.847160Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1100.000000 to 550.000000 (remove task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:49.847164Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 0.000000 to 550.000000 2025-06-03T10:27:49.847169Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {410, 410} for 
task task-2 (2 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:49.847173Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:49.847178Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 100.000000 to 920.000000 (insert task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:50.241684Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:50.241799Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:50.241809Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.241817Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 500} for task task-1 (1 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:50.241823Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.241842Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 1000.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:50.241851Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [2:100:2134]) priority=5 resources={200, 200} 2025-06-03T10:27:50.241856Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.241862Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:50.241869Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [2:101:2135]) priority=5 resources={200, 200} 2025-06-03T10:27:50.241873Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:101:2135]) to queue queue_compaction0 2025-06-03T10:27:50.241878Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:50.241882Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.241889Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [2:101:2135]) priority=5 resources={200, 200} 2025-06-03T10:27:50.241893Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:101:2135]) to queue queue_compaction1 2025-06-03T10:27:50.241897Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:50.241901Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.241915Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:499: Removing task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:50.241922Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {500, 500}) 2025-06-03T10:27:50.241929Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource 
usage for queue queue_compaction0 from 1000.000000 to 100.000000 (remove task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:50.241935Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 100.000000 2025-06-03T10:27:50.241940Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-4 (4 by [2:101:2135]) from queue queue_compaction1 2025-06-03T10:27:50.241944Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [2:101:2135]) to queue queue_compaction1 2025-06-03T10:27:50.241950Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 400.000000 (insert task task-4 (4 by [2:101:2135])) 2025-06-03T10:27:50.241955Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-3 (3 by [2:101:2135]) from queue queue_compaction0 2025-06-03T10:27:50.241963Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [2:101:2135]) to queue queue_compaction0 2025-06-03T10:27:50.241968Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 100.000000 to 500.000000 (insert task task-3 (3 by [2:101:2135])) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::DataErrorHeavySixPartsWithManyBroken [GOOD] Test command err: RandomSeed# 14863220279233685951 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:4:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 7 ], ver2 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 1 ]; 
part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: 
[82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: ver0 disks [ 5 ] Erasure info: { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 0 ] part 2: ver0 disks [ 1 ] part 3: ver0 disks [ 2 ] part 4: ver0 disks [ 3 ] part 5: ver0 disks [ 4 ] part 6: Erasure info: ERROR: There are erasure restore fails *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:3:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:3:0] FINISHED WITH OK *** Disks: 0: [82000000:1:0:5:0] 1: [82000000:1:0:6:0] 2: [82000000:1:0:7:0] 3: [82000000:1:0:0:0] 4: [82000000:1:0:1:0] 5: [82000000:1:0:2:0] 6: [82000000:1:0:3:0] 7: [82000000:1:0:4:0] Layout info: part 1: ver0 disks [ 6 ], ver1 disks [ 0 ] part 2: ver0 disks [ 6 ], ver1 disks [ 1 ] part 3: ver0 disks [ 6 ], ver1 disks [ 2 ] part 4: ver0 disks [ 3 ], ver1 disks [ 6 ] part 5: ver0 disks [ 4 ], ver1 disks [ 6 ] part 6: ver0 disks [ 5 ], ver1 disks [ 6 ] ERROR: There are unequal parts Erasure info: { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 
disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 1 disks [ 6 ] -> OK { part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 1 disks [ 0 ] -> OK { part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 
]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 2 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 2 disks [ 1 ] -> OK { part 1 disks [ 0 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 5 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 5 disks [ 4 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 3 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 4 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 3 disks [ 2 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 4 disks [ 3 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 6 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 6 disks [ 5 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 4 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 5 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 4 disks [ 3 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 5 disks [ 4 ]; } CHECK part 6 disks [ 5 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 5 disks [ 6 ] -> OK { part 1 disks [ 6 ]; part 2 disks [ 6 ]; part 3 disks [ 6 ]; part 4 disks [ 6 ]; } CHECK part 6 disks [ 6 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 5 disks [ 4 ] -> OK { part 1 disks [ 0 ]; part 2 disks [ 1 ]; part 3 disks [ 2 ]; part 4 disks [ 3 ]; } CHECK part 6 disks [ 5 ] -> OK ERROR: There are erasure restore fails >> TTabletPipeTest::TestShutdown [GOOD] >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified >> TResourceBroker::TestRandomQueue [GOOD] >> TCdcStreamTests::Basic >> 
EncryptedBackupParamsValidationTest::NoCommonDestination [GOOD] >> TCdcStreamTests::VirtualTimestamps [GOOD] >> TCdcStreamTests::ResolvedTimestamps >> BackupRestore::TestAllPrimitiveTypes-INT64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-FLOAT |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestShutdown [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/inside_ydb_ut/unittest >> TestYmqHttpProxy::TestChangeMessageVisibilityBatch [GOOD] Test command err: 2025-06-03T10:27:17.389374Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667752964067760:2192];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:17.389422Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b7e/r3tmp/tmp9xFg6n/pdisk_1.dat 2025-06-03T10:27:17.567633Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:17.571658Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667752964067607:2079] 1748946437386514 != 1748946437386517 TServer::EnableGrpc on GrpcPort 1537, node 1 2025-06-03T10:27:17.604997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:17.605036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:17.605956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:17.616690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:17.616706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:17.616709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:17.616761Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23628 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:27:17.730768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.737639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:23628 2025-06-03T10:27:17.805802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.809689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:27:17.814389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.825692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-03T10:27:17.828228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:17.895210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:17.936034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:17.978968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715665, at schemeshard: 72057594046644480 2025-06-03T10:27:17.983087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.017840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.049876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.078064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 waiting... 
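[editor's note] The schemeshard warnings above repeat the same "Operation part proposed ok, but propose itself is undo unsafe" message for many suboperation types. When reading a long run like this, an ad-hoc tally of which ESchemeOp* types dominate can help. The helper below is not part of the YDB test suite; it is a hypothetical one-off script that only assumes the exact wording of the WARN lines above and reads the log from stdin.

import re
import sys
from collections import Counter

# Matches e.g. "suboperation type: ESchemeOpCreateTable, opId: ..."
SUBOP_RE = re.compile(r"suboperation type: (ESchemeOp\w+)")

def tally_subops(log_text: str) -> Counter:
    """Count occurrences of each schemeshard suboperation type."""
    return Counter(SUBOP_RE.findall(log_text))

if __name__ == "__main__":
    for subop, n in tally_subops(sys.stdin.read()).most_common():
        print(f"{n:6d}  {subop}")

On this excerpt it would report ESchemeOpCreateTable as by far the most frequent type, matching the table-creation loop visible in the warnings.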
2025-06-03T10:27:18.106094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.133650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.161627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.244200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667757259036290:2372], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.244235Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.244519Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667757259036302:2375], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:18.245589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715673:3, at schemeshard: 72057594046644480 2025-06-03T10:27:18.249114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715673, at schemeshard: 72057594046644480 2025-06-03T10:27:18.249194Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667757259036304:2376], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715673 completed, doublechecking } 2025-06-03T10:27:18.325353Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667757259036355:2853] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 18], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:18.412710Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtn9j3220scjbr6kh0tdyze, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmQ3MzQxMTYtZmE1NmMyOTQtYjFkYmI3ZTMtZWIyNTRmYzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:27:18.439089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.494145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.522424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.542240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.562640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.590514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.615971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:18.638226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474 ... or query(idx=CHANGE_VISIBILITY_ID). 
Mode: COMPILE_AND_EXEC 2025-06-03T10:27:50.779237Z node 7 :SQS TRACE: executor.cpp:154: Request [fb768ea8-cdde9ecd-74472f02-2200f847] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Serializing params: {"QUEUE_ID_NUMBER": 2, "QUEUE_ID_NUMBER_HASH": 17472595041006102391, "SHARD": 1, "QUEUE_ID_NUMBER_AND_SHARD_HASH": 5923258363543965525, "NOW": 1748946470776, "GROUPS_READ_ATTEMPT_IDS_PERIOD": 300000, "KEYS": [{"LockTimestamp": 1748946470693, "Offset": 1, "NewVisibilityDeadline": 1748946471776}, {"LockTimestamp": 1748946470710, "Offset": 2, "NewVisibilityDeadline": 1748946472776}]} 2025-06-03T10:27:50.779386Z node 7 :SQS TRACE: executor.cpp:203: Request [fb768ea8-cdde9ecd-74472f02-2200f847] Query(idx=CHANGE_VISIBILITY_ID) Queue [cloud4/000000000000000101v0] Execute program: { Transaction { MiniKQLTransaction { Mode: COMPILE_AND_EXEC Program { Bin: "O\034\014Exists*NewVisibilityDeadline\014Offset\006Arg\014Member\nFlags\010Name\010Args\016Payload\022Parameter\006And\032LockTimestamp$VisibilityDeadline\014Invoke\t\211\004\206\202?\000\206\202\030Extend\000\006\002?\000\t\211\004\202\203\005@\206\205\n\203\014\207\203\010\203\014\203\010?\020(ChangeConddCurrentVisibilityDeadline\002\006\n$SetResult\000\003?\006\014result\t\211\006?\024\206\205\006?\020?\020?\020.\006\n?\032?\0220MapParameter\000\t\351\000?\034\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?&\003?(\010KEYS\003&\000\t\251\000?\032\016\000\005?\022\t\211\004?\010\207\203\014?\010 Coalesce\000\t\211\004?<\207\203\014\207\203\014*\000\t\211\006?B\203\005@\203\010?\0146\000\003?J\026LessOrEqual\t\351\000?L\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?X\003?Z\006NOW\003&\000\t\211\004?\014\207\205\004\207\203\010?\014.2\203\004\022\000\t\211\n?n\203\005\004\200\205\004\203\004\203\004.2\213\010\203\010\203\010\203\004?\020\203\004$SelectRow\000\003?t \000\001\205\000\000\000\000\001\030\000\000\000\000\000\000\000?l\005?z\003?v\020\003?x\026\003\013?\202\t\351\000?|\005\205\004\206\205\004\203\010\203\005@\026\032\203\005@\036\"\006\000?\226\003?\230> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport >> TCdcStreamTests::ResolvedTimestamps [GOOD] >> TCdcStreamTests::SchemaChanges >> DataShardReadTableSnapshots::ReadTableSnapshot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestRandomQueue [GOOD] Test command err: 2025-06-03T10:27:50.472414Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:50.472540Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472551Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.472560Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-1 (1 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:50.472566Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.472578Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 800.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:50.472587Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new 
compaction1 task task-2 (2 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472591Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.472596Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:50.472602Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472606Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.472610Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:50.472614Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.472620Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472624Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.472628Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:50.472632Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.472638Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472642Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:50.472650Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:50.472654Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.472660Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [1:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:50.472664Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.472668Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [1:100:2134]) 2025-06-03T10:27:50.472672Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.472689Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:100:2134]) (release resources {400, 400}) 2025-06-03T10:27:50.472698Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 800.000000 2025-06-03T10:27:50.472704Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-2 (2 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:50.472708Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:100:2134]) to queue queue_compaction1 
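[editor's note] The RESOURCE_BROKER trace above follows a recognizable loop: a task is submitted to a named queue, resources are allocated if they fit, and a queue whose head task does not fit blocks later queues from starting ("Skip queue ... blocked by an earlier queue"). The sketch below is a toy model inferred from this trace only; it is not YDB's actual scheduler (resource_broker.cpp), and the ToyBroker class, its method names, and its planned-usage cost (a crude cpu+mem stand-in) are all illustrative assumptions.

from collections import deque

class ToyBroker:
    def __init__(self, cpu, mem):
        self.free = [cpu, mem]
        self.queues = {}    # queue name -> deque of (task_id, (cpu, mem))
        self.planned = {}   # queue name -> accumulated planned usage

    def submit(self, queue, task_id, res):
        self.queues.setdefault(queue, deque()).append((task_id, res))
        self.planned.setdefault(queue, 0.0)
        self.schedule()

    def finish(self, res):
        # Release resources and retry waiting tasks, like "Finish task ...
        # (release resources {...})" in the trace.
        self.free[0] += res[0]
        self.free[1] += res[1]
        self.schedule()

    def schedule(self):
        # Visit queues starting from the least loaded one; a head task
        # that does not fit blocks every queue behind it, mirroring
        # "Skip queue ... blocked by an earlier queue".
        for name in sorted(self.queues, key=lambda q: self.planned[q]):
            q = self.queues[name]
            while q:
                task_id, (c, m) = q[0]
                if c > self.free[0] or m > self.free[1]:
                    return
                q.popleft()
                self.free[0] -= c
                self.free[1] -= m
                self.planned[name] += c + m  # stand-in cost, not YDB's model
                print(f"start {task_id} from {name}")

if __name__ == "__main__":
    b = ToyBroker(500, 500)
    b.submit("queue_compaction0", "task-1", (400, 400))  # starts immediately
    b.submit("queue_compaction1", "task-2", (400, 400))  # waits: no room
    b.finish((400, 400))                                  # now task-2 starts

The demo reproduces the shape of the node-1 trace above: task-1 is allocated, task-2 waits for resources, and finishing task-1 lets task-2 in.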
2025-06-03T10:27:50.472712Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 800.000000 (insert task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:50.472717Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [1:100:2134]) 2025-06-03T10:27:50.472721Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:50.472727Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:100:2134]) (release resources {400, 400}) 2025-06-03T10:27:50.472733Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 800.000000 to 280.000000 (remove task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:50.472737Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 0.000000 to 280.000000 2025-06-03T10:27:50.472742Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-4 (4 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:50.472746Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.472751Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 280.000000 to 1054.000000 (insert task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:50.472755Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:100:2134]) 2025-06-03T10:27:50.472758Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-03T10:27:50.472763Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-4 (4 by [1:100:2134]) (release resources {400, 400}) 2025-06-03T10:27:50.472767Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 1054.000000 to 560.000000 (remove task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:50.472772Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction1 from 280.000000 to 560.000000 2025-06-03T10:27:50.472776Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-6 (6 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:50.472780Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-6 (6 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.472784Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 560.000000 to 1308.000000 (insert task task-6 (6 by [1:100:2134])) 2025-06-03T10:27:50.472788Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [1:100:2134]) 2025-06-03T10:27:50.809476Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:50.809621Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-1 (1 by [2:100:2134]) priority=1 resources={10, 330} 2025-06-03T10:27:50.809635Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue 
queue_compaction1 2025-06-03T10:27:50.809645Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {10, 330} for task task-1 (1 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:50.809651Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.809663Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 660.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:50.809676Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-2 (2 by [2:100:2134]) priority=1 resources={458, 20} 2025-06-03T10:27:50.809681Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:50.809687Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {458, 20} for task task-2 (2 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.809692Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:50.809697Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 916.000000 (insert task task-2 (2 by [2:100:2134])) 2025-06-03T10:27:50.809705Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-3 (3 by [2:100:2134]) priority=1 resources={226, 415} 2025-06-03T10:27:50.809711Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning waiting task 'task-3 (3 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.809718Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [2:100:2134]) 2025-06-03T10:27:50.809725Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [2:100:2134]) priority=2 resources={103, 415} 2025-06-03T10:27:50.809730Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.809735Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-4 (4 by [2:100:2134]) 2025-06-03T10:27:50.809740Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_default blocked by an earlier queue 2025-06-03T10:27:50.809747Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-5 (5 by [2:100:2134]) priority=1 resources={120, 143} 2025-06-03T10:27:50.809751Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.809756Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [2:100:2134]) 2025-06-03T10:27:50.809760Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_default blocked by an earlier queue 2025-06-03T10:27:50.809766Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [2:100:2134]) priority=1 resources={154, 279} 2025-06-03T10:27:50.809770Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:50.809774Z node 2 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [2:100:2134]) 2025-06-03T10:27:50.809778Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_default blocked by an earlier queue 2025-06-03T10:27:50.809789Z node 2 :RESOUR ... 69601Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-838 (838 by [2:100:2134]) (release resources {357, 15}) 2025-06-03T10:27:50.969605Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 716971.982800 to 719261.780800 2025-06-03T10:27:50.969609Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {308, 267} for task task-851 (851 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969613Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-851 (851 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969618Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 718769.977600 to 720837.139200 (insert task task-851 (851 by [2:100:2134])) 2025-06-03T10:27:50.969622Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-853 (853 by [2:100:2134]) 2025-06-03T10:27:50.969630Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-851 (851 by [2:100:2134]) (release resources {308, 267}) 2025-06-03T10:27:50.969635Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 720837.139200 to 720819.028800 (remove task task-851 (851 by [2:100:2134])) 2025-06-03T10:27:50.969639Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 719261.780800 to 720819.028800 2025-06-03T10:27:50.969644Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {163, 471} for task task-853 (853 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969648Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-853 (853 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969653Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 720819.028800 to 723053.264400 (insert task task-853 (853 by [2:100:2134])) 2025-06-03T10:27:50.969657Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-869 (869 by [2:100:2134]) 2025-06-03T10:27:50.969662Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-853 (853 by [2:100:2134]) (release resources {163, 471}) 2025-06-03T10:27:50.969667Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 723053.264400 to 721372.924800 (remove task task-853 (853 by [2:100:2134])) 2025-06-03T10:27:50.969673Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 720819.028800 to 721372.924800 2025-06-03T10:27:50.969678Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 157} for task task-869 (869 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969682Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-869 (869 by [2:100:2134]) to queue queue_default 2025-06-03T10:27:50.969687Z node 2 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 721372.924800 to 722093.806000 (insert task task-869 (869 by [2:100:2134])) 2025-06-03T10:27:50.969692Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {271, 71} for task task-905 (905 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969696Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-905 (905 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969700Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 722093.806000 to 723338.129600 (insert task task-905 (905 by [2:100:2134])) 2025-06-03T10:27:50.969705Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-917 (917 by [2:100:2134]) 2025-06-03T10:27:50.969714Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-869 (869 by [2:100:2134]) (release resources {1, 157}) 2025-06-03T10:27:50.969719Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 723338.129600 to 723333.618800 (remove task task-869 (869 by [2:100:2134])) 2025-06-03T10:27:50.969724Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 721372.924800 to 721395.011200 2025-06-03T10:27:50.969729Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:630: Skip queue queue_default due to exceeded limits 2025-06-03T10:27:50.969735Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-905 (905 by [2:100:2134]) (release resources {271, 71}) 2025-06-03T10:27:50.969740Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 721395.011200 to 723072.501200 2025-06-03T10:27:50.969744Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {203, 409} for task task-917 (917 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969748Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-917 (917 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969753Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 723333.618800 to 725047.983200 (insert task task-917 (917 by [2:100:2134])) 2025-06-03T10:27:50.969758Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-942 (942 by [2:100:2134]) 2025-06-03T10:27:50.969763Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-917 (917 by [2:100:2134]) (release resources {203, 409}) 2025-06-03T10:27:50.969768Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 725047.983200 to 724904.015200 (remove task task-917 (917 by [2:100:2134])) 2025-06-03T10:27:50.969773Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 723072.501200 to 724643.061200 2025-06-03T10:27:50.969777Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {365, 369} for task task-942 (942 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969782Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-942 (942 by [2:100:2134])' of unknown type 'wrong' to default queue 
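[editor's note] The "Updated planned resource usage for queue Q from A to B" lines above are a running ledger: for a given node and queue, each update's "from" value should equal the previous update's "to" value. The snippet below is a hypothetical consistency checker for exactly this wording, not a YDB tool; note that on a truncated excerpt like this one, entries hidden by the elided middle of the log will surface as reported gaps.

import re
import sys

UPDATE_RE = re.compile(
    r"node (\d+) :RESOURCE_BROKER \w+: resource_broker\.cpp:\d+: "
    r"Updated planned resource usage for queue (\S+) "
    r"from ([0-9.]+) to ([0-9.]+)")

def check_chaining(log_text: str) -> None:
    """Report any (node, queue) whose updates do not chain from/to."""
    last = {}
    for node, queue, a, b in UPDATE_RE.findall(log_text):
        key, a, b = (node, queue), float(a), float(b)
        if key in last and abs(last[key] - a) > 1e-6:
            print(f"gap for node {node} {queue}: "
                  f"expected from {last[key]}, got {a}")
        last[key] = b

if __name__ == "__main__":
    check_chaining(sys.stdin.read())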
2025-06-03T10:27:50.969787Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 724904.015200 to 726517.873600 (insert task task-942 (942 by [2:100:2134])) 2025-06-03T10:27:50.969792Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-954 (954 by [2:100:2134]) 2025-06-03T10:27:50.969798Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-942 (942 by [2:100:2134]) (release resources {365, 369}) 2025-06-03T10:27:50.969804Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 726517.873600 to 725602.310800 (remove task task-942 (942 by [2:100:2134])) 2025-06-03T10:27:50.969808Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 724643.061200 to 725341.504400 2025-06-03T10:27:50.969812Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {291, 338} for task task-954 (954 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969816Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-954 (954 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969821Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 725602.310800 to 727017.719600 (insert task task-954 (954 by [2:100:2134])) 2025-06-03T10:27:50.969826Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-972 (972 by [2:100:2134]) 2025-06-03T10:27:50.969832Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-954 (954 by [2:100:2134]) (release resources {291, 338}) 2025-06-03T10:27:50.969836Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 725341.504400 to 727790.382000 2025-06-03T10:27:50.969840Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {418, 8} for task task-972 (972 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969844Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-972 (972 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969850Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 727017.719600 to 729648.475600 (insert task task-972 (972 by [2:100:2134])) 2025-06-03T10:27:50.969854Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-981 (981 by [2:100:2134]) 2025-06-03T10:27:50.969860Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-972 (972 by [2:100:2134]) (release resources {418, 8}) 2025-06-03T10:27:50.969865Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 727790.382000 to 730866.193200 2025-06-03T10:27:50.969869Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {354, 219} for task task-981 (981 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969873Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-981 (981 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969877Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 729648.475600 to 
732490.628400 (insert task task-981 (981 by [2:100:2134])) 2025-06-03T10:27:50.969882Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:630: Skip queue queue_default due to exceeded limits 2025-06-03T10:27:50.969889Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-981 (981 by [2:100:2134]) (release resources {354, 219}) 2025-06-03T10:27:50.969896Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 730866.193200 to 733014.973200 2025-06-03T10:27:50.969900Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {59, 192} for task task-999 (999 by [2:100:2134]) from queue queue_default 2025-06-03T10:27:50.969903Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:675: Assigning in-fly task 'task-999 (999 by [2:100:2134])' of unknown type 'wrong' to default queue 2025-06-03T10:27:50.969908Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 732490.628400 to 733940.950800 (insert task task-999 (999 by [2:100:2134])) 2025-06-03T10:27:50.969915Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-999 (999 by [2:100:2134]) (release resources {59, 192}) 2025-06-03T10:27:50.969920Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_default from 733940.950800 to 733932.272400 (remove task task-999 (999 by [2:100:2134])) 2025-06-03T10:27:50.969924Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 733014.973200 to 733932.272400 >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose >> TCdcStreamTests::Basic [GOOD] >> TCdcStreamTests::Attributes >> TCdcStreamTests::SchemaChanges [GOOD] >> TCdcStreamTests::RetentionPeriod >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx3 [GOOD] >> BackupRestoreS3::RestoreViewQueryText [GOOD] >> BackupRestoreS3::RestoreViewReferenceTable >> DataShardReadTableSnapshots::ReadTableSplitBefore >> EncryptedBackupParamsValidationTestFeatureDisabled::CommonDestPathSpecified [GOOD] >> DataShardReadTableSnapshots::ReadTableDropColumn >> TCdcStreamTests::Attributes [GOOD] >> TCdcStreamTests::DocApi >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder >> EncryptedBackupParamsValidationTest::IncorrectKeyLengthExport [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx3 [GOOD] Test command err: iteration# 3 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 9 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 15 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 21 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 27 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 33 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 39 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 45 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 51 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 57 BlobsWritten# 490 
blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 [... iterations 63 through 477 (every 6th, 70 records) elided: each reports the identical counts BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 ...] iteration# 483 BlobsWritten# 490
blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 489 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 |63.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |63.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg |63.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/pg/ydb-core-kqp-ut-pg >> TTabletPipeTest::TestSendAfterOpen >> TCdcStreamTests::DocApi [GOOD] >> TCdcStreamTests::DocApiNegative >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified >> TCdcStreamTests::RetentionPeriod [GOOD] >> TCdcStreamTests::TopicPartitions >> TTabletPipeTest::TestSendAfterOpen [GOOD] >> TCdcStreamTests::DocApiNegative [GOOD] >> TCdcStreamTests::Negative >> TResourceBrokerInstant::Test >> TTabletPipeTest::TestPipeWithVersionInfo >> TTabletResolver::NodeProblem >> EncryptedBackupParamsValidationTest::EmptyImportItem >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] >> TResourceBrokerInstant::Test [GOOD] >> TResourceBrokerInstant::TestErrors >> DataShardReadTableSnapshots::ReadTableSnapshot [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitAfter >> BackupRestore::TestAllPrimitiveTypes-FLOAT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE >> EncryptedBackupParamsValidationTestFeatureDisabled::SrcPrefixAndSrcPathSpecified [GOOD] >> TCdcStreamTests::Negative [GOOD] >> TCdcStreamTests::DisableProtoSourceIdInfo |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendAfterOpen [GOOD] >> TResourceBrokerInstant::TestErrors [GOOD] >> TTabletResolver::NodeProblem [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerless [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless >> TCdcStreamTests::DisableProtoSourceIdInfo [GOOD] >> TCdcStreamTests::CreateStream |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeWithVersionInfo [GOOD] >> TCdcStreamTests::TopicPartitions [GOOD] >> TCdcStreamTests::ReplicationAttribute >> TTabletPipeTest::TestSendBeforeBootTarget >> EncryptedExportTest::EncryptedExportAndImport >> DataShardReadTableSnapshots::ReadTableDropColumnLatePropose [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows >> DataShardReadTableSnapshots::ReadTableSplitBefore [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitFinished >> BackupRestoreS3::RestoreViewReferenceTable [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletResolver::NodeProblem [GOOD] Test command err: 2025-06-03T10:27:54.365133Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StInit ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365195Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [1:210:2136] CurrentLeaderTablet: [1:211:2137] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.365203Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-03T10:27:54.365212Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 
localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:210:2136] 2025-06-03T10:27:54.365242Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StInit ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365266Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [1:216:2140] CurrentLeaderTablet: [1:217:2141] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.365270Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-03T10:27:54.365275Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:216:2140] 2025-06-03T10:27:54.365515Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365524Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [1:210:2136] 2025-06-03T10:27:54.365547Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365552Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [1:216:2140] 2025-06-03T10:27:54.365575Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 1 max(problemEpoch): 2 2025-06-03T10:27:54.365584Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [1:210:2136] by NodeId 2025-06-03T10:27:54.365591Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365619Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [2:226:2094] CurrentLeaderTablet: [2:227:2095] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.365624Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-03T10:27:54.365629Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:226:2094] 2025-06-03T10:27:54.365659Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [1:216:2140] by NodeId 2025-06-03T10:27:54.365664Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve 
ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365689Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [2:232:2096] CurrentLeaderTablet: [2:233:2097] CurrentGeneration: 2 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:2199047599219:0] : 9}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.365693Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-03T10:27:54.365698Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:232:2096] 2025-06-03T10:27:54.365894Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 2 2025-06-03T10:27:54.365902Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365908Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [2:226:2094] 2025-06-03T10:27:54.365940Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.365945Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [2:232:2096] 2025-06-03T10:27:54.365971Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 4 2025-06-03T10:27:54.365976Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 123 leader: [2:226:2094] by NodeId 2025-06-03T10:27:54.365982Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StProblemResolve ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.366008Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 123 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 123 Cookie: 0 CurrentLeader: [3:244:2094] CurrentLeaderTablet: [3:245:2095] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.366012Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 123 followers: 0 2025-06-03T10:27:54.366017Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:244:2094] 2025-06-03T10:27:54.366049Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StNormal ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.366053Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 2 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 
1 allowFollowers 0 winner: [2:232:2096] 2025-06-03T10:27:54.366083Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:583: Handle TEvNodeProblem nodeId: 2 max(problemEpoch): 5 2025-06-03T10:27:54.366089Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 123 entry.State: StNormal ev: {EvForward TabletID: 123 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.366093Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 123 followers: 0 countLeader 1 allowFollowers 0 winner: [3:244:2094] 2025-06-03T10:27:54.366122Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:429: Delayed invalidation of tabletId: 234 leader: [2:232:2096] by NodeId 2025-06-03T10:27:54.366127Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 234 entry.State: StProblemResolve ev: {EvForward TabletID: 234 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:27:54.366154Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 234 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 234 Cookie: 0 CurrentLeader: [3:250:2096] CurrentLeaderTablet: [3:251:2097] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[1:24343667:0] : 3}, {[1:1099535971443:0] : 6}}}} 2025-06-03T10:27:54.366158Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 234 followers: 0 2025-06-03T10:27:54.366162Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 1 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 234 followers: 0 countLeader 1 allowFollowers 0 winner: [3:250:2096] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBrokerInstant::TestErrors [GOOD] Test command err: 2025-06-03T10:27:54.253121Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:54.253203Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:100:2134]) priority=0 resources={100, 100} 2025-06-03T10:27:54.253210Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:54.253215Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-1 (1 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:54.253219Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:54.253231Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 200.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:54.253243Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-1 (1 by [1:100:2134]) (priority=0 type=compaction0 resources={80, 70} resubmit=0) 2025-06-03T10:27:54.253246Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:54.253249Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 160.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:54.253266Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish 
task task-1 (1 by [1:100:2134]) (release resources {80, 70}) 2025-06-03T10:27:54.253271Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 160.000000 to 0.000000 (remove task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:54.581119Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:54.581256Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:100:2134]) priority=0 resources={100, 100} 2025-06-03T10:27:54.581267Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:54.581278Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-1 (1 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:54.581285Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:54.581314Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 200.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:54.581337Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:100:2134]) priority=0 resources={100500, 100500} 2025-06-03T10:27:54.581344Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:408: SubmitTask failed for task 1 to [2:100:2134]: task with the same ID has been already submitted 2025-06-03T10:27:54.581366Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:515: FinishTask failed for task 2 to [2:100:2134]: cannot finish unknown task 2025-06-03T10:27:54.581373Z node 2 :RESOURCE_BROKER ERROR: resource_broker.cpp:1080: FinishTaskInstant failed for task 2: cannot finish unknown task >> DataShardReadTableSnapshots::ReadTableDropColumn [GOOD] >> DataShardReadTableSnapshots::CorruptedDyNumber >> TCdcStreamTests::CreateStream [GOOD] >> TCdcStreamTests::AlterStream >> EncryptedBackupParamsValidationTest::EmptyImportItem [GOOD] >> TCdcStreamTests::ReplicationAttribute [GOOD] >> TCdcStreamTests::StreamOnIndexTableNegative >> TCdcStreamTests::StreamOnIndexTableNegative [GOOD] >> TCdcStreamTests::StreamOnIndexTable >> TCdcStreamTests::AlterStream [GOOD] >> TCdcStreamTests::DropStream >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] >> DataShardReadTableSnapshots::ReadTableSplitNewTxIdResolveResultReorder [GOOD] >> DataShardReadTableSnapshots::ReadTableUUID >> EncryptedBackupParamsValidationTest::IncorrectKeyImport >> TCdcStreamTests::StreamOnIndexTable [GOOD] >> TCdcStreamTests::StreamOnBuildingIndexTable >> EncryptedExportTest::EncryptedExportAndImport [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DOUBLE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitAfter [GOOD] Test command err: 2025-06-03T10:27:53.139039Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:53.139139Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:53.139172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0019c5/r3tmp/tmp2NvMNb/pdisk_1.dat 2025-06-03T10:27:53.271677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:53.291411Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:53.292728Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946472658061 != 1748946472658065 2025-06-03T10:27:53.337798Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:53.338022Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:27:53.338216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:53.338244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:53.350027Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:53.435164Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:27:53.435200Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:27:53.435237Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:27:53.483389Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:27:53.483465Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:53.483727Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:53.483745Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:53.483821Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:53.483880Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:53.483900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:27:53.483981Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:27:53.484403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:53.484677Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:27:53.484692Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:27:53.505480Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:27:53.505850Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:27:53.505967Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:53.506044Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:53.534198Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:27:53.534447Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:53.534506Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:53.534731Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:53.534743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:53.534751Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:53.534809Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:53.534831Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:53.534846Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:53.545644Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:53.551929Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:53.552032Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:53.552077Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:53.552084Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:53.552090Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:53.552098Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:53.552178Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:53.552188Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:53.552307Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:53.552337Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:53.552456Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:53.552466Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:53.552475Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:27:53.552481Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:53.552489Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:53.552496Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:53.552502Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:53.552518Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:53.552525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:53.552534Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:53.552559Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:27:53.552565Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:53.552589Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:27:53.552647Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:27:53.552660Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:53.552682Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:53.552692Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:27:53.552698Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:27:53.552705Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:27:53.552711Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... :2683]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\003\000\000\000b\005\035!\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\003\000\000\000" 2025-06-03T10:27:55.956058Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-03T10:27:55.956062Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:854:2683] TxId# 281474976715661] Sending TEvStreamDataAck to [2:991:2789] ShardId# 72075186224037890 2025-06-03T10:27:55.956079Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-03T10:27:55.956088Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:991:2789], Recipient [2:854:2683]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-03T10:27:55.956092Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-03T10:27:55.956149Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:853:2683], Recipient [2:854:2683]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-03T10:27:55.956155Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:55.956159Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-03T10:27:55.956166Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:55.956179Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715662, Size: 
36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:55.956194Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:991:2789], Recipient [2:854:2683]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715662 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-03T10:27:55.956199Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream data from ShardId# 72075186224037890 2025-06-03T10:27:55.956203Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:854:2683] TxId# 281474976715661] Sending TEvStreamDataAck to [2:991:2789] ShardId# 72075186224037890 2025-06-03T10:27:55.956212Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715662, PendingAcks: 0 2025-06-03T10:27:55.956221Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:991:2789], Recipient [2:854:2683]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715662 ShardId: 72075186224037890 2025-06-03T10:27:55.956225Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-03T10:27:55.956263Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:853:2683], Recipient [2:854:2683]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715661 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-03T10:27:55.956268Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:55.956271Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037890 2025-06-03T10:27:55.956278Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:55.956288Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:55.956315Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:991:2789], Recipient [2:854:2683]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715662 ShardId: 72075186224037890 2025-06-03T10:27:55.956320Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037890 2025-06-03T10:27:55.956324Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:854:2683] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037890 2025-06-03T10:27:55.956332Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-03T10:27:55.956337Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037890 2025-06-03T10:27:55.956362Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, 
received event# 2146435072, Sender [2:894:2714], Recipient [2:894:2714]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:55.956367Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:55.956376Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-03T10:27:55.956381Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:55.956387Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037890 for ReadTableScan 2025-06-03T10:27:55.956391Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit ReadTableScan 2025-06-03T10:27:55.956397Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037890 error: , IsFatalError: 0 2025-06-03T10:27:55.956403Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-03T10:27:55.956407Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit ReadTableScan 2025-06-03T10:27:55.956412Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit FinishPropose 2025-06-03T10:27:55.956416Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-03T10:27:55.956426Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is DelayComplete 2025-06-03T10:27:55.956430Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit FinishPropose 2025-06-03T10:27:55.956434Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037890 to execution unit CompletedOperations 2025-06-03T10:27:55.956438Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037890 on unit CompletedOperations 2025-06-03T10:27:55.956446Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037890 is Executed 2025-06-03T10:27:55.956450Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037890 executing on unit CompletedOperations 2025-06-03T10:27:55.956455Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037890 has finished 2025-06-03T10:27:55.956459Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:55.956463Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-03T10:27:55.956468Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-03T10:27:55.956471Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready 
operations at 72075186224037890 2025-06-03T10:27:55.956480Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:27:55.956484Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037890 on unit FinishPropose 2025-06-03T10:27:55.956490Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:27:55.956503Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:27:55.956557Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:894:2714], Recipient [2:854:2683]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: COMPLETE TxId: 281474976715662 Step: 0 OrderId: 281474976715662 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037890 CpuTimeUsec: 51 } } CommitVersion { Step: 0 TxId: 281474976715662 } 2025-06-03T10:27:55.956563Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037890 2025-06-03T10:27:55.956579Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:854:2683] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.012213s execute time: 0.204654s total time: 0.216867s 2025-06-03T10:27:55.956661Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:854:2683], Recipient [2:664:2568]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-03T10:27:55.956702Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:854:2683], Recipient [2:889:2712]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 2025-06-03T10:27:55.956771Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:854:2683], Recipient [2:894:2714]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> TCdcStreamTests::DropStream [GOOD] >> TCdcStreamTests::AlterStreamImplShouldFail >> EncryptedExportTest::EncryptionAndCompression >> TCdcStreamTests::StreamOnBuildingIndexTable [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanEnabled |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] >> EncryptedBackupParamsValidationTest::IncorrectKeyImport [GOOD] >> BackupRestoreS3::RestoreViewDependentOnAnotherView [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeInvalid [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal >> TCdcStreamTests::AlterStreamImplShouldFail [GOOD] >> TCdcStreamTests::DropStreamImplShouldFail >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] >> 
TCdcStreamWithInitialScanTests::InitialScanEnabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanDisabled ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::CorruptedDyNumber [GOOD] Test command err: 2025-06-03T10:27:53.870464Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:53.870543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:53.870567Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001a0b/r3tmp/tmpwQKY6T/pdisk_1.dat 2025-06-03T10:27:54.005966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.024985Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:54.028946Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946473303133 != 1748946473303137 2025-06-03T10:27:54.073322Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:54.073566Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:27:54.073758Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:54.073785Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:54.085344Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:54.166758Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:27:54.166789Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:27:54.166814Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:27:54.206902Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:27:54.206951Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:54.207200Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:54.207217Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:54.207295Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:54.207351Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:54.207370Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:27:54.207450Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:27:54.207870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.208145Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:27:54.208160Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:27:54.234773Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:27:54.235096Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:27:54.235195Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:54.235269Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:54.251258Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:27:54.251479Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:54.251512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:54.251716Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:54.251727Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:54.251735Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:54.251791Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:54.251810Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:54.251822Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:54.265532Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:54.270655Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:54.270747Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:54.270788Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:54.270795Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:54.270800Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:54.270806Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:54.270880Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.270888Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.271001Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:54.271028Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:54.271141Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:54.271150Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:54.271158Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:27:54.271164Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:54.271169Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:54.271175Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:54.271182Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:54.271196Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.271202Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.271208Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:54.271232Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:27:54.271238Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:54.271261Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:27:54.271316Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:27:54.271328Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:54.271347Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:54.271356Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:27:54.271361Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:27:54.271367Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:27:54.271372Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... ing event TEvTxProcessing::TEvStreamClearancePending 2025-06-03T10:27:56.818503Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287940, Sender [2:739:2620], Recipient [2:664:2568]: NKikimrTx.TEvStreamClearanceResponse TxId: 281474976715659 Cleared: true 2025-06-03T10:27:56.818509Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3150: StateWork, processing event TEvTxProcessing::TEvStreamClearanceResponse 2025-06-03T10:27:56.818549Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:664:2568], Recipient [2:664:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.818555Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.818564Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:56.818572Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:56.818580Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for WaitForStreamClearance 2025-06-03T10:27:56.818586Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit WaitForStreamClearance 2025-06-03T10:27:56.818593Z node 2 :TX_DATASHARD TRACE: wait_for_stream_clearance_unit.cpp:156: Got stream clearance for [0:281474976715659] at 72075186224037888 2025-06-03T10:27:56.818598Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-03T10:27:56.818603Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit WaitForStreamClearance 2025-06-03T10:27:56.818609Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit ReadTableScan 2025-06-03T10:27:56.818614Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-03T10:27:56.818659Z node 2 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Continue 2025-06-03T10:27:56.818665Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:56.818670Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:27:56.818676Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:56.818681Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:56.818691Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:56.818825Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435082, Sender [2:769:2637], Recipient [2:664:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:56.818832Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:56.818847Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:769:2637], Recipient [2:739:2620]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715659 ShardId: 72075186224037888 2025-06-03T10:27:56.818853Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:739:2620] TxId# 281474976715658] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-03T10:27:56.818926Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:738:2620], Recipient [2:739:2620]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715658 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-03T10:27:56.818934Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:739:2620] TxId# 281474976715658] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:56.818942Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:739:2620] TxId# 281474976715658] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-03T10:27:56.818952Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-03T10:27:56.818986Z node 2 :TX_DATASHARD ERROR: read_table_scan.cpp:681: Got scan fatal error: Invalid DyNumber binary representation 2025-06-03T10:27:56.818994Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715659, MessageQuota: 1 2025-06-03T10:27:56.819027Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-03T10:27:56.819033Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715659, at: 72075186224037888 2025-06-03T10:27:56.819064Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:769:2637], Recipient [2:739:2620]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715659 ShardId: 72075186224037888 2025-06-03T10:27:56.819070Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:739:2620] TxId# 281474976715658] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-03T10:27:56.819076Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable 
[2:739:2620] TxId# 281474976715658] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-03T10:27:56.819088Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:664:2568], Recipient [2:664:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.819093Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.819101Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:56.819106Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:56.819112Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715659] at 72075186224037888 for ReadTableScan 2025-06-03T10:27:56.819117Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit ReadTableScan 2025-06-03T10:27:56.819124Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715659] at 72075186224037888 error: Invalid DyNumber binary representation, IsFatalError: 1 2025-06-03T10:27:56.819133Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-03T10:27:56.819139Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit ReadTableScan 2025-06-03T10:27:56.819144Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:27:56.819148Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-03T10:27:56.819157Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is DelayComplete 2025-06-03T10:27:56.819162Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:27:56.819167Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715659] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:27:56.819174Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715659] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:27:56.819189Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715659] at 72075186224037888 is Executed 2025-06-03T10:27:56.819193Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715659] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:27:56.819199Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715659] at 72075186224037888 has finished 2025-06-03T10:27:56.819204Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:56.819209Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 
2025-06-03T10:27:56.819213Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:56.819218Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:56.819227Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:56.819232Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715659] at 72075186224037888 on unit FinishPropose 2025-06-03T10:27:56.819241Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715659 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: EXEC_ERROR 2025-06-03T10:27:56.819250Z node 2 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715659 at tablet 72075186224037888 status: EXEC_ERROR errors: PROGRAM_ERROR (Invalid DyNumber binary representation) | 2025-06-03T10:27:56.819266Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:56.819333Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:664:2568], Recipient [2:739:2620]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037888 Status: EXEC_ERROR Error { Kind: PROGRAM_ERROR Reason: "Invalid DyNumber binary representation" } TxId: 281474976715659 Step: 0 OrderId: 281474976715659 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 94 } } CommitVersion { Step: 0 TxId: 281474976715659 } 2025-06-03T10:27:56.819341Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1921: [ReadTable [2:739:2620] TxId# 281474976715658] Received TEvProposeTransactionResult Status# EXEC_ERROR ShardId# 72075186224037888 2025-06-03T10:27:56.819356Z node 2 :TX_PROXY ERROR: read_table_impl.cpp:2919: [ReadTable [2:739:2620] TxId# 281474976715658] RESPONSE Status# ExecError shard: 72075186224037888 table: /Root/Table 2025-06-03T10:27:56.819432Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:739:2620], Recipient [2:664:2568]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 1500 TxId: 281474976715658 |63.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |63.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |63.5%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut/ydb-core-sys_view-ut |63.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendBeforeBootTarget [GOOD] Test command err: Leader for TabletID 9437184 is [0:0:0] sender: [1:105:2057] recipient: [1:103:2136] IGNORE Leader for TabletID 9437184 is [0:0:0] sender: [1:105:2057] recipient: [1:103:2136] Leader for TabletID 9437184 is [1:109:2140] sender: [1:110:2057] recipient: [1:103:2136] Leader for TabletID 9437184 is [1:109:2140] sender: [1:129:2057] recipient: [1:14:2061] Leader for TabletID 9437185 is [0:0:0] sender: [1:158:2057] recipient: [1:156:2162] IGNORE Leader for TabletID 9437185 is [0:0:0] sender: [1:158:2057] recipient: 
[1:156:2162] Leader for TabletID 9437185 is [1:162:2166] sender: [1:163:2057] recipient: [1:156:2162] Leader for TabletID 9437185 is [1:162:2166] sender: [1:188:2057] recipient: [1:14:2061] |63.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] >> EncryptedExportTest::EncryptionAndCompression [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableMaxRows [GOOD] Test command err: 2025-06-03T10:27:53.516029Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:53.516135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:53.516829Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0019ed/r3tmp/tmpCSjyPK/pdisk_1.dat 2025-06-03T10:27:53.661083Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:53.679071Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:53.680406Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946472806302 != 1748946472806306 2025-06-03T10:27:53.725307Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:53.725508Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:27:53.725669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:53.725694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:53.737780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:53.820609Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:27:53.820656Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:27:53.820687Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:27:53.875015Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:27:53.875067Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:53.875305Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:53.875323Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:53.875396Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:53.875468Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:53.875487Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:27:53.875565Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:27:53.875989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:53.876324Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:27:53.876342Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:27:53.892517Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:27:53.892826Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:27:53.892926Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:53.893011Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:53.927911Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:27:53.928131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:53.928163Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:53.928372Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:53.928383Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:53.928393Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:53.928446Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:53.928467Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:53.928484Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:53.940020Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:53.952442Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:53.952532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:53.952570Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:53.952576Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:53.952582Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:53.952589Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:53.952659Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:53.952667Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:53.952772Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:53.952795Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:53.952900Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:53.952909Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:53.952917Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:27:53.952923Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:53.952928Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:53.952934Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:53.952940Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:53.952956Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:53.952964Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:53.952972Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:53.952995Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:27:53.953001Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:53.953022Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:27:53.953076Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:27:53.953087Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:53.953107Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:53.953118Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:27:53.953123Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:27:53.953130Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:27:53.953135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... 224037890 2025-06-03T10:27:56.956726Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:27:56.956787Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435082, Sender [2:990:2789], Recipient [2:879:2701]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:56.956793Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:56.956805Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:990:2789], Recipient [2:964:2765]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-03T10:27:56.956810Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:964:2765] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-03T10:27:56.956815Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:964:2765] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 2 rows at [2:990:2789] 2025-06-03T10:27:56.956830Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-03T10:27:56.956892Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:56.956920Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:990:2789], Recipient [2:964:2765]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\004\000\000\000b\005\035,\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\004\000\000\000" 2025-06-03T10:27:56.956926Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:964:2765] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-03T10:27:56.956930Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:964:2765] TxId# 281474976715662] Sending TEvStreamDataAck to [2:990:2789] ShardId# 72075186224037890 2025-06-03T10:27:56.956939Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-03T10:27:56.956954Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:990:2789], Recipient [2:964:2765]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715663 ShardId: 72075186224037890 2025-06-03T10:27:56.956958Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:964:2765] TxId# 281474976715662] Received TEvStreamQuotaRequest from ShardId# 72075186224037890 2025-06-03T10:27:56.957009Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:963:2765], Recipient [2:964:2765]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715662 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-03T10:27:56.957014Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:964:2765] TxId# 281474976715662] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:56.957018Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:964:2765] TxId# 281474976715662] Reserving quota 1 messages for ShardId# 72075186224037890 ... 
observed row limit of 1 rows at [2:990:2789] 2025-06-03T10:27:56.957031Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 1 2025-06-03T10:27:56.957040Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037890, TxId: 281474976715663, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:56.957059Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:990:2789], Recipient [2:964:2765]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037890 Status: RESPONSE_DATA TxId: 281474976715663 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\005\000\000\000b\005\0357\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 2 DataLastKey: "\001\000\004\000\000\000\005\000\000\000" 2025-06-03T10:27:56.957063Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:964:2765] TxId# 281474976715662] Received stream data from ShardId# 72075186224037890 2025-06-03T10:27:56.957067Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:964:2765] TxId# 281474976715662] Sending TEvStreamDataAck to [2:990:2789] ShardId# 72075186224037890 2025-06-03T10:27:56.957080Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:964:2765] TxId# 281474976715662] RESPONSE Status# ExecComplete prepare time: 0.013190s execute time: 0.163299s total time: 0.176489s 2025-06-03T10:27:56.957111Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037890, TxId: 281474976715663, PendingAcks: 0 2025-06-03T10:27:56.957133Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037890, TxId: 281474976715663, MessageQuota: 0 2025-06-03T10:27:56.957165Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037890 2025-06-03T10:27:56.957170Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715663, at: 72075186224037890 2025-06-03T10:27:56.957238Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:964:2765], Recipient [2:874:2699]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 2025-06-03T10:27:56.957638Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:879:2701], Recipient [2:879:2701]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.957655Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:56.957664Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037890 2025-06-03T10:27:56.957669Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:56.957676Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715663] at 72075186224037890 for ReadTableScan 2025-06-03T10:27:56.957681Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit ReadTableScan 2025-06-03T10:27:56.957689Z node 2 :TX_DATASHARD TRACE: 
read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715663] at 72075186224037890 error: , IsFatalError: 0 2025-06-03T10:27:56.957696Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-03T10:27:56.957701Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit ReadTableScan 2025-06-03T10:27:56.957706Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit FinishPropose 2025-06-03T10:27:56.957711Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-03T10:27:56.957718Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is DelayComplete 2025-06-03T10:27:56.957722Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit FinishPropose 2025-06-03T10:27:56.957726Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715663] at 72075186224037890 to execution unit CompletedOperations 2025-06-03T10:27:56.957731Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715663] at 72075186224037890 on unit CompletedOperations 2025-06-03T10:27:56.957740Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715663] at 72075186224037890 is Executed 2025-06-03T10:27:56.957744Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715663] at 72075186224037890 executing on unit CompletedOperations 2025-06-03T10:27:56.957749Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715663] at 72075186224037890 has finished 2025-06-03T10:27:56.957753Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037890 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:56.957761Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037890 2025-06-03T10:27:56.957765Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037890 has no attached operations 2025-06-03T10:27:56.957769Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037890 2025-06-03T10:27:56.957779Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:27:56.957784Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715663] at 72075186224037890 on unit FinishPropose 2025-06-03T10:27:56.957790Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715663 at tablet 72075186224037890 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:27:56.957804Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:27:56.957872Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549569, Sender [2:964:2765], Recipient [2:879:2701]: NKikimrTxDataShard.TEvCancelTransactionProposal TxId: 281474976715663 
2025-06-03T10:27:56.957878Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3171: StateWork, processing event TEvDataShard::TEvCancelTransactionProposal 2025-06-03T10:27:56.957883Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:73: Got TEvDataShard::TEvCancelTransactionProposal 72075186224037890 txId 281474976715663 2025-06-03T10:27:56.957891Z node 2 :TX_DATASHARD DEBUG: datashard__cancel_tx_proposal.cpp:44: Start TTxCancelTransactionProposal at tablet 72075186224037890 txId 281474976715663 2025-06-03T10:27:56.957918Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287431, Sender [2:964:2765], Recipient [2:879:2701]: NKikimrTx.TEvInterruptTransaction TxId: 281474976715663 2025-06-03T10:27:56.957922Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3152: StateWork, processing event TEvTxProcessing::TEvInterruptTransaction 2025-06-03T10:27:56.957936Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:964:2765], Recipient [2:879:2701]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715662 >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport |63.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> BackupRestore::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME >> TCdcStreamTests::DropStreamImplShouldFail [GOOD] >> TCdcStreamTests::CopyTableShouldNotCopyStream >> TCdcStreamWithInitialScanTests::InitialScanDisabled [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress >> EncryptedExportTest::EncryptionAndChecksum ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableSplitFinished [GOOD] >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_InactiveActor_Test [GOOD] Test command err: 2025-06-03T10:27:53.912829Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:53.912909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:53.912939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0019dd/r3tmp/tmpN7rN3C/pdisk_1.dat 2025-06-03T10:27:54.042325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.061555Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:54.064375Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946473300043 != 1748946473300047 2025-06-03T10:27:54.108119Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:54.108336Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:27:54.108509Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:54.108535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:54.119320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:54.196115Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:27:54.196148Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:27:54.196184Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:27:54.224767Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:27:54.224823Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:54.225074Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:54.225090Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:54.225157Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:54.225208Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:54.225226Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:27:54.225329Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:27:54.225850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.226173Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:27:54.226188Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:27:54.241977Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:27:54.242306Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:27:54.242419Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:54.242532Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:54.254538Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:27:54.254789Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:54.254822Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:54.255051Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:54.255061Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:54.255069Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:54.255130Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:54.255146Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:54.255158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:54.265649Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:54.271015Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:54.271115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:54.271154Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:54.271159Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:54.271163Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:54.271167Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:54.271249Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.271255Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.271367Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:54.271390Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:54.271483Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:54.271490Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:54.271497Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:27:54.271502Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:54.271505Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:54.271509Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:54.271513Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:54.271522Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.271527Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.271532Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:54.271548Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:27:54.271553Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:54.271572Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:27:54.271625Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:27:54.271638Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:54.271656Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:54.271665Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:27:54.271669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:27:54.271673Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:27:54.271677Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... RD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037896, TxId: 281474976715664, Size: 36, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:57.303782Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1346:3059], Recipient [2:1074:2847]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: RESPONSE_DATA TxId: 281474976715664 TxResult: "\n\016\n\003key\022\007\252\006\004\n\002\010\002\n\020\n\005value\022\007\252\006\004\n\002\010\002\030\001\022\016b\005\035\006\000\000\000b\005\035B\000\000\000" RowOffsets: 36 ApiVersion: 1 DataSeqNo: 1 DataLastKey: "\001\000\004\000\000\000\006\000\000\000" 2025-06-03T10:27:57.303788Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:1074:2847] TxId# 281474976715663] Received stream data from ShardId# 72075186224037896 2025-06-03T10:27:57.303792Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1711: [ReadTable [2:1074:2847] TxId# 281474976715663] Sending TEvStreamDataAck to [2:1346:3059] ShardId# 72075186224037896 2025-06-03T10:27:57.303803Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037896, TxId: 281474976715664, PendingAcks: 0 2025-06-03T10:27:57.303815Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287428, Sender [2:1346:3059], Recipient [2:1074:2847]: NKikimrTx.TEvStreamQuotaRequest TxId: 281474976715664 ShardId: 72075186224037896 2025-06-03T10:27:57.303820Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:1074:2847] TxId# 281474976715663] Received TEvStreamQuotaRequest from ShardId# 72075186224037896 2025-06-03T10:27:57.303889Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287941, Sender [2:1073:2847], Recipient [2:1074:2847]: NKikimrTx.TEvStreamQuotaResponse TxId: 281474976715663 MessageSizeLimit: 1 ReservedMessages: 1 2025-06-03T10:27:57.303896Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:1074:2847] TxId# 281474976715663] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:57.303903Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:1074:2847] TxId# 281474976715663] Reserving quota 1 messages for ShardId# 72075186224037896 
2025-06-03T10:27:57.303910Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-03T10:27:57.303918Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037896, TxId: 281474976715664, MessageQuota: 1 2025-06-03T10:27:57.303943Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037896 2025-06-03T10:27:57.303948Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715664, at: 72075186224037896 2025-06-03T10:27:57.303973Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269287429, Sender [2:1346:3059], Recipient [2:1074:2847]: NKikimrTx.TEvStreamQuotaRelease TxId: 281474976715664 ShardId: 72075186224037896 2025-06-03T10:27:57.303978Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:1074:2847] TxId# 281474976715663] Received TEvStreamQuotaRelease from ShardId# 72075186224037896 2025-06-03T10:27:57.303983Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:1074:2847] TxId# 281474976715663] Released quota 1 reserved messages from ShardId# 72075186224037896 2025-06-03T10:27:57.303999Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:1245:2980], Recipient [2:1245:2980]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:57.304007Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:57.304015Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037896 2025-06-03T10:27:57.304020Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:57.304027Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715664] at 72075186224037896 for ReadTableScan 2025-06-03T10:27:57.304032Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit ReadTableScan 2025-06-03T10:27:57.304038Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715664] at 72075186224037896 error: , IsFatalError: 0 2025-06-03T10:27:57.304045Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-03T10:27:57.304050Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit ReadTableScan 2025-06-03T10:27:57.304055Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 72075186224037896 to execution unit FinishPropose 2025-06-03T10:27:57.304060Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-03T10:27:57.304068Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is DelayComplete 2025-06-03T10:27:57.304072Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit FinishPropose 2025-06-03T10:27:57.304077Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715664] at 
72075186224037896 to execution unit CompletedOperations 2025-06-03T10:27:57.304081Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715664] at 72075186224037896 on unit CompletedOperations 2025-06-03T10:27:57.304090Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715664] at 72075186224037896 is Executed 2025-06-03T10:27:57.304094Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715664] at 72075186224037896 executing on unit CompletedOperations 2025-06-03T10:27:57.304099Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715664] at 72075186224037896 has finished 2025-06-03T10:27:57.304104Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037896 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:57.304108Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037896 2025-06-03T10:27:57.304113Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037896 has no attached operations 2025-06-03T10:27:57.304118Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037896 2025-06-03T10:27:57.304126Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037896 2025-06-03T10:27:57.304132Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715664] at 72075186224037896 on unit FinishPropose 2025-06-03T10:27:57.304138Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715664 at tablet 72075186224037896 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:27:57.304151Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037896 2025-06-03T10:27:57.304200Z node 2 :TX_PROXY TRACE: read_table_impl.cpp:1232: StateReadTable, received event# 269550080, Sender [2:1245:2980], Recipient [2:1074:2847]: NKikimrTxDataShard.TEvProposeTransactionResult TxKind: TX_KIND_SCAN Origin: 72075186224037896 Status: COMPLETE TxId: 281474976715664 Step: 0 OrderId: 281474976715664 ExecLatency: 0 ProposeLatency: 0 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186224037896 CpuTimeUsec: 59 } } CommitVersion { Step: 0 TxId: 281474976715664 } 2025-06-03T10:27:57.304205Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:1074:2847] TxId# 281474976715663] Received stream complete from ShardId# 72075186224037896 2025-06-03T10:27:57.304219Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:1074:2847] TxId# 281474976715663] RESPONSE Status# ExecComplete prepare time: 0.013764s execute time: 0.491685s total time: 0.505449s 2025-06-03T10:27:57.304370Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:874:2699]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-03T10:27:57.304414Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:984:2781]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 
2025-06-03T10:27:57.304506Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:989:2783]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-03T10:27:57.304561Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:1242:2978]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-03T10:27:57.304629Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [2:1349:3062], Recipient [2:1130:2896]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:57.304636Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:57.304645Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037893, clientId# [2:1347:3060], serverId# [2:1349:3062], sessionId# [0:0:0] 2025-06-03T10:27:57.304658Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:1245:2980]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-03T10:27:57.304704Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:1130:2896]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 2025-06-03T10:27:57.304734Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [2:1350:3063], Recipient [2:1135:2898]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:57.304739Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:57.304744Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037894, clientId# [2:1348:3061], serverId# [2:1350:3063], sessionId# [0:0:0] 2025-06-03T10:27:57.304760Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:1074:2847], Recipient [2:1135:2898]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715663 >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test >> TableWriter::Backup [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_table/unittest >> DataShardReadTableSnapshots::ReadTableUUID [GOOD] Test command err: 2025-06-03T10:27:54.402078Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:27:54.402197Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:27:54.402238Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0019f2/r3tmp/tmpycdkIf/pdisk_1.dat 2025-06-03T10:27:54.575116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.594675Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:54.595972Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946473696743 != 1748946473696747 2025-06-03T10:27:54.638748Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:27:54.638941Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:27:54.639107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:54.639132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:54.649915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:54.727229Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:27:54.727264Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:27:54.727300Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:27:54.753470Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:27:54.753506Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:54.753695Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:54.753710Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:54.753776Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:54.753828Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:54.753842Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:27:54.753904Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:27:54.754240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:27:54.756247Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:27:54.756271Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:27:54.771876Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:27:54.772146Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:27:54.772245Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:27:54.772326Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:27:54.784375Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:27:54.784565Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:27:54.784594Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:27:54.784794Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:27:54.784805Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:27:54.784812Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:27:54.784865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:27:54.784885Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:27:54.784898Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:27:54.795341Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:27:54.801152Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:27:54.801238Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:27:54.801274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:27:54.801281Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:27:54.801286Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:27:54.801314Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:54.801389Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.801397Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:54.801515Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:27:54.801538Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:27:54.801620Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:54.801629Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:54.801638Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:27:54.801644Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:54.801649Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:54.801655Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:27:54.801661Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:54.801675Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.801681Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:54.801689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:27:54.801710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:27:54.801716Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:27:54.801736Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:27:54.801784Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:27:54.801797Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:27:54.801816Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:27:54.801826Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:27:54.801832Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:27:54.801839Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:27:54.801845Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... 86224037888 to execution unit ReadTableScan 2025-06-03T10:27:57.741649Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-03T10:27:57.741705Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Continue 2025-06-03T10:27:57.741711Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:57.741716Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:27:57.741722Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:57.741726Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:57.741735Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:57.741852Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435082, Sender [2:866:2694], Recipient [2:664:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:57.741858Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3159: StateWork, processing event TEvPrivate::TEvRegisterScanActor 2025-06-03T10:27:57.741866Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-03T10:27:57.741927Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:57.741932Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-03T10:27:57.741942Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:57.741988Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 
281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:57.742004Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-03T10:27:57.742016Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-03T10:27:57.742024Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-03T10:27:57.742094Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:57.742099Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-03T10:27:57.742105Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:57.742114Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:57.742124Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-03T10:27:57.742131Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-03T10:27:57.742137Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-03T10:27:57.742173Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:57.742177Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 messages for ShardId# 72075186224037888 2025-06-03T10:27:57.742183Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:57.742189Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:662: Send response data ShardId: 72075186224037888, TxId: 281474976715662, Size: 38, Rows: 0, PendingAcks: 1, MessageQuota: 0 2025-06-03T10:27:57.742199Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1700: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream data from ShardId# 72075186224037888 2025-06-03T10:27:57.742206Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2069: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRequest from ShardId# 72075186224037888 2025-06-03T10:27:57.742212Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:483: Got stream data ack ShardId: 72075186224037888, TxId: 281474976715662, PendingAcks: 0 2025-06-03T10:27:57.742244Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2097: [ReadTable [2:854:2683] TxId# 281474976715661] Updated quotas, allocated = 1, message size = 1, message rows = 0, available = 1 2025-06-03T10:27:57.742248Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2160: [ReadTable [2:854:2683] TxId# 281474976715661] Reserving quota 1 
messages for ShardId# 72075186224037888 2025-06-03T10:27:57.742254Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:514: Got quota for read table scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:57.742263Z node 2 :TX_DATASHARD DEBUG: read_table_scan.cpp:718: Finish scan ShardId: 72075186224037888, TxId: 281474976715662, MessageQuota: 1 2025-06-03T10:27:57.742293Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4471: FullScan complete at 72075186224037888 2025-06-03T10:27:57.742299Z node 2 :TX_DATASHARD DEBUG: datashard.cpp:4477: Found op: cookie: 281474976715662, at: 72075186224037888 2025-06-03T10:27:57.742310Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2120: [ReadTable [2:854:2683] TxId# 281474976715661] Received TEvStreamQuotaRelease from ShardId# 72075186224037888 2025-06-03T10:27:57.742315Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:2188: [ReadTable [2:854:2683] TxId# 281474976715661] Released quota 1 reserved messages from ShardId# 72075186224037888 2025-06-03T10:27:57.742344Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [2:664:2568], Recipient [2:664:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:57.742349Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:27:57.742357Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:27:57.742363Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:27:57.742369Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:281474976715662] at 72075186224037888 for ReadTableScan 2025-06-03T10:27:57.742374Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit ReadTableScan 2025-06-03T10:27:57.742381Z node 2 :TX_DATASHARD TRACE: read_table_scan_unit.cpp:158: ReadTable scan complete for [0:281474976715662] at 72075186224037888 error: , IsFatalError: 0 2025-06-03T10:27:57.742388Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-03T10:27:57.742393Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit ReadTableScan 2025-06-03T10:27:57.742398Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:27:57.742403Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-03T10:27:57.747349Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is DelayComplete 2025-06-03T10:27:57.747391Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:27:57.747398Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715662] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:27:57.747405Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715662] at 72075186224037888 on unit 
CompletedOperations 2025-06-03T10:27:57.747424Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715662] at 72075186224037888 is Executed 2025-06-03T10:27:57.747429Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715662] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:27:57.747435Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715662] at 72075186224037888 has finished 2025-06-03T10:27:57.747445Z node 2 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:27:57.747451Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:27:57.747458Z node 2 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:27:57.747463Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:27:57.747492Z node 2 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:27:57.747498Z node 2 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715662] at 72075186224037888 on unit FinishPropose 2025-06-03T10:27:57.747510Z node 2 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715662 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:27:57.747541Z node 2 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:27:57.747640Z node 2 :TX_PROXY DEBUG: read_table_impl.cpp:1850: [ReadTable [2:854:2683] TxId# 281474976715661] Received stream complete from ShardId# 72075186224037888 2025-06-03T10:27:57.747660Z node 2 :TX_PROXY INFO: read_table_impl.cpp:2933: [ReadTable [2:854:2683] TxId# 281474976715661] RESPONSE Status# ExecComplete prepare time: 0.013637s execute time: 0.125934s total time: 0.139571s 2025-06-03T10:27:57.747792Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553190, Sender [2:854:2683], Recipient [2:664:2568]: NKikimrTxDataShard.TEvDiscardVolatileSnapshotRequest OwnerId: 72057594046644480 PathId: 2 Step: 2000 TxId: 281474976715661 |63.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> KqpPg::NoTableQuery+useSink |63.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Backup [GOOD] >> TCdcStreamWithInitialScanTests::InitialScanProgress [GOOD] >> TCdcStreamWithInitialScanTests::WithoutPqTransactions >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport [GOOD] >> TableWriter::Restore [GOOD] >> TResourceBroker::TestCounters >> KqpPg::TypeCoercionBulkUpsert >> TCdcStreamTests::CopyTableShouldNotCopyStream [GOOD] >> TCdcStreamTests::MoveTableShouldFail >> KqpPg::InsertFromSelect_Simple+useSink >> TResourceBroker::TestCounters [GOOD] >> TResourceBroker::TestChangeTaskType |63.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_table_writer/unittest >> TableWriter::Restore [GOOD] >> TResourceBroker::TestChangeTaskType [GOOD] >> KqpPg::ReadPgArray >> KqpPg::ReadPgArray [GOOD] |63.6%| [LD] 
{default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |63.6%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record |63.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_json_change_record/tx-replication-service-ut_json_change_record >> KqpPg::TableArrayInsert+useSink >> KqpPg::CreateTableBulkUpsertAndRead >> KqpPg::NoTableQuery+useSink [GOOD] >> KqpPg::NoTableQuery-useSink >> TCdcStreamTests::MoveTableShouldFail [GOOD] >> TCdcStreamTests::CheckSchemeLimits >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobal [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync >> BackupRestore::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL >> KqpPg::InsertNoTargetColumns_Simple+useSink |63.6%| [TA] $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::TypeCoercionInsert-useSink >> TCdcStreamWithInitialScanTests::WithoutPqTransactions [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBroker::TestChangeTaskType [GOOD] Test command err: 2025-06-03T10:27:59.691339Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:59.691470Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:100:2134]) priority=5 resources={200, 200} 2025-06-03T10:27:59.691481Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691491Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {200, 200} for task task-1 (1 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:59.691497Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691510Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 400.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:59.691520Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-2 (2 by [1:100:2134]) priority=5 resources={100, 100} 2025-06-03T10:27:59.691524Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691529Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-2 (2 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.691533Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691538Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 200.000000 (insert task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:59.691546Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [1:100:2134]) priority=5 resources={100, 100} 2025-06-03T10:27:59.691549Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction1 
2025-06-03T10:27:59.691553Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-3 (3 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.691557Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691562Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 200.000000 to 400.000000 (insert task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:59.691569Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-4 (4 by [1:100:2134]) priority=5 resources={100, 100} 2025-06-03T10:27:59.691573Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-4 (4 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691578Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-4 (4 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.691583Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691589Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 400.000000 to 600.000000 (insert task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:59.691597Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-5 (5 by [1:100:2134]) priority=5 resources={250, 250} 2025-06-03T10:27:59.691602Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-5 (5 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691608Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [1:100:2134]) 2025-06-03T10:27:59.691617Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-6 (6 by [1:100:2134]) priority=5 resources={250, 250} 2025-06-03T10:27:59.691622Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-6 (6 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691632Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [1:100:2134]) 2025-06-03T10:27:59.691637Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-03T10:27:59.691644Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-7 (7 by [1:100:2134]) priority=5 resources={150, 150} 2025-06-03T10:27:59.691660Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-7 (7 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691665Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [1:100:2134]) 2025-06-03T10:27:59.691669Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-03T10:27:59.691715Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:100:2134]) (release resources {200, 200}) 2025-06-03T10:27:59.691723Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 400.000000 to 0.000000 (remove task task-1 (1 by [1:100:2134])) 
2025-06-03T10:27:59.691727Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-5 (5 by [1:100:2134]) 2025-06-03T10:27:59.691731Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction1 blocked by an earlier queue 2025-06-03T10:27:59.691737Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:100:2134]) (release resources {100, 100}) 2025-06-03T10:27:59.691742Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 600.000000 to 400.000000 (remove task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:59.691746Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 250} for task task-5 (5 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:59.691750Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-5 (5 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691754Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 475.000000 (insert task task-5 (5 by [1:100:2134])) 2025-06-03T10:27:59.691759Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-6 (6 by [1:100:2134]) 2025-06-03T10:27:59.691785Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-3 (3 by [1:100:2134]) (release resources {100, 100}) 2025-06-03T10:27:59.691791Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 400.000000 to 200.000000 (remove task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:59.691795Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-6 (6 by [1:100:2134]) 2025-06-03T10:27:59.691801Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-4 (4 by [1:100:2134]) (release resources {100, 100}) 2025-06-03T10:27:59.691805Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 200.000000 to 0.000000 (remove task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:59.691809Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {250, 250} for task task-6 (6 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.691816Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-6 (6 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691821Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 425.000000 (insert task task-6 (6 by [1:100:2134])) 2025-06-03T10:27:59.691825Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-7 (7 by [1:100:2134]) 2025-06-03T10:27:59.691850Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-5 (5 by [1:100:2134]) (release resources {250, 250}) 2025-06-03T10:27:59.691856Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 475.000000 to 0.000000 (remove task task-5 (5 by [1:100:2134])) 2025-06-03T10:27:59.691860Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {150, 150} for task task-7 (7 by [1:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.691865Z node 1 :RESOURCE_BROKER DEBUG: 
resource_broker.cpp:679: Assigning in-fly task task-7 (7 by [1:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.691870Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 425.000000 to 680.000000 (insert task task-7 (7 by [1:100:2134])) 2025-06-03T10:27:59.691876Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-6 (6 by [1:100:2134]) (release resources {250, 250}) 2025-06-03T10:27:59.691882Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 680.000000 to 255.000000 (remove task task-6 (6 by [1:100:2134])) 2025-06-03T10:27:59.691909Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-7 (7 by [1:100:2134]) (release resources {150, 150}) 2025-06-03T10:27:59.691914Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction1 from 255.000000 to 0.000000 (remove task task-7 (7 by [1:100:2134])) 2025-06-03T10:27:59.691922Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1000 (1000 by [1:100:2134]) priority=5 resources={500, 500} 2025-06-03T10:27:59.691927Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1000 (1000 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691932Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {500, 500} for task task-1000 (1000 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:27:59.691936Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1000 (1000 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.691942Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 900.000000 (insert task task-1000 (1000 by [1:100:2134])) 2025-06-03T10:27:59.691951Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-1 (1 by [1:100:2134]) priority=5 resources={1, 1} 2025-06-03T10:27:59.691955Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.691960Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-1 (1 by [1:100:2134]) 2025-06-03T10:27:59.691966Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-2 (2 by [1:100:2134]) priority=5 resources={1, 1} 2025-06-03T10:27:59.691970Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task- ... 
task-9 (9 by [1:100:2134]) priority=5 resources={1, 1} 2025-06-03T10:27:59.692037Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-9 (9 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692039Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-1 (1 by [1:100:2134]) 2025-06-03T10:27:59.692044Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new unknown task task-10 (10 by [1:100:2134]) priority=5 resources={1, 1} 2025-06-03T10:27:59.692046Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-10 (10 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692049Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-1 (1 by [1:100:2134]) 2025-06-03T10:27:59.692052Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1000 (1000 by [1:100:2134]) (release resources {500, 500}) 2025-06-03T10:27:59.692057Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 1500.000000 2025-06-03T10:27:59.692061Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-1 (1 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692064Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692067Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 0.000000 to 2.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:27:59.692070Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-2 (2 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692073Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692075Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 2.000000 to 4.000000 (insert task task-2 (2 by [1:100:2134])) 2025-06-03T10:27:59.692078Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-3 (3 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692081Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692083Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 4.000000 to 6.000000 (insert task task-3 (3 by [1:100:2134])) 2025-06-03T10:27:59.692086Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-4 (4 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692089Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-4 (4 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692092Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 6.000000 to 8.000000 (insert task task-4 (4 by [1:100:2134])) 2025-06-03T10:27:59.692094Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-5 (5 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692097Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: 
Assigning in-fly task task-5 (5 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692100Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 8.000000 to 10.000000 (insert task task-5 (5 by [1:100:2134])) 2025-06-03T10:27:59.692103Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-6 (6 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692106Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-6 (6 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692109Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 10.000000 to 12.000000 (insert task task-6 (6 by [1:100:2134])) 2025-06-03T10:27:59.692111Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-7 (7 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692114Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-7 (7 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692117Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 12.000000 to 14.000000 (insert task task-7 (7 by [1:100:2134])) 2025-06-03T10:27:59.692120Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-8 (8 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692122Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-8 (8 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692127Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 14.000000 to 16.000000 (insert task task-8 (8 by [1:100:2134])) 2025-06-03T10:27:59.692130Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-9 (9 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692132Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-9 (9 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692135Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 16.000000 to 18.000000 (insert task task-9 (9 by [1:100:2134])) 2025-06-03T10:27:59.692138Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {1, 1} for task task-10 (10 by [1:100:2134]) from queue queue_default 2025-06-03T10:27:59.692140Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-10 (10 by [1:100:2134]) to queue queue_default 2025-06-03T10:27:59.692143Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_default from 18.000000 to 20.000000 (insert task task-10 (10 by [1:100:2134])) 2025-06-03T10:27:59.692152Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692155Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_default from 0.000000 to 20.000000 2025-06-03T10:27:59.692159Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692173Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-3 (3 by [1:100:2134]) (release 
resources {1, 1}) 2025-06-03T10:27:59.692177Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-4 (4 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692180Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-5 (5 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692184Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-6 (6 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692187Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-7 (7 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692190Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-8 (8 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692194Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-9 (9 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.692197Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-10 (10 by [1:100:2134]) (release resources {1, 1}) 2025-06-03T10:27:59.964056Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:27:59.964161Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [2:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:59.964169Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.964177Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-1 (1 by [2:100:2134]) from queue queue_compaction0 2025-06-03T10:27:59.964183Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.964195Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 800.000000 (insert task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:59.964224Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [2:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:59.964229Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.964234Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:59.964241Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-3 (3 by [2:100:2134]) priority=5 resources={400, 400} 2025-06-03T10:27:59.964245Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:100:2134]) to queue queue_compaction0 2025-06-03T10:27:59.964249Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) 2025-06-03T10:27:59.964268Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-3 (3 by [2:100:2134]) (priority=5 type=compaction1 resources={400, 400} resubmit=0) 2025-06-03T10:27:59.964273Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.964277Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-3 (3 by [2:100:2134]) 2025-06-03T10:27:59.964281Z 
node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:608: Skip queue queue_compaction0 blocked by an earlier queue 2025-06-03T10:27:59.964289Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-1 (1 by [2:100:2134]) (release resources {400, 400}) 2025-06-03T10:27:59.964296Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 800.000000 to 80.000000 (remove task task-1 (1 by [2:100:2134])) 2025-06-03T10:27:59.964301Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:582: Updated real resource usage for queue queue_compaction0 from 0.000000 to 80.000000 2025-06-03T10:27:59.964307Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {400, 400} for task task-3 (3 by [2:100:2134]) from queue queue_compaction1 2025-06-03T10:27:59.964311Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [2:100:2134]) to queue queue_compaction1 2025-06-03T10:27:59.964316Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 800.000000 (insert task task-3 (3 by [2:100:2134])) 2025-06-03T10:27:59.964321Z node 2 :RESOURCE_BROKER DEBUG: resource_broker.cpp:619: Not enough resources to start task task-2 (2 by [2:100:2134]) >> KqpPg::EmptyQuery+useSink |63.6%| [TA] $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::NoTableQuery-useSink [GOOD] >> KqpPg::PgCreateTable >> EncryptedExportTest::EncryptionAndChecksum [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> EncryptedBackupParamsValidationTest::EncryptionSettingsWithoutKeyImport [GOOD] Test command err: 2025-06-03T10:27:37.633868Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667837276807146:2236];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:37.633936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025f2/r3tmp/tmp1lZSAG/pdisk_1.dat 2025-06-03T10:27:37.743646Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:37.744565Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:37.744582Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:37.748857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2284, node 1 2025-06-03T10:27:37.765126Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:37.765136Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:37.765138Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:37.765178Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63861 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:37.792105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:37.813598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:38.148859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667841571775228:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:38.148893Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:38.195140Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667837276807207:2140] Handle TEvProposeTransaction 2025-06-03T10:27:38.195170Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667837276807207:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:27:38.195191Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667837276807207:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511667841571775249:2607] 2025-06-03T10:27:38.212270Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667841571775249:2607] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-03T10:27:38.212302Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511667841571775249:2607] txid# 281474976715658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:38.212497Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:7511667841571775249:2607] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:38.212512Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511667841571775249:2607] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:38.212555Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511667841571775249:2607] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:38.212611Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511667841571775249:2607] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:38.212626Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511667841571775249:2607] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-03T10:27:38.212693Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511667841571775249:2607] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:27:38.213153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.224550Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511667841571775249:2607] txid# 281474976715658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715658} 2025-06-03T10:27:38.224581Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511667841571775249:2607] txid# 281474976715658 SEND to# [1:7511667841571775248:2340] Source {TEvProposeTransactionStatus txid# 281474976715658 Status# 53} 2025-06-03T10:27:38.279652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667841571775386:2347], DatabaseId: /Root, PoolId: 
default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:38.279685Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:38.279776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667841571775391:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:38.279925Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667837276807207:2140] Handle TEvProposeTransaction 2025-06-03T10:27:38.279932Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667837276807207:2140] TxId# 281474976715659 ProcessProposeTransaction 2025-06-03T10:27:38.279945Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667837276807207:2140] Cookie# 0 userReqId# "" txid# 281474976715659 SEND to# [1:7511667841571775394:2721] 2025-06-03T10:27:38.280978Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667841571775394:2721] txid# 281474976715659 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-03T10:27:38.281006Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511667841571775394:2721] txid# 281474976715659 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:38.281010Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511667841571775394:2721] txid# 281474976715659 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-03T10:27:38.281536Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:7511667841571775394:2721] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:38.281553Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511667841571775394:2721] txid# 281474976715659 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:38.281590Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511667841571775394:2721] txid# 281474976715659 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:38.281635Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511667841571775394:2721] HANDLE EvNavigateKeySetResult, txid# 281474976715659 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:38.281649Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511667841571775394:2721] txid# 281474976715659 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715659 TabletId# 72057594046644480} 2025-06-03T10:27:38.281702Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511667841571775394:2721] txid# 281474976715659 HANDLE EvClientConnected 2025-06-03T10:27:38.282056Z node 1 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:27:38.283507Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511667841571775394:2721] txid# 281474976715659 Status StatusAccepted HANDLE {TEvModifySchemeTransact ... T_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046644480, at schemeshard: 72057594046644480, message: Source { RawX1: 7511667932482294372 RawX2: 4503745656260922 } Origin: 72075186224037890 State: 2 TxId: 281474976710761 Step: 0 Generation: 1 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-03T10:27:59.370400Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976710761, tablet: 72075186224037890, partId: 0 2025-06-03T10:27:59.370435Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710761:0, at schemeshard: 72057594046644480, message: Source { RawX1: 7511667932482294372 RawX2: 4503745656260922 } Origin: 72075186224037890 State: 2 TxId: 281474976710761 Step: 0 Generation: 1 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-03T10:27:59.370455Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TRestore TProposedWaitParts, opId: 281474976710761:0 HandleReply TEvSchemaChanged at tablet# 72057594046644480 message# Source { RawX1: 7511667932482294372 RawX2: 4503745656260922 } Origin: 72075186224037890 State: 2 TxId: 281474976710761 Step: 0 Generation: 1 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 } 2025-06-03T10:27:59.370474Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710761:0, shardIdx: 72057594046644480:3, datashard: 72075186224037890, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046644480 2025-06-03T10:27:59.370480Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976710761:0, at schemeshard: 72057594046644480 2025-06-03T10:27:59.370483Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976710761:0, datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-03T10:27:59.370490Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710761:0 129 -> 240 2025-06-03T10:27:59.370554Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TRestore, opId# 281474976710761:0, reason# domain is not a serverless db, domain# /Root, domainPathId# [OwnerId: 72057594046644480, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046644480, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:27:59.370625Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:59.371436Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710761:0, at schemeshard: 
72057594046644480 2025-06-03T10:27:59.371449Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:59.371455Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 281474976710761:0 2025-06-03T10:27:59.371486Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [34:7511667932482294372:2362] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 281474976710761 at schemeshard: 72057594046644480 2025-06-03T10:27:59.371532Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [34:7511667928187325869:2205], Recipient [34:7511667928187325869:2205]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:27:59.371540Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:27:59.371550Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046644480 2025-06-03T10:27:59.371560Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 281474976710761:0 ProgressState 2025-06-03T10:27:59.371578Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:27:59.371587Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:27:59.371592Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:27:59.371599Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:27:59.371601Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:27:59.371605Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-03T10:27:59.371622Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [34:7511667928187325869:2205] message: TxId: 281474976710761 2025-06-03T10:27:59.371629Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:27:59.371634Z node 34 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-03T10:27:59.371637Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710761:0 2025-06-03T10:27:59.371690Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-03T10:27:59.372227Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:27:59.372254Z node 34 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard__operation_side_effects.cpp:630: Send to actor: [34:7511667928187325869:2205] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710761 at schemeshard: 72057594046644480 2025-06-03T10:27:59.372299Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124998, Sender [34:7511667928187325869:2205], Recipient [34:7511667928187325869:2205]: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710761 2025-06-03T10:27:59.372309Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5040: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletionResult 2025-06-03T10:27:59.372311Z node 34 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:27:59.372314Z node 34 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:27:59.372326Z node 34 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:27:59.372329Z node 34 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:27:59.372379Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_xxport__tx_base.h:63: SendNotifications: : id# 281474976715662, subscribers count# 0 2025-06-03T10:27:59.373012Z node 34 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:27:59.373105Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [34:7511667932482294534:3190], Recipient [34:7511667928187325869:2205]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:59.373114Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:59.373116Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:27:59.457546Z node 34 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [34:7511667932482294546:2367] [0] Resolve database: name# /Root 2025-06-03T10:27:59.457810Z node 34 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [34:7511667932482294546:2367] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:27:59.457823Z node 34 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [34:7511667932482294546:2367] [0] Send request: schemeShardId# 72057594046644480 2025-06-03T10:27:59.457923Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [34:7511667932482294549:3203], Recipient [34:7511667928187325869:2205]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:59.457935Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 
2025-06-03T10:27:59.457938Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:59.457971Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 275251202, Sender [34:7511667932482294546:2367], Recipient [34:7511667928187325869:2205]: NKikimrImport.TEvGetImportRequest Request { Id: 281474976715662 } DatabaseName: "/Root" 2025-06-03T10:27:59.457975Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4995: StateWork, processing event TEvImport::TEvGetImportRequest 2025-06-03T10:27:59.458167Z node 34 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [34:7511667932482294546:2367] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715662 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:29917" scheme: HTTP bucket: "test_bucket" source_prefix: "Prefix" destination_path: "Root//RestorePrefix/" encryption_settings { symmetric_key { key: "Cool random key!" } } } StartTime { seconds: 1748946479 } EndTime { seconds: 1748946479 } } 2025-06-03T10:27:59.458420Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [34:7511667932482294549:3203], Recipient [34:7511667928187325869:2205]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:59.458432Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:27:59.458436Z node 34 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 >> KqpPg::CreateTableSerialColumns+useSink >> TCdcStreamTests::CheckSchemeLimits [GOOD] >> TCdcStreamTests::RebootSchemeShard >> EncryptedExportTest::EncryptionChecksumAndCompression |63.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange |63.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange >> KqpPg::EmptyQuery+useSink [GOOD] >> TCdcStreamWithInitialScanTests::WithPqTransactions [GOOD] >> KqpPg::InsertNoTargetColumns_Simple+useSink [GOOD] |63.6%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_table_writer/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpPg::InsertNoTargetColumns_Simple-useSink >> KqpPg::JoinWithQueryService+StreamLookup >> TCdcStreamWithInitialScanTests::AlterStream >> BackupRestore::TestAllPrimitiveTypes-INTERVAL [GOOD] >> TCdcStreamTests::RebootSchemeShard [GOOD] >> KqpPg::EmptyQuery-useSink >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalAsync [GOOD] >> KqpPg::CreateTableSerialColumns+useSink [GOOD] >> TCdcStreamTests::MeteringServerless >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalUnique [GOOD] >> TCdcStreamWithInitialScanTests::AlterStream [GOOD] >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree >> KqpPg::CreateTableSerialColumns-useSink >> KqpPg::InsertNoTargetColumns_Simple-useSink [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE32 >> SystemView::ShowCreateTablePartitionAtKeys >> KqpPg::InsertNoTargetColumns_Serial-useSink >> TCdcStreamWithInitialScanTests::DropStream >> KqpPg::EmptyQuery-useSink [GOOD] >> KqpPg::DuplicatedColumns+useSink |63.6%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_table/test-results/unittest/{meta.json ... 
results_accumulator.log} |63.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/ydb-core-tx-datashard-ut_change_exchange >> KqpPg::TypeCoercionBulkUpsert [GOOD] >> KqpPg::TypeCoercionInsert+useSink >> TCdcStreamWithInitialScanTests::DropStream [GOOD] >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx5 [GOOD] >> KqpPg::JoinWithQueryService+StreamLookup [GOOD] >> KqpPg::Insert_Serial+useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureMirror3dcCount6Idx5 [GOOD] Test command err: iteration# 5 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 11 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 17 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 23 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 29 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 35 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 41 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 47 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 53 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 59 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 65 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 71 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 77 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 83 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 89 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 95 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 101 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 107 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 113 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 119 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 125 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 131 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 137 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 143 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 149 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 155 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 161 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 167 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 173 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 
blobsUnwritten# 0 [iteration# 179 through iteration# 383, every 6th iteration, identical counters: BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0] iteration# 389 BlobsWritten# 490 
blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 395 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 401 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 407 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 413 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 419 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 425 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 431 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 437 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 443 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 449 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 455 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 461 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 467 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 473 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 479 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 iteration# 485 BlobsWritten# 490 blobsWrittenFull# 391 blobsWrittenAlmostFull# 99 blobsUnwritten# 0 >> KqpPg::CreateTableSerialColumns-useSink [GOOD] >> KqpPg::DropIndex >> KqpPg::DuplicatedColumns+useSink [GOOD] >> KqpPg::DuplicatedColumns-useSink >> TCdcStreamWithInitialScanTests::RacyAlterStreamAndRestart [GOOD] >> TCdcStreamWithInitialScanTests::MeteringServerless >> KqpPg::InsertNoTargetColumns_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault+useSink >> EncryptedExportTest::EncryptionChecksumAndCompression [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 >> KqpPg::Insert_Serial+useSink [GOOD] >> KqpPg::Insert_Serial-useSink >> TCdcStreamWithInitialScanTests::MeteringServerless [GOOD] >> TCdcStreamWithInitialScanTests::MeteringDedicated >> BackupRestoreS3::TestAllIndexTypes-EIndexTypeGlobalVectorKmeansTree [GOOD] >> BackupRestoreS3::PrefixedVectorIndex |63.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |63.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export |63.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export/ydb-core-tx-schemeshard-ut_export >> KqpPg::DropIndex [GOOD] >> KqpPg::CreateUniqPgColumn+useSink >> ListObjectsInS3Export::ExportWithSchemaMapping >> KqpPg::DuplicatedColumns-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder+useSink >> KqpPg::InsertValuesFromTableWithDefault+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefault-useSink >> SystemView::StoragePoolsRanges >> KqpPg::Insert_Serial-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText+useSink >> KqpPg::InsertFromSelect_NoReorder+useSink [GOOD] >> KqpPg::DropTablePg >> ListObjectsInS3Export::ExportWithSchemaMapping [GOOD] >> TSchemeshardBackgroundCompactionTest::ShouldCompactServerless [GOOD] >> 
TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable >> KqpPg::InsertValuesFromTableWithDefault-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx3 [GOOD] >> KqpPg::InsertFromSelect_Simple+useSink [GOOD] >> KqpPg::InsertFromSelect_Simple-useSink >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] >> KqpPg::TableArrayInsert+useSink [GOOD] >> KqpPg::TableArrayInsert-useSink >> ListObjectsInS3Export::ExportWithoutSchemaMapping >> BackupRestore::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 >> KqpPg::InsertValuesFromTableWithDefaultText+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultText-useSink >> KqpPg::CreateUniqPgColumn+useSink [GOOD] >> KqpPg::CreateUniqPgColumn-useSink >> TBSVWithReboots::CreateAssignWithVersion [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx3 [GOOD] Test command err: iteration# 3 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 9 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 15 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 21 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 27 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 33 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 39 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 45 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 51 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 57 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 63 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 69 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 75 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 81 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 87 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 93 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 99 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 105 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 111 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 117 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 123 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 129 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 135 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 
iteration# 141 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 [identical counters repeat for every 6th iteration through iteration# 681] iteration# 687 BlobsWritten# 2041 blobsWrittenFul ... blobsUnwritten# 1218 [identical counters repeat for every 6th iteration from iteration# 1365 through iteration# 1755] iteration# 1761 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 
blobsUnwritten# 1218 iteration# 1767 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1773 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1779 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1785 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1791 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1797 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1803 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1809 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1815 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1821 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1827 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1833 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1839 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1845 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1851 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1857 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1863 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1869 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1875 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1881 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1887 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1893 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1899 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1905 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1911 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1917 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1923 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1929 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1935 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1941 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1947 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1953 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1959 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1965 BlobsWritten# 2041 
blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1971 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1977 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1983 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1989 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1995 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2001 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2007 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2013 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2019 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2025 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2031 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2037 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPartitionChooserSuite::TPartitionChooserActor_SplitMergeEnabled_PreferedPartition_OtherPartition_Test [GOOD] Test command err: 2025-06-03T10:27:27.777428Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667793399025968:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:27.777481Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:27.853255Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002336/r3tmp/tmpLPK8iZ/pdisk_1.dat 2025-06-03T10:27:27.859250Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:27:27.861418Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667795023146140:2286];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:27.861606Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:27:27.930692Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:27.935783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:27.935837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:27.950221Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12615, node 1 2025-06-03T10:27:27.974544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:27.974576Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:27.985068Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:27:27.989564Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:27.997457Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002336/r3tmp/yandexqqeckd.tmp 2025-06-03T10:27:27.997471Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002336/r3tmp/yandexqqeckd.tmp 2025-06-03T10:27:27.997572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002336/r3tmp/yandexqqeckd.tmp 2025-06-03T10:27:27.997600Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:28.021445Z INFO: TTestServer started on Port 9517 GrpcPort 12615 TClient is connected to server localhost:9517 PQClient connected to localhost:12615 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:28.086794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:27:28.117862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:27:28.131075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:27:28.383455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667797693994149:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.383494Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.386400Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667797693994169:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.387631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:27:28.397774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667797693994202:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.397801Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:28.418524Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667797693994171:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:27:28.488227Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667797693994248:2724] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:27:28.492914Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:27:28.535692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:27:28.537472Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511667797693994307:2351], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:28.537949Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NTk0ZDI4YjYtM2JlMGM5NGQtZmQ4OTFkYzktOTljY2E0ODM=, ActorId: [1:7511667797693994130:2333], ActorState: ExecuteState, TraceId: 01jwtn9vzv32g9m1sz6ga10wr4, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:27:28.538497Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:27:28.556400Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511667799318113603:2315], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:27:28.557010Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=NmE2OWE5ZjktZmQ4OGU3ZTItMWVhYWVmMmMtNTM0ZDU5Y2Q=, ActorId: [2:7511667799318113564:2309], ActorState: ExecuteState, TraceId: 01jwtn9w4k5mwnj5tsnnymjsjf, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:27:28.557188Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:27:28.662671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:27:28.780865Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtn9wbk4w213wkg86728k2s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzQxMjc5MTctZWY5ZmU1YjItYWNjYzQ1N2It ... _ERROR remove tx with tx_id: 2025-06-03T10:27:59.930201Z node 10 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:28:00.021406Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:28:00.117786Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnatynewgj7szns6c3mj4x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=OTE4MDk0YTktM2NmMzY5MjItNzU2ODQ5MjMtMzBmYTMwZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. 
Subcribe to ClusterTracker from [9:7511667938148677299:3032] 2025-06-03T10:28:03.948051Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[9:7511667929558741304:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:03.948090Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:28:03.961424Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[10:7511667927577769959:2224];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:03.961457Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:28:05.196351Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:1, at schemeshard: 72057594046644480 2025-06-03T10:28:05.407363Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-06-03T10:28:05.524228Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480 2025-06-03T10:28:05.653479Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715686:0, at schemeshard: 72057594046644480 2025-06-03T10:28:05.786115Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715689:0, at schemeshard: 72057594046644480 2025-06-03T10:28:05.888246Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715692:0, at schemeshard: 72057594046644480 Run query: --!syntax_v1 UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES (16261273835729377752, "Root", "00415F536F757263655F3130", 1748946486013, 1748946486013, 0, 13); 2025-06-03T10:28:06.036914Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715698. Ctx: { TraceId: 01jwtnb0r3f3z142g9tybj1s97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=ZTZjZGI1NGUtZWJmOTUzM2YtMTc2MTAzNzQtODFiYzJiMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:06.049752Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:28:06.049765Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:28:06.049767Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:28:06.049775Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__sm_chooser_actor.h:116: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) GetOwnershipFast Partition=1 TabletId=1001 2025-06-03T10:28:06.049828Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 269877760, Sender [9:7511667963918482193:3628], Recipient [9:7511667959623514155:3200]: NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 1001 Status: OK ServerId: [9:7511667963918482192:3628] Leader: 1 Dead: 0 Generation: 1 VersionInfo: } 2025-06-03T10:28:06.049854Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 271188557, Sender [9:7511667963918482192:3628], Recipient [9:7511667959623514155:3200]: NKikimrPQ.TEvCheckPartitionStatusRequest Partition: 1 SourceId: "A_Source_10" 2025-06-03T10:28:06.049875Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:139: StateOwnershipFast, received event# 271188558, Sender [9:7511667959623514155:3200], Recipient [9:7511667963918482192:3628]: NKikimrPQ.TEvCheckPartitionStatusResponse Status: Active 2025-06-03T10:28:06.049888Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:88: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) InitTable: SourceId=A_Source_10 TopicsAreFirstClassCitizen=1 UseSrcIdMetaMappingInFirstClass=1 2025-06-03T10:28:06.049914Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_ut.cpp:382: StateMockWork, received event# 65543, Sender [9:7511667963918482192:3628], Recipient [9:7511667959623514155:3200]: NActors::TEvents::TEvPoison 2025-06-03T10:28:06.049977Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:101: StateInitTable, received event# 277020685, Sender [9:7511667929558741146:2069], Recipient [9:7511667963918482192:3628]: NKikimr::NMetadata::NProvider::TEvManagerPrepared 2025-06-03T10:28:06.049985Z node 9 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) StartKqpSession 2025-06-03T10:28:06.052180Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:132: StateCreateKqpSession, received event# 271646728, Sender [9:7511667929558741303:2211], Recipient [9:7511667963918482192:3628]: NKikimrKqp.TEvCreateSessionResponse Error: "" Response { SessionId: "ydb://session/3?node_id=9&id=N2E3NzFhZjgtYTM3YjA2MTMtZmI0MjRiOTQtZDkzNGRlNTY=" NodeId: 9 } YdbStatus: SUCCESS ResourceExhausted: false 2025-06-03T10:28:06.052196Z node 9 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) Select from the table Received TEvChooseError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. 2025-06-03T10:28:06.084812Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:163: StateSelect, received event# 271646721, Sender [9:7511667929558741303:2211], Recipient [9:7511667963918482192:3628]: NKikimrKqp.TEvQueryResponse Response { SessionId: "ydb://session/3?node_id=9&id=N2E3NzFhZjgtYTM3YjA2MTMtZmI0MjRiOTQtZDkzNGRlNTY=" PreparedQuery: "67476054-b7a51032-91ace31d-41ed176e" QueryParameters { Name: "$Hash" Type { Kind: Data Data { Scheme: 4 } } } QueryParameters { Name: "$Topic" Type { Kind: Data Data { Scheme: 4608 } } } QueryParameters { Name: "$SourceId" Type { Kind: Data Data { Scheme: 4608 } } } TxMeta { id: "01jwtnb0sv5v6yrr303rwcap13" } YdbResults { columns { name: "Partition" type { optional_type { item { type_id: UINT32 } } } } columns { name: "CreateTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "AccessTime" type { optional_type { item { type_id: UINT64 } } } } columns { name: "SeqNo" type { optional_type { item { type_id: UINT64 } } } } rows { items { uint32_value: 0 } items { uint64_value: 1748946486013 } items { uint64_value: 1748946486013 } items { uint64_value: 13 } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 14 2025-06-03T10:28:06.084873Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:151: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) Selected from table PartitionId=0 SeqNo=13 2025-06-03T10:28:06.084880Z node 9 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__sm_chooser_actor.h:209: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) OnPartitionChosen 2025-06-03T10:28:06.084887Z node 9 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [9:7511667963918482192:3628] (SourceId=A_Source_10, PreferedPartition=1) ReplyError: MessageGroupId A_Source_10 is already bound to PartitionGroupId 1, but client provided 2. MessageGroupId->PartitionGroupId binding cannot be changed, either use another MessageGroupId, specify PartitionGroupId 1, or do not specify PartitionGroupId at all. 
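The ReplyError above reflects the chooser's binding rule: once a SourceId has a row in `//Root/.metadata/TopicPartitionsMapping`, the stored PartitionId fixes the only acceptable PartitionGroupId (here PartitionId 0 binds PartitionGroupId 1, so the client's 2 is rejected; judging from this log, PartitionGroupId = PartitionId + 1). A minimal self-contained sketch of that check, with hypothetical names rather than the actual TPartitionChooser code:

#include <cstdint>
#include <optional>
#include <string>

// Hypothetical sketch of the binding rule seen in the log: the stored
// PartitionId (0 here) implies PartitionGroupId = PartitionId + 1 (1 here),
// and any other explicitly requested group id must be rejected.
std::optional<std::string> CheckBinding(uint32_t storedPartitionId,
                                        std::optional<uint32_t> requestedGroupId) {
    if (!requestedGroupId) {
        return std::nullopt;                    // no explicit group id: reuse the binding
    }
    const uint32_t boundGroupId = storedPartitionId + 1;
    if (*requestedGroupId != boundGroupId) {
        return "MessageGroupId is already bound to PartitionGroupId " +
               std::to_string(boundGroupId) + ", but client provided " +
               std::to_string(*requestedGroupId);
    }
    return std::nullopt;                        // binding matches, the write proceeds
}

int main() {
    // Reproduces the rejected case from the log: stored PartitionId 0, client asked for 2.
    return CheckBinding(0, 2) ? 0 : 1;
}
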
Run query: --!syntax_v1 SELECT Partition, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash = 16261273835729377752 AND Topic = "Root" AND ProducerId = "00415F536F757263655F3130" 2025-06-03T10:28:06.121491Z node 9 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715702. Ctx: { TraceId: 01jwtnb0tecr4g8grj03wz7d7f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=9&id=ZDcwNGM0YTEtNDQwOGVlNjYtNWZhNTU1YTgtYmE0M2ViM2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> BackupRestoreS3::PrefixedVectorIndex [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-BOOL >> KqpPg::InsertValuesFromTableWithDefaultAndCast+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx5 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignWithVersion [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:27:44.172279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:44.172316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:44.172322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:44.172327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:44.172345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:44.172350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:44.172360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:44.172375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, 
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:44.172506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:44.172580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:44.190760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:27:44.190782Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:44.190893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:27:44.195945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:44.196061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:44.196101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:44.198377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:44.198445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:44.198586Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.198638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:44.199072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:44.199126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:44.199421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:44.199432Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:44.199450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:44.199458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:44.199465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:44.199512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 
72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:27:44.201400Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:27:44.223609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:44.223704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.223785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:44.223827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:44.223838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.227725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.227779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:44.227850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.227863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:44.227868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:44.227875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:44.228574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.228591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:44.228598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:44.229091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.229108Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.229115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:44.229124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:44.230064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:44.230584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:44.230633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:44.230876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.230909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:44.230917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:44.230989Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
G: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:28:06.905247Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:28:06.905269Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:28:06.905275Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [60:389:2368] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:28:06.905401Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:06.905466Z node 60 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 79us result status StatusSuccess 2025-06-03T10:28:06.905567Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "Owner123" TokenVersion: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 1003 2025-06-03T10:28:06.906368Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/DirA" OperationType: ESchemeOpAssignBlockStoreVolume AssignBlockStoreVolume { Name: "BSVolume_4" NewMountToken: "Owner124" TokenVersion: 1 } } TxId: 1003 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:06.906419Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_assign_bsv.cpp:25: TAssignBlockStoreVolume 
Propose, path: /MyRoot/DirA/BSVolume_4, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:28:06.906460Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1003:1, propose status:StatusSuccess, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:06.906468Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAssignBlockStoreVolume, opId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:28:06.906494Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:28:06.906500Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:28:06.906507Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:28:06.906511Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:28:06.906527Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:28:06.906539Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-03T10:28:06.906546Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:28:06.906551Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-06-03T10:28:06.906558Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-03T10:28:06.906563Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:28:06.909993Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1003, response: Status: StatusSuccess TxId: 1003 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:06.910053Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1003, database: /MyRoot, subject: , status: StatusSuccess, operation: ALTER BLOCK STORE VOLUME ASSIGN, path: /MyRoot/DirA/BSVolume_4 2025-06-03T10:28:06.910112Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:06.910121Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:28:06.910180Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:06.910187Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [60:206:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 3 
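The surrounding records trace schemeshard's publication handshake for txId 1003: the path description is sent to the populator, each TEvUpdateAck decrements the in-flight publication count ("Publication in-flight, count: 1"), and "Publication complete, notify & remove" fires once the count reaches zero. A minimal sketch of that ack-counting pattern, with hypothetical types rather than the actual schemeshard code:

#include <cstdint>
#include <map>

// Hypothetical sketch of the publish-then-ack pattern in the log: a transaction
// publishes N path versions and is considered published once every ack arrives.
class TPublisher {
public:
    void Publish(uint64_t txId, int pathCount) {
        InFlight[txId] = pathCount;             // descriptions handed to the populator
    }
    // Returns true when the last ack for txId arrives ("Publication complete").
    bool OnUpdateAck(uint64_t txId) {
        auto it = InFlight.find(txId);
        if (it == InFlight.end())
            return false;                       // unknown or already completed tx
        if (--it->second > 0)
            return false;                       // "Publication in-flight, count: N"
        InFlight.erase(it);                     // "notify & remove"
        return true;
    }
private:
    std::map<uint64_t, int> InFlight;
};

int main() {
    TPublisher publisher;
    publisher.Publish(1003, 1);                 // txId 1003 publishes one path id
    return publisher.OnUpdateAck(1003) ? 0 : 1; // a single ack completes it
}
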
2025-06-03T10:28:06.910342Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:28:06.910356Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:28:06.910362Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:28:06.910368Z node 60 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:28:06.910394Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:28:06.910426Z node 60 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:28:06.913633Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-03T10:28:06.913749Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:28:06.913761Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:28:06.913864Z node 60 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:28:06.913898Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:28:06.913905Z node 60 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [60:400:2379] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:28:06.914032Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:06.914108Z node 60 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 98us result status StatusSuccess 2025-06-03T10:28:06.914216Z node 60 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "Owner124" TokenVersion: 2 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> KqpPg::DropTablePg [GOOD]
>> KqpPg::DropTablePgMultiple
>> ListObjectsInS3Export::ExportWithoutSchemaMapping [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx5 [GOOD]
Test command err: iteration# 5 .. iteration# 2039, step 6 (stream truncated by the test framework between iteration# 689 and iteration# 1367); every reported iteration carries the same counters: BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218
>> KqpPg::InsertValuesFromTableWithDefaultText-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink
>> BackupRestore::TestAllPrimitiveTypes-INTERVAL64 [GOOD]
>> BackupRestore::TestAllPrimitiveTypes-JSON
>> SystemView::VSlotsFields
>> KqpPg::InsertValuesFromTableWithDefaultAndCast-useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultBool+useSink
>> ListObjectsInS3Export::ExportWithEncryption
>> SystemView::ShowCreateTableDefaultLiteral
>> KqpPg::CreateUniqPgColumn-useSink [GOOD]
>> KqpPg::CreateUniqComplexPgColumn+useSink
>> KqpPg::DropTablePgMultiple [GOOD]
>> KqpPg::DropTableIfExists
>> SystemView::PartitionStatsOneSchemeShard
>> BackupRestoreS3::TestAllPrimitiveTypes-BOOL [GOOD]
>> BackupRestoreS3::TestAllPrimitiveTypes-DATE
>> KqpPg::CreateTableBulkUpsertAndRead [GOOD]
>> KqpPg::CopyTableSerialColumns+useSink
>> SystemView::CollectPreparedQueries
>> KqpPg::InsertValuesFromTableWithDefaultTextNotNull+useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink
>> ListObjectsInS3Export::ExportWithEncryption [GOOD]
>> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx1 [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultBool+useSink [GOOD]
>> KqpPg::InsertValuesFromTableWithDefaultBool-useSink
>> KqpPg::TypeCoercionInsert-useSink [GOOD]
>> KqpPg::V1CreateTable
>> KqpPg::TypeCoercionInsert+useSink [GOOD]
>> KqpPg::TableSelect+useSink
>> CompositeConveyorTests::TestUniformScopesDistribution [GOOD]
>> SystemView::ShowCreateTablePartitionAtKeys [GOOD]
>> SystemView::ShowCreateTablePartitionByHash
>> KqpPg::DropTableIfExists [GOOD]
>> KqpPg::DropTableIfExists_GenericQuery
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx1 [GOOD]
Test command err: iteration# 1 .. iteration# 2035, step 6 (stream truncated by the test framework between iteration# 685 and iteration# 1363); every reported iteration carries the same counters: BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218
>> ListObjectsInS3Export::ExportWithWrongEncryptionKey
>> KqpPg::CreateUniqComplexPgColumn+useSink [GOOD]
>> KqpPg::CreateUniqComplexPgColumn-useSink
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/conveyor_composite/ut/unittest >> CompositeConveyorTests::TestUniformScopesDistribution [GOOD]
Test command err: {1:106476};{2:81232};{3:73809}; {1:119803};{2:89314};{3:84512}; {1:135326};{2:105365};{3:100266}; {1:165753};{2:152129};{3:120731}; {1:225592};{2:202561};{3:146503}; {1:291077};{2:244601};{3:173967}; {1:338366};{2:279794};{3:238582}; {1:373116};{2:343874};{3:286752}; {1:423337};{2:412468};{3:314223}; {1:464715};{2:468103};{3:337773}; {1:494379};{2:499158};{3:361295}; {1:513987};{2:535777};{3:383238}; {1:546150};{2:564558};{3:462912}; {1:601300};{2:598984};{3:541342}; {1:660038};{2:645969};{3:627770}; {1:738035};{2:703470};{3:691960}; {1:809647};{2:760766};{3:720426}; {1:857673};{2:809089};{3:758101}; {1:896736};{2:854422};{3:827091}; {1:924300};{2:908530};{3:897678}; {1:957027};{2:954348};{3:960078}; {1:985790};{2:985684};{3:996803}; {1:1000000};{2:995121};{3:1000000}; {1:1000000};{2:1000000};{3:1000000}; 72us per task 22.046109s;23.048592s;22.046120s;
>>
TBSVWithReboots::CreateAlterNoVersion [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON [GOOD] >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT >> KqpPg::CopyTableSerialColumns+useSink [GOOD] >> KqpPg::CopyTableSerialColumns-useSink >> SystemView::CollectPreparedQueries [GOOD] >> SystemView::CollectScanQueries >> KqpPg::InsertValuesFromTableWithDefaultTextNotNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink >> KqpPg::InsertValuesFromTableWithDefaultBool-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink >> KqpPg::DropTableIfExists_GenericQuery [GOOD] >> KqpPg::EquiJoin+useSink >> ListObjectsInS3Export::ExportWithWrongEncryptionKey [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAlterNoVersion [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:27:43.197316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:43.197356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.197364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:43.197372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:43.197394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:43.197400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:43.197412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.197428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:43.197580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:43.197685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:43.226803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:27:43.226845Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:43.227024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:27:43.232041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:43.232261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:43.232313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:43.234953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:43.235022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:43.235218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.235318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:43.236269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:43.236349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:43.236761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:43.236781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:43.236804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:43.236831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:43.236840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:43.236896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: 
[1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:27:43.239264Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:27:43.264648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:43.264756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.264841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:43.264893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:43.264906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.266102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.266141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:43.266208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.266223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:43.266230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:43.266237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:43.267199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.267218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:43.267226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:43.267752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.267766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose 
ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.267773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:43.267781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:43.268552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:43.269241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:43.269311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:43.269589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.269627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:43.269637Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:43.269724Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 272761856 2025-06-03T10:28:10.160909Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 0, tablet: 72075186233409547 2025-06-03T10:28:10.160951Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409547, partId: 0 2025-06-03T10:28:10.160971Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxId: 1002 Origin: 72075186233409547 Status: OK 2025-06-03T10:28:10.160977Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:23: NBSVState::TConfigureParts operationId: 1002:0 HandleReply TEvSetConfigResult, at schemeshard: 72057594046678944 2025-06-03T10:28:10.160982Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1002:0 3 -> 128 2025-06-03T10:28:10.161352Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:28:10.161378Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:28:10.161385Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:192: NBSVState::TPropose operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:10.161392Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1002 ready parts: 1/1 2025-06-03T10:28:10.161419Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1002 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:10.161739Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1002 msg type: 269090816 2025-06-03T10:28:10.161762Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1002 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1002 at step: 5000004 2025-06-03T10:28:10.161829Z node 78 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:10.161849Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1002 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 335007451244 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:10.161857Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1002:0 HandleReply TEvOperationPlan, at 
schemeshard: 72057594046678944 2025-06-03T10:28:10.161888Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1002:0 128 -> 240 2025-06-03T10:28:10.161913Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:28:10.162472Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:10.162486Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:28:10.162528Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:10.162534Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [78:206:2207], at schemeshard: 72057594046678944, txId: 1002, path id: 3 2025-06-03T10:28:10.162619Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:28:10.162628Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-03T10:28:10.162641Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:28:10.162645Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:28:10.162651Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:28:10.162655Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:28:10.162660Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: false 2025-06-03T10:28:10.162666Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:28:10.162671Z node 78 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:28:10.162676Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:28:10.162711Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-03T10:28:10.162717Z node 78 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, publications: 1, subscribers: 0 2025-06-03T10:28:10.162722Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:28:10.162845Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:28:10.162859Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:28:10.162864Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:28:10.162869Z node 78 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:28:10.162874Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:28:10.162888Z node 78 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 0 2025-06-03T10:28:10.163587Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-03T10:28:10.163645Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-03T10:28:10.163655Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:28:10.163725Z node 78 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:28:10.163744Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:28:10.163750Z node 78 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [78:409:2388] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:28:10.163830Z node 78 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:10.163872Z node 78 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_2" took 52us result status StatusSuccess 2025-06-03T10:28:10.163964Z node 78 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_2" PathDescription { Self { Name: "BSVolume_2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 2 } ChildrenExist: false } 
DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_2" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 32 } Partitions { BlockCount: 32 } Version: 2 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } Partitions { PartitionId: 1 TabletId: 72075186233409548 } VolumeTabletId: 72075186233409547 AlterVersion: 2 MountToken: "" TokenVersion: 0 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> SystemView::Nodes >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink >> SystemView::CollectScanQueries [GOOD] >> SystemView::AuthUsers >> KqpPg::InsertNoTargetColumns_SerialNotNull+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] >> SystemView::PartitionStatsOneSchemeShard [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery >> KqpPg::CreateUniqComplexPgColumn-useSink [GOOD] >> KqpPg::CreateTempTable >> SystemView::AuthGroups_Access >> ListObjectsInS3Export::PagingParameters >> KqpPg::CopyTableSerialColumns-useSink [GOOD] >> KqpPg::CreateIndex >> KqpPg::InsertValuesFromTableWithDefaultTextNotNullButNull-useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink >> KqpPg::EquiJoin+useSink [GOOD] >> KqpPg::EquiJoin-useSink >> BackupRestore::TestAllPrimitiveTypes-JSON_DOCUMENT [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER >> SystemView::StoragePoolsRanges [GOOD] >> SystemView::TopPartitionsByCpuFields ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamWithInitialScanTests::MeteringDedicated [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:51.145751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:51.145786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:51.145806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 
0.010000s 2025-06-03T10:27:51.145813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:51.145834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:51.145838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:51.145850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:51.145868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:51.145997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:51.146094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:51.179878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:51.179914Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:51.206769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:51.206940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:51.206998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:51.226030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:51.226125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:51.226297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:51.226373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:51.227312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:51.227383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:51.227822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:51.227855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:51.227869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:51.227881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a 
bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:51.227888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:51.227918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.229911Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:51.256506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:51.256596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.256714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:51.256781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:51.256793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.257813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:51.257854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:51.257924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.257938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:51.257945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:51.257952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:51.258512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.258524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:51.258531Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 
2025-06-03T10:27:51.258888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.258897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:51.258904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:51.258913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:51.259752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:51.260702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:51.260759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:51.260988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:51.261024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:51.261035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:51.261124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:51.261132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:51.261177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:51.261191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:51.261798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:51.261810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:51.261876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 3T10:28:05.666893Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72075186233409546, cookie: 281474976715657 2025-06-03T10:28:05.666898Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 281474976715657 2025-06-03T10:28:05.666906Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 281474976715657, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 5 2025-06-03T10:28:05.666910Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 6 2025-06-03T10:28:05.666919Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 2/3, is published: true 2025-06-03T10:28:05.677566Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-03T10:28:05.677614Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 281474976715657 2025-06-03T10:28:05.700528Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 246 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-03T10:28:05.700562Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-03T10:28:05.700608Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 246 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-03T10:28:05.700629Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72075186233409546, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 281474976715657 Step: 250 OrderId: 281474976715657 ExecLatency: 0 ProposeLatency: 4 DomainCoordinators: 72075186233409547 TxStats { PerShardStats { ShardId: 72075186233409552 
CpuTimeUsec: 246 } } CommitVersion { Step: 250 TxId: 281474976715657 } 2025-06-03T10:28:05.700900Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72075186233409546, at schemeshard: 72075186233409546, message: Source { RawX1: 755 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-03T10:28:05.700909Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976715657, tablet: 72075186233409552, partId: 1 2025-06-03T10:28:05.700925Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546, message: Source { RawX1: 755 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-03T10:28:05.700934Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvSchemaChanged at tablet: 72075186233409546 2025-06-03T10:28:05.700944Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 281474976715657:1 HandleReply TEvSchemaChanged at tablet: 72075186233409546 message: Source { RawX1: 755 RawX2: 81604381267 } Origin: 72075186233409552 State: 2 TxId: 281474976715657 Step: 0 Generation: 2 2025-06-03T10:28:05.700965Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976715657:1, shardIdx: 72075186233409546:4, datashard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72075186233409546 2025-06-03T10:28:05.700970Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-03T10:28:05.700976Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976715657:1, datashard: 72075186233409552, at schemeshard: 72075186233409546 2025-06-03T10:28:05.700986Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715657:1 129 -> 240 2025-06-03T10:28:05.703257Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-03T10:28:05.703433Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-03T10:28:05.703580Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715657:1, at schemeshard: 72075186233409546 2025-06-03T10:28:05.703595Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72075186233409546] TDone opId# 281474976715657:1 ProgressState 2025-06-03T10:28:05.703619Z node 19 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-03T10:28:05.703626Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-03T10:28:05.703634Z node 19 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976715657:1 progress is 3/3 2025-06-03T10:28:05.703638Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-03T10:28:05.703643Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715657, ready parts: 3/3, is published: true 2025-06-03T10:28:05.703652Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715657 ready parts: 3/3 2025-06-03T10:28:05.703660Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-03T10:28:05.703666Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715657:0 2025-06-03T10:28:05.703684Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 3 2025-06-03T10:28:05.703691Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:1 2025-06-03T10:28:05.703695Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715657:1 2025-06-03T10:28:05.703715Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 5 2025-06-03T10:28:05.703725Z node 19 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:2 2025-06-03T10:28:05.703729Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715657:2 2025-06-03T10:28:05.703735Z node 19 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-03T10:28:08.508738Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-03T10:28:08.508847Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 143us result status StatusNameConflict 2025-06-03T10:28:08.508903Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 
} } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-03T10:28:11.002694Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 4 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-03T10:28:11.002794Z node 19 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72075186233409546 describe pathId 4 took 115us result status StatusNameConflict 2025-06-03T10:28:11.002843Z node 19 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/Shared/Table/Stream/streamImpl\', error: path is not a common path (id: [OwnerId: 72075186233409546, LocalPathId: 4], type: EPathTypePersQueueGroup, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Shared/Table/Stream/streamImpl" PathId: 4 LastExistedPrefixPath: "/MyRoot/Shared/Table/Stream/streamImpl" LastExistedPrefixPathId: 4 LastExistedPrefixDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72075186233409546 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 106 CreateStep: 200 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeStreamImpl ChildrenExist: false BalancerTabletID: 72075186233409554 } } PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] >> ShowCreateView::WithTablePathPrefix >> KqpPg::CreateTempTable [GOOD] >> KqpPg::CreateTempTableSerial >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_SerialNotNull-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 19713, MsgBus: 16773 2025-06-03T10:28:00.729848Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667936770427366:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:00.730071Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a3/r3tmp/tmp21xruM/pdisk_1.dat 2025-06-03T10:28:00.852179Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:00.852809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:00.852822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:00.854051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 19713, node 1 2025-06-03T10:28:00.912522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:00.912534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:00.912536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from 
file: (empty maybe) 2025-06-03T10:28:00.912575Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16773 TClient is connected to server localhost:16773 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:01.131668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:01.135132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:28:01.427351Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667941065395121:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.427381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.427518Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667941065395157:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.428326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:01.430957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-03T10:28:01.431049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667941065395159:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-03T10:28:01.526411Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667941065395210:2325] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:01.565044Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 4899, MsgBus: 6254 2025-06-03T10:28:02.173223Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667942972159150:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:02.173380Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a3/r3tmp/tmpTYcFHw/pdisk_1.dat 2025-06-03T10:28:02.194013Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4899, node 2 2025-06-03T10:28:02.228411Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:02.228429Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:02.228431Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:02.228472Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6254 2025-06-03T10:28:02.274108Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:02.274137Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:02.275343Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6254 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:28:02.349973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:02.351762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:02.726618Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667942972159726:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:02.726645Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:02.726819Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667942972159763:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:02.727619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:02.730792Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667942972159765:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:02.809258Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667942972159816:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:02.816237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 1300, MsgBus: 7845 2025-06-03T10:28:03.330047Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667947874062740:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:03.334379Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /hom ... ted to server localhost:28370 TClient is connected to server localhost:28370 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:10.428322Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:10.428361Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:10.429396Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:10.432757Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:10.767903Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667980510909323:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:10.767927Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667980510909312:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:10.767948Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:10.768796Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:10.770952Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511667980510909326:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:10.844288Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511667980510909377:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:10.849354Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:10.865104Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:28:10.881793Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7511667980510909610:2357], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-03T10:28:10.881922Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=10&id=OGQwODE3YTktMmRlZmU2ZTQtOTg1ZGE0ZjEtMjhlNGYzNTM=, ActorId: [10:7511667980510909608:2356], ActorState: ExecuteState, TraceId: 01jwtnb5fv4sp0bv57d4zsrmq9, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: Trying to start YDB, gRPC: 11232, MsgBus: 2019 2025-06-03T10:28:11.278755Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7511667982970391261:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.280176Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a3/r3tmp/tmpb8bYP8/pdisk_1.dat 2025-06-03T10:28:11.298786Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11232, node 11 2025-06-03T10:28:11.318696Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:11.318714Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:11.318716Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:11.318784Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2019 TClient is connected to server localhost:2019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:11.389711Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.389747Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.390254Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:11.390961Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:28:11.392230Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:11.758798Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667982970391711:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:11.758823Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:11.758908Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667982970391746:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:11.759518Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:11.761401Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:11.761452Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511667982970391748:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:11.842391Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511667982970391799:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:11.849650Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:11.932609Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:28:11.954569Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7511667982970392035:2357], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Missing not null column in input: c. All not null columns should be initialized, code: 2032 2025-06-03T10:28:11.955269Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=11&id=MmU3NGExMDAtNjJmYzlhZDItZDkzNzE5ZmUtYmRhNTVjOTA=, ActorId: [11:7511667982970392033:2356], ActorState: ExecuteState, TraceId: 01jwtnb6heaknxf76zwsmrdj9v, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: >> KqpPg::CreateIndex [GOOD] >> KqpPg::CreateNotNullPgColumn >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase+useSink [GOOD] >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink >> KqpPg::EquiJoin-useSink [GOOD] >> KqpPg::ExplainColumnsReorder >> KqpPg::PgCreateTable [GOOD] >> KqpPg::PgUpdate+useSink >> KqpPg::CreateTempTableSerial [GOOD] >> KqpPg::DropSequence >> DbCounters::TabletsSimple >> KqpPg::InsertFromSelect_Simple-useSink [GOOD] >> KqpPg::InsertFromSelect_NoReorder-useSink >> KqpPg::CreateNotNullPgColumn [GOOD] >> KqpPg::CreateSequence >> TPQTest::TestPQRead [GOOD] >> TPQTest::TestPQReadAhead >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] >> SystemView::PartitionStatsOneSchemeShardDataQuery [GOOD] >> SystemView::PgTablesOneSchemeShardDataQuery >> SystemView::Nodes [GOOD] >> SystemView::PartitionStatsFields >> KqpPg::PgUpdate+useSink [GOOD] >> KqpPg::PgUpdate-useSink |63.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertValuesFromTableWithDefaultNegativeCase-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 28554, MsgBus: 19895 2025-06-03T10:28:02.822110Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667944883240722:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:02.822174Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00239d/r3tmp/tmp3ypWbD/pdisk_1.dat 2025-06-03T10:28:02.923624Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:02.927871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:02.927905Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 28554, node 1 2025-06-03T10:28:02.931735Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:02.956794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:02.956808Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:02.956810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:02.956863Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19895 TClient is connected to server localhost:19895 WaitRootIsUp 
'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:28:03.098326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:03.116981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:03.435983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667949178208504:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.436004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.443802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:03.515640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667949178208610:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.515666Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.517972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:28:03.536973Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667949178208688:2351], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.536993Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.537089Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667949178208693:2354], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.538396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:28:03.543694Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667949178208695:2355], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:28:03.642921Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667949178208746:2431] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 63227, MsgBus: 31751 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00239d/r3tmp/tmp4jb3WI/pdisk_1.dat 2025-06-03T10:28:04.057814Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:04.097103Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63227, node 2 2025-06-03T10:28:04.128999Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:04.129013Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:04.129014Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:04.129067Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:04.154177Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:04.154209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:04.155282Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31751 TClient is connected to server localhost:31751 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:04.204656Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
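Note: the NOT_FOUND warnings above are normal first-query bootstrap rather than failures: the workload service looks up the `default` resource pool before its lazy creation has committed, schedules a retry on "Transaction ... completed, doublechecking", and the later "path exist, request accepts it" message from schemereq is the idempotent re-create landing after the pool already exists. For context, a hedged sketch of declaring a resource pool explicitly; the statement exists in YQL, but the parameter names below follow recent YDB workload-manager documentation and should be treated as illustrative:

    CREATE RESOURCE POOL my_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- how many queries the pool runs concurrently
        QUEUE_SIZE = 100              -- how many queries may wait for a free slot
    );

Queries that do not name a pool are routed to `default`, so every fresh test database goes through this create-on-first-use sequence once.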
2025-06-03T10:28:04.219743Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:04.539533Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667954170549588:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.539576Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.539748Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667954170549623:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.540569Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:04.542687Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667954170549625:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:04.610142Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667954170549676:2326] txid# 281474976715659, issues: { message: "Check failed: path: \ ... FIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:11.977919Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:11.977990Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11978 2025-06-03T10:28:12.018746Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:12.018788Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:12.019553Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11978 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:12.053081Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:12.448021Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667989047903754:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.448021Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511667989047903779:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.448049Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.449025Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:12.451725Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511667989047903783:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:12.534483Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511667989047903834:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:12.541242Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7511667989047903843:2335], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-03T10:28:12.542086Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=10&id=YTc2ZDRjMWYtMzU3N2QxNWMtN2ZmMmFmNjYtYzI3NGQ4MA==, ActorId: [10:7511667989047903752:2326], ActorState: ExecuteState, TraceId: 01jwtnb6n098tgqf7xxqgj3kbf, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" Trying to start YDB, gRPC: 17788, MsgBus: 14518 2025-06-03T10:28:12.905837Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7511667989688404690:2141];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00239d/r3tmp/tmpY8W7Zu/pdisk_1.dat 2025-06-03T10:28:12.907000Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:12.939303Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17788, node 11 2025-06-03T10:28:12.962451Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:12.962465Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:12.962468Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:12.962533Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14518 TClient is connected to server localhost:14518 2025-06-03T10:28:13.014142Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:13.014173Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:13.014845Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:13.021246Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
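Note: the GENERIC_ERROR above is the negative case this test drives: a column default whose literal cannot be converted to the declared PostgreSQL type. KiCreateTable parses the default expr at CREATE TABLE time, so the failure surfaces as a compile error rather than at INSERT time. A minimal sketch in the PostgreSQL syntax the KqpPg suite runs under (table and column names are hypothetical):

    CREATE TABLE t (
        id int4 PRIMARY KEY,
        v  int4 DEFAULT 'text'  -- 'text' is not a valid integer literal, so the CREATE is rejected
    );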
2025-06-03T10:28:13.022745Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:13.361670Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667993983372520:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.361704Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.361777Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667993983372547:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.362567Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:13.364450Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511667993983372549:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:13.442376Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511667993983372600:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:13.444913Z node 11 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [11:7511667993983372617:2335], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" 2025-06-03T10:28:13.444993Z node 11 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=11&id=NWRjNzk2OTMtNjhjMzVhNTktYWQ4MTljNmItNDVjMjkyYzc=, ActorId: [11:7511667993983372518:2326], ActorState: ExecuteState, TraceId: 01jwtnb7k2av0phfyarbk8rm9w, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiCreateTable!
:1:1: Error: Failed to parse default expr for typename int4, error reason: Error while converting text to binary: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: invalid input syntax for type integer: "text" >> KqpPg::CreateSequence [GOOD] >> KqpPg::AlterSequence ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestore::TestAllPrimitiveTypes-DYNUMBER [GOOD] Test command err: 2025-06-03T10:27:36.688353Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667831458670149:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:36.688454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpNtg316/pdisk_1.dat 2025-06-03T10:27:36.880936Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11547, node 1 2025-06-03T10:27:36.916772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:36.916787Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:36.916790Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:36.916836Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14997 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:37.003114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:27:37.036844Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:37.036873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:37.039798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:37.370680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667835753638261:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:37.370737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:37.413641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 Backup "/Root" to "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/"Create temporary directory "/Root/~backup_20250603T102737" in database2025-06-03T10:27:37.557910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 Process "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table"Copy tables: { src: "/Root/table", dst: "/Root/~backup_20250603T102737/table" }Describe table "/Root/table"Describe table "/Root/~backup_20250603T102737/table"Backup table "/Root/~backup_20250603T102737/table" to "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table"Write scheme into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/permissions.pb"Read table "/Root/~backup_20250603T102737/table"Write data into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/data_00.csv"Drop table "/Root/~backup_20250603T102737/table"Remove temporary directory "/Root/~backup_20250603T102737" in database2025-06-03T10:27:37.744657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715664:0, at schemeshard: 72057594046644480 Backup completed successfully2025-06-03T10:27:37.759713Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-03T10:27:37.759728Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-03T10:27:37.759731Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037893 not found 2025-06-03T10:27:37.762039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667835753639174:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:37.762147Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Restore "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/" to "/Root"2025-06-03T10:27:37.857145Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:27:37.857168Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:27:37.857171Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table"}]Process "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table"Read scheme from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/scheme.pb"Restore table "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table" to "/Root/table"2025-06-03T10:27:37.880902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 Created "/Root/table"Read data from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/data_00.csv"Restore index "byValue" on "/Root/table"2025-06-03T10:27:37.975246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:27:38.038860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.106122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.274053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710762:0, at schemeshard: 72057594046644480 2025-06-03T10:27:38.373422Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037897 not found 2025-06-03T10:27:38.373436Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037898 not found Restore ACL "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table" to "/Root/table"Read ACL from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpHLGUx7/table/permissions.pb"2025-06-03T10:27:38.724759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715669:0, at schemeshard: 72057594046644480 Restore completed successfully 
2025-06-03T10:27:39.586849Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511667846037941134:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:39.586905Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpV5VhVu/pdisk_1.dat 2025-06-03T10:27:39.721796Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:39.723034Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:39.723053Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:39.743285Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13210, node 4 2025-06-03T10:27:39.785656Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:39.785668Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:39.785670Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:39.785716Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7959 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTyp ... home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable"Read scheme from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable" to "/Root/JsonDocumentTable"2025-06-03T10:28:11.213424Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Created "/Root/JsonDocumentTable"Read data from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable/data_00.csv"2025-06-03T10:28:11.256584Z node 46 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnb5v92p3e4en81my65552, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=46&id=NWJhOGMyNDctMjUyYzJmNjEtMjlmZDExNjEtZjdhMGRkOTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable" to "/Root/JsonDocumentTable"Read ACL from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpRaz0Ci/JsonDocumentTable/permissions.pb"2025-06-03T10:28:11.269047Z node 46 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480 Restore completed successfully2025-06-03T10:28:11.297741Z node 46 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtnb5w8325mmnka2ngjkery, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=46&id=MTEwNGExMGEtZTFjMWJjOWYtZDY2MjZmMTQtZTkyYzM0NGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:11.972837Z node 49 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[49:7511667981567749469:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.974886Z node 49 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpBDdvVN/pdisk_1.dat 2025-06-03T10:28:12.034542Z node 49 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32448, node 49 2025-06-03T10:28:12.064254Z node 49 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:12.064274Z node 49 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:12.064276Z node 49 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:12.064384Z node 49 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28652 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:28:12.082396Z node 49 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(49, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:12.082437Z node 49 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(49, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:12.085466Z node 49 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(49, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:12.097679Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:12.491327Z node 49 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [49:7511667985862717605:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.491360Z node 49 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.491491Z node 49 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [49:7511667985862717617:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.494796Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:12.500034Z node 49 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [49:7511667985862717619:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:12.595239Z node 49 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [49:7511667985862717686:2644] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:12.601098Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.701232Z node 49 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnb78f88mk281sh1xpxsrw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=49&id=ZDVlNjJmZWEtNWVmNmRmZjQtZjhlNTkwM2ItNDU4NTRhZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:12.723147Z node 49 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnb793fea47p47bwj4txg9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=49&id=ZDVlNjJmZWEtNWVmNmRmZjQtZjhlNTkwM2ItNDU4NTRhZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Backup "/Root" to "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/"Create temporary directory "/Root/~backup_20250603T102812" in databaseProcess "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable"Copy tables: { src: "/Root/DyNumberTable", dst: "/Root/~backup_20250603T102812/DyNumberTable" }Describe table "/Root/DyNumberTable"Describe table "/Root/~backup_20250603T102812/DyNumberTable"Backup table "/Root/~backup_20250603T102812/DyNumberTable" to "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable"Write scheme into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/scheme.pb"Write ACL into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/permissions.pb"Read table "/Root/~backup_20250603T102812/DyNumberTable"Write data into "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/data_00.csv"Drop table "/Root/~backup_20250603T102812/DyNumberTable"Remove temporary directory "/Root/~backup_20250603T102812" in database2025-06-03T10:28:12.906893Z node 49 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 49, TabletId: 72075186224037889 not found 2025-06-03T10:28:12.914858Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRmDir, opId: 281474976715668:0, at schemeshard: 72057594046644480 Backup completed successfullyRestore "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/" to "/Root"Resolved db base path: "/Root"List of entries in the backup: [{"type":"Directory","path":"/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/"},{"type":"Table","path":"/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable"}]Process "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable"Read scheme from 
"/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/scheme.pb"Restore table "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable" to "/Root/DyNumberTable"2025-06-03T10:28:12.948207Z node 49 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 49, TabletId: 72075186224037888 not found 2025-06-03T10:28:12.949977Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 Created "/Root/DyNumberTable"Read data from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/data_00.csv"2025-06-03T10:28:12.985678Z node 49 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtnb7ha2esagpys7x6jcqxk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=49&id=OWQ1YWNkYzItODQ2M2ZkMDUtM2FiZWUyN2ItNDhlNGU1MjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Restore ACL "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable" to "/Root/DyNumberTable"Read ACL from "/home/runner/.ya/build/build_root/u93c/0025fb/r3tmp/tmpxp9adt/DyNumberTable/permissions.pb"2025-06-03T10:28:13.001455Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 Restore completed successfully2025-06-03T10:28:13.032440Z node 49 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnb7jhb29qd1z1e5exwkbh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=49&id=ZDVlNjJmZWEtNWVmNmRmZjQtZjhlNTkwM2ItNDU4NTRhZDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root >> KqpPg::ExplainColumnsReorder [GOOD] >> SystemView::AuthGroups_Access [GOOD] >> SystemView::AuthGroups_ResultOrder >> SystemView::AuthUsers [GOOD] >> SystemView::AuthUsers_LockUnlock >> KqpPg::DropSequence [GOOD] >> KqpPg::DeleteWithQueryService+useSink >> KqpPg::InsertFromSelect_NoReorder-useSink [GOOD] >> KqpPg::InsertFromSelect_Serial+useSink >> BackupRestoreS3::TestAllPrimitiveTypes-DATE32 [GOOD] >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 >> KqpPg::TableSelect+useSink [GOOD] >> KqpPg::TableSelect-useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::ExplainColumnsReorder [GOOD] Test command err: Trying to start YDB, gRPC: 6382, MsgBus: 28224 2025-06-03T10:28:01.215300Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667940497733349:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:01.215605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023bf/r3tmp/tmp5WCgok/pdisk_1.dat 2025-06-03T10:28:01.410725Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6382, node 1 2025-06-03T10:28:01.453545Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:01.453559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:01.453561Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:01.453604Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28224 2025-06-03T10:28:01.544958Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:01.544986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:01.549799Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28224 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
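Note: the BackupRestore::TestAllPrimitiveTypes-DYNUMBER pass above cycles a one-table scheme through dump and restore (scheme.pb, permissions.pb and data_00.csv per table, then re-create and re-upload). A hedged YQL sketch of the kind of table it exercises; the column names are illustrative, since the actual test schema is not printed in the log:

    CREATE TABLE DyNumberTable (
        id Uint64,
        value DyNumber,  -- the primitive type this test variant round-trips
        PRIMARY KEY (id)
    );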
2025-06-03T10:28:01.677673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.681526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:01.904344Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667940497733961:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.904380Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.904555Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667940497733973:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.905438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:01.909246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:01.909403Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667940497733975:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:01.967717Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667940497734026:2325] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 21012, MsgBus: 20632 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023bf/r3tmp/tmpEStWXp/pdisk_1.dat 2025-06-03T10:28:02.427737Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:02.441875Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21012, node 2 2025-06-03T10:28:02.473461Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:02.473475Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:02.473478Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:02.473525Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20632 2025-06-03T10:28:02.513281Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:02.513327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:02.514361Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20632 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:02.560818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:02.565670Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:28:02.979986Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667943976861412:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.980005Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667943976861423:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.980013Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.980740Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480
2025-06-03T10:28:02.982968Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667943976861426:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking }
2025-06-03T10:28:03.069984Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667948271828773:2327] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
Trying to start YDB, gRPC: 14311, MsgBus: 62831
2025-06-03T10:28:03.391393Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667948273288807:2208];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:28:03.411827Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023bf/r3tmp/tmp7bGSUT/pdisk_1.dat
2025-06-03T10:28:03.441442Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:28:03.441775Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511667948273288625:2079] 1748946483381685 != 1748946483381688
TServer::EnableGrpc on GrpcPort 14311, node 3
2025-06-03T10:28:03.462047Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:28:03.462058Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T ...
ck failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 22985, MsgBus: 32380 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023bf/r3tmp/tmpofE1aP/pdisk_1.dat 2025-06-03T10:28:11.953808Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:11.961586Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:11.964239Z node 11 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [11:7511667984820715726:2079] 1748946491907047 != 1748946491907050 TServer::EnableGrpc on GrpcPort 22985, node 11 2025-06-03T10:28:11.990027Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:11.990044Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:11.990046Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:11.990110Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32380 2025-06-03T10:28:12.021739Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:12.021769Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:12.022809Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:12.059928Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:12.416981Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667989115683680:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:12.417013Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:12.419244Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:28:12.434881Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
2025-06-03T10:28:12.452107Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667989115683854:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:12.452142Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:12.452174Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667989115683859:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:12.453049Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480
2025-06-03T10:28:12.462522Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511667989115683861:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking }
2025-06-03T10:28:12.559371Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511667989115683912:2430] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:28:13.491199Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:297:2343], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests }
2025-06-03T10:28:13.491273Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError;
2025-06-03T10:28:13.491304Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023bf/r3tmp/tmpmnvcAV/pdisk_1.dat
2025-06-03T10:28:13.581712Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480
2025-06-03T10:28:13.597018Z node 12 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:28:13.597720Z node 12 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [12:32:2079] 1748946493064520 != 1748946493064524
2025-06-03T10:28:13.644802Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:28:13.644843Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:28:13.655615Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:28:13.737281Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:643:2551], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:13.737374Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:652:2556], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:13.737387Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:13.738445Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480
2025-06-03T10:28:13.854760Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:657:2559], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:28:13.899149Z node 12 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [12:726:2597] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } PreparedQuery: "b64b09cd-7840f515-9eeffb12-26b40710" QueryAst: "(\n(let $1 (PgType \'int4))\n(let $2 \'(\'(\'\"_logical_id\" \'218) \'(\'\"_id\" \'\"149584e-1aa76696-48d73b76-2a55860a\") \'(\'\"_partition_mode\" \'\"single\")))\n(let $3 (DqPhyStage \'() (lambda \'() (Iterator (AsList (AsStruct \'(\'\"x\" (PgConst \'1 $1)) \'(\'\"y\" (PgConst \'2 $1)))))) $2))\n(let $4 (DqCnResult (TDqOutput $3 \'\"0\") \'(\'\"y\" \'\"x\")))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($3) \'($4) \'() \'(\'(\'\"type\" \'\"generic\")))) \'((KqpTxResultBinding (ListType (StructType \'(\'\"x\" $1) \'(\'\"y\" $1))) \'\"0\" \'\"0\")) \'(\'(\'\"type\" \'\"query\"))))\n)\n" QueryPlan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":1,\"Operators\":[{\"Inputs\":[],\"Iterator\":\"[{x: \\\"1\\\",y: \\\"2\\\"}]\",\"Name\":\"Iterator\"}],\"Node Type\":\"ConstantExpr\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"ResourcePoolId\":\"default\"},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"tables\":[],\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"OptimizerStats\":{\"EquiJoinsCount\":0,\"JoinsCount\":0},\"PlanNodeType\":\"Query\"}}" YdbResults { columns { name: "y" type { pg_type { oid: 23 } } } columns { name: "x" type { pg_type { oid: 23 } } } } QueryDiagnostics: "" >> SystemView::VSlotsFields [GOOD] >> SystemView::TopPartitionsByCpuTables >> KqpPg::AlterSequence [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequence >> KqpPg::PgUpdate-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup >> KqpPg::DeleteWithQueryService+useSink [GOOD] >> KqpPg::DeleteWithQueryService-useSink |63.7%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_json_change_record/unittest >> Cdc::KeysOnlyLog[PqRunner] >> KqpPg::InsertFromSelect_Serial+useSink [GOOD] >> KqpPg::InsertFromSelect_Serial-useSink >> SystemView::PgTablesOneSchemeShardDataQuery [GOOD] >> SystemView::QueryStats |63.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |63.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut |63.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/syncer/ut/ydb-core-blobstorage-vdisk-syncer-ut >> KqpPg::DeleteWithQueryService-useSink [GOOD] >> KqpPg::JoinWithQueryService-StreamLookup [GOOD] >> KqpPg::PgAggregate+useSink >> SystemView::ShowCreateTableDefaultLiteral [GOOD] >> SystemView::ShowCreateTableColumn >> KqpPg::TableArrayInsert-useSink [GOOD] >> KqpPg::Returning+useSink >> SystemView::ShowCreateTablePartitionByHash [GOOD] >> SystemView::ShowCreateTablePartitionSettings >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] >> ShowCreateView::WithTablePathPrefix [GOOD] >> ShowCreateView::WithSingleQuotedTablePathPrefix >> 
KqpPg::AlterColumnSetDefaultFromSequence [GOOD] >> KqpPg::CreateTableIfNotExists_GenericQuery >> TExportToS3Tests::ExportPartitioningSettings >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs >> KqpPg::InsertFromSelect_Serial-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink >> SystemView::AuthGroups_ResultOrder [GOOD] >> SystemView::AuthGroups_TableRange ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::DeleteWithQueryService-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 22083, MsgBus: 16232 2025-06-03T10:28:01.893983Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667940816775233:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a0/r3tmp/tmp803kXa/pdisk_1.dat 2025-06-03T10:28:01.938043Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:01.969010Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:01.969095Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667940816775093:2079] 1748946481889936 != 1748946481889939 TServer::EnableGrpc on GrpcPort 22083, node 1 2025-06-03T10:28:01.992520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:01.992559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:01.993665Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:02.013649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:02.013667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:02.013670Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:02.013728Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16232 TClient is connected to server localhost:16232 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:02.134154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:28:02.137681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:28:02.516724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667945111743046:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.516756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.526112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:28:02.602530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667945111743179:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.602566Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.603039Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667945111743184:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:02.604188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:28:02.607204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
2025-06-03T10:28:02.607334Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667945111743186:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:02.685896Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667945111743237:2403] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 1 1 1 Trying to start YDB, gRPC: 19813, MsgBus: 14930 2025-06-03T10:28:03.045936Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667947280723656:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:03.045988Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a0/r3tmp/tmpmVurPT/pdisk_1.dat 2025-06-03T10:28:03.061149Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19813, node 2 2025-06-03T10:28:03.085542Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:03.085557Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:03.085560Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:03.085618Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14930 TClient is connected to server localhost:14930 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:28:03.150799Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:03.150831Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:03.153873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:03.155746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:28:03.157575Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:28:03.515155Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667947280724138:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:03.515182Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:03.517945Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:28:03.533929Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667947280724269:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:03.533963Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:03.534014Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667947280724274:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:03.534897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part pr ... nt=undelivered;self_id=[11:7511667994200265173:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:14.334124Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a0/r3tmp/tmp3MHezc/pdisk_1.dat 2025-06-03T10:28:14.351267Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20053, node 11 2025-06-03T10:28:14.366728Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:14.366739Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:14.366741Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:14.366793Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17646 TClient is connected to server localhost:17646 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:14.438169Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:14.438210Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 2025-06-03T10:28:14.439045Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:14.439700Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:14.900776Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667994200265791:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:14.900846Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:14.902913Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:28:14.927084Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667994200265892:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:14.927120Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:14.927277Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511667994200265897:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:14.928457Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:28:14.931499Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511667994200265899:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:14.988719Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511667994200265950:2381] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 29083, MsgBus: 31900 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023a0/r3tmp/tmpoU23yy/pdisk_1.dat 2025-06-03T10:28:15.462886Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:15.477160Z node 12 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29083, node 12 2025-06-03T10:28:15.501591Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:15.501609Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:15.501611Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:15.501671Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:31900 2025-06-03T10:28:15.548594Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:15.548634Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:15.549698Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:15.617813Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:15.619704Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:28:15.884737Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7511668002125009205:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:15.884768Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:15.888199Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:28:15.899179Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7511668002125009308:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:15.899209Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7511668002125009313:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:15.899214Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:15.900063Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:28:15.905541Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7511668002125009315:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:15.956832Z node 12 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [12:7511668002125009366:2384] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TBSVWithReboots::SimultaneousCreateDropNbs [GOOD] >> Cdc::KeysOnlyLog[PqRunner] [GOOD] >> Cdc::KeysOnlyLog[YdsRunner] >> KqpPg::PgAggregate+useSink [GOOD] >> KqpPg::PgAggregate-useSink >> TExportToS3Tests::CheckItemProgress >> TExportToS3Tests::ExportPartitioningSettings [GOOD] >> TExportToS3Tests::ExportIndexTablePartitioningSettings >> TExportToS3Tests::ShouldSucceedOnConcurrentTxs [GOOD] >> TExportToS3Tests::ShouldSucceedOnConcurrentExport >> TSchemeShardServerLess::StorageBillingLabels [GOOD] |63.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |63.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots |63.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview_reboots/ydb-core-tx-schemeshard-ut_sysview_reboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> BackupRestoreS3::TestAllPrimitiveTypes-DATETIME64 [GOOD] Test command err: 2025-06-03T10:27:36.980876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667832624325455:2083];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:36.980899Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025f6/r3tmp/tmpL7qnyI/pdisk_1.dat 2025-06-03T10:27:37.127346Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:37.127595Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:37.127735Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:37.133171Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12330, node 1 2025-06-03T10:27:37.153713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:37.153729Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:37.153733Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:37.153795Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30253 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:37.206874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:37.557548Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667836919293681:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:27:37.557600Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:37.630643Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667832624325662:2140] Handle TEvProposeTransaction 2025-06-03T10:27:37.630664Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667832624325662:2140] TxId# 281474976710658 ProcessProposeTransaction 2025-06-03T10:27:37.630683Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667832624325662:2140] Cookie# 0 userReqId# "" txid# 281474976710658 SEND to# [1:7511667836919293702:2609] 2025-06-03T10:27:37.647031Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667836919293702:2609] txid# 281474976710658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table" Columns { Name: "Key" Type: "Uint32" NotNull: false } Columns { Name: "Value" Type: "Utf8" NotNull: false } KeyColumnNames: "Key" PartitionConfig { PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } Temporary: false } } } UserToken: "" DatabaseName: "" 2025-06-03T10:27:37.647161Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511667836919293702:2609] txid# 281474976710658 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:37.647333Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:7511667836919293702:2609] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:37.647346Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511667836919293702:2609] txid# 281474976710658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:37.647422Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511667836919293702:2609] txid# 281474976710658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:37.647475Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511667836919293702:2609] HANDLE EvNavigateKeySetResult, txid# 281474976710658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:37.647497Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511667836919293702:2609] txid# 281474976710658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976710658 TabletId# 72057594046644480} 2025-06-03T10:27:37.647566Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511667836919293702:2609] txid# 281474976710658 HANDLE EvClientConnected 2025-06-03T10:27:37.648025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:27:37.649517Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511667836919293702:2609] txid# 281474976710658 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710658} 2025-06-03T10:27:37.649542Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511667836919293702:2609] txid# 281474976710658 SEND to# [1:7511667836919293701:2340] Source {TEvProposeTransactionStatus txid# 281474976710658 Status# 53} 2025-06-03T10:27:37.734632Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511667832624325662:2140] 
Handle TEvNavigate describe path /Root/table 2025-06-03T10:27:37.736663Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511667836919293843:2722] HANDLE EvNavigateScheme /Root/table 2025-06-03T10:27:37.736759Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511667836919293843:2722] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:37.736809Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511667836919293843:2722] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/table" Options { ShowPrivateTable: false } 2025-06-03T10:27:37.737223Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511667836919293843:2722] Handle TEvDescribeSchemeResult Forward to# [1:7511667836919293841:2346] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/table" PathDescription { Self { Name: "table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946457762 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "Key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "Key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: 
"background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 10 SplitByLoadSettings { Enabled: true } } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 720 ... 6;x-amz-date, Signature=c8ad1967992ad5bccad75982d1a23dc6fccc0a62e9bdfe8386a0d20ee25d5977 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250603T102815Z 2025-06-03T10:28:15.675030Z node 52 :IMPORT DEBUG: schemeshard_import_getters.cpp:373: HandlePermissions TEvExternalStorage::TEvHeadObjectResponse: self# [52:7511668000152979454:2207], result# No response body. 
REQUEST: GET /test_bucket?prefix=Datetime64Table HTTP/1.1 HEADERS: Host: localhost:20131 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7048A578-47A6-4A3B-BA7F-49487E2241EF amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250603/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=45bccc3f9b12d17df4fd16eae049c660e31a8469acc2e66b0b5b86bee1e41986 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250603T102815Z S3_MOCK::HttpServeList: Datetime64Table 2025-06-03T10:28:15.683552Z node 52 :IMPORT DEBUG: schemeshard_import_getters.cpp:654: HandleChangefeeds TEvExternalStorage::TEvListObjectResponse: self# [52:7511668000152979454:2207], result# ListObjectsResult { } 2025-06-03T10:28:15.683578Z node 52 :IMPORT INFO: schemeshard_import_getters.cpp:687: Reply: self# [52:7511668000152979454:2207], success# 1, error# 2025-06-03T10:28:15.683642Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.683647Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:989: TImport::TTxProgress: OnSchemeResult: id# 281474976715665, itemIdx# 0, success# 1 2025-06-03T10:28:15.683846Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: State: CreateSchemeObject SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-03T10:28:15.687075Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.687154Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.687159Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976710760, id# 281474976715665 2025-06-03T10:28:15.687188Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:419: TImport::TTxProgress: CreateTable propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: State: CreateSchemeObject SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710760 2025-06-03T10:28:15.687274Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.688178Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710760:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.689547Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.689560Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976710760, status# StatusAccepted 2025-06-03T10:28:15.689613Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:643: 
TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: CreateSchemeObject SubState: Subscribed WaitTxId: 281474976710760 Issue: '' } 2025-06-03T10:28:15.691003Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.708149Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.708172Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976710760 2025-06-03T10:28:15.708221Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:629: TImport::TTxProgress: Allocate txId: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: AllocateTxId WaitTxId: 0 Issue: '' } 2025-06-03T10:28:15.708544Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.708572Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.708576Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1218: TImport::TTxProgress: OnAllocateResult: txId# 281474976710761, id# 281474976715665 2025-06-03T10:28:15.708586Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:520: TImport::TTxProgress: Restore propose: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Proposed WaitTxId: 0 Issue: '' }, txId# 281474976710761 2025-06-03T10:28:15.708736Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.708848Z node 52 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpRestore, opId: 281474976710761:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.709208Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.709217Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1314: TImport::TTxProgress: OnModifyResult: txId# 281474976710761, status# StatusAccepted 2025-06-03T10:28:15.709234Z node 52 :IMPORT INFO: schemeshard_import__create.cpp:643: TImport::TTxProgress: Wait for completion: info# { Id: 281474976715665 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046644480, LocalPathId: 1] UserSID: '(empty maybe)' State: Waiting Issue: '' Items: 1 }, item# { Idx: 0 DstPathName: '/Root/Datetime64Table' DstPathId: [OwnerId: 72057594046644480, LocalPathId: 9] State: Transferring SubState: Subscribed WaitTxId: 281474976710761 Issue: '' } 2025-06-03T10:28:15.709584Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete REQUEST: HEAD /test_bucket/Datetime64Table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:20131 Accept: */* Connection: Upgrade, 
HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 57FFC84E-DC3E-4923-9A0C-DD8B2C573504 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250603/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=cd47b05654bf3e6022da4a8b3b07f7e15a481fd9b28bdb28b234a3a02879625b content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250603T102815Z S3_MOCK::HttpServeRead: /test_bucket/Datetime64Table/data_00.csv / 7 REQUEST: GET /test_bucket/Datetime64Table/data_00.csv HTTP/1.1 HEADERS: Host: localhost:20131 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FE6D2667-8081-435E-A0CE-94EFFFF6D3C3 amz-sdk-request: attempt=1 authorization: AWS4-HMAC-SHA256 Credential=test_key/20250603/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-type;host;range;x-amz-api-version;x-amz-content-sha256;x-amz-date, Signature=107a6c6cf8d80ecc953a3446cb606073ef09090202a9bfb6e59f3a292b1459e9 content-type: application/xml range: bytes=0-6 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 x-amz-content-sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 x-amz-date: 20250603T102815Z S3_MOCK::HttpServeRead: /test_bucket/Datetime64Table/data_00.csv / 7 2025-06-03T10:28:15.732563Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:361: TImport::TTxProgress: DoExecute 2025-06-03T10:28:15.732580Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:1472: TImport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:28:15.733151Z node 52 :IMPORT DEBUG: schemeshard_import__create.cpp:385: TImport::TTxProgress: DoComplete 2025-06-03T10:28:15.856422Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:50: [GetImport] [52:7511668000152979670:2393] [0] Resolve database: name# /Root 2025-06-03T10:28:15.856640Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:66: [GetImport] [52:7511668000152979670:2393] [0] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:28:15.856661Z node 52 :TX_PROXY DEBUG: rpc_operation_request_base.h:106: [GetImport] [52:7511668000152979670:2393] [0] Send request: schemeShardId# 72057594046644480 2025-06-03T10:28:15.856931Z node 52 :TX_PROXY DEBUG: rpc_get_operation.cpp:220: [GetImport] [52:7511668000152979670:2393] [0] Handle TEvImport::TEvGetImportResponse: record# Entry { Id: 281474976715665 Status: SUCCESS Progress: PROGRESS_DONE ImportFromS3Settings { endpoint: "localhost:20131" scheme: HTTP bucket: "test_bucket" items { source_prefix: 
"Datetime64Table" destination_path: "/Root/Datetime64Table" } } StartTime { seconds: 1748946495 } EndTime { seconds: 1748946495 } } 2025-06-03T10:28:15.878826Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [52:7511667995858010567:2125] Handle TEvExecuteKqpTransaction 2025-06-03T10:28:15.878850Z node 52 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [52:7511667995858010567:2125] TxId# 281474976715666 ProcessProposeKqpTransaction 2025-06-03T10:28:15.879322Z node 52 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtnbabp4ermh03ha80cxxkb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=52&id=N2IzNTMxYjUtNzgzNmRiYzQtODc4ZDcyYTMtYWM0Mzk4MjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> KqpPg::CreateTableIfNotExists_GenericQuery [GOOD] >> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactBorrowedAfterSplitMergeWhenDisabled [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot >> KqpPg::InsertNoTargetColumns_ColumnOrder+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink |63.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |63.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots |63.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source_reboots/schemeshard-ut_external_data_source_reboots >> TExportToS3Tests::ExportIndexTablePartitioningSettings [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::SimultaneousCreateDropNbs [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:27:42.298653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:42.298683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:42.298690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 
2025-06-03T10:27:42.298697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:42.298713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:42.298718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:42.298729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:42.298744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:42.298865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:42.298933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:42.343937Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:27:42.343966Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:42.344088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:27:42.352796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:42.352839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:42.352881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:42.357087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:42.357202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:42.357342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:42.357448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:42.358679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:42.358738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:42.359034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:42.359048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-03T10:27:42.359085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:42.359095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:42.359103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:42.359125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:27:42.365828Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:27:42.433957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:42.434040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.434106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:42.434150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:42.434161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.436844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:42.436895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:42.436947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.436960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:42.436966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:42.436972Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:42.447907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.447941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:42.447951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:42.448646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.448661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:42.448669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:42.448678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:42.449531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:42.450118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:42.450174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:42.450379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:42.450409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:42.450417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:42.450477Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:28:16.697958Z node 94 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:28:16.698058Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:28:16.698095Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:16.698182Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:28:16.698208Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409547 2025-06-03T10:28:16.698472Z node 94 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-03T10:28:16.698552Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:28:16.698586Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409549 2025-06-03T10:28:16.698732Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:28:16.698739Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:28:16.698752Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:28:16.698825Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:28:16.698831Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:28:16.698856Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:28:16.698908Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:28:16.698970Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, 
at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:28:16.701484Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:28:16.701513Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:28:16.701545Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:28:16.701548Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:28:16.701555Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:28:16.701558Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:28:16.702021Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:28:16.702037Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:28:16.702099Z node 94 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:28:16.702123Z node 94 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:28:16.702133Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:28:16.702139Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:28:16.702172Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:28:16.702596Z node 94 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1003 2025-06-03T10:28:16.702663Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:28:16.702674Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 TestWaitNotification wait txId: 1004 2025-06-03T10:28:16.702693Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:28:16.702697Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:28:16.702835Z node 94 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, 
txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:28:16.702859Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:28:16.702865Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [94:535:2490] 2025-06-03T10:28:16.702886Z node 94 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:28:16.702910Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:28:16.702914Z node 94 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [94:535:2490] TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-03T10:28:16.702982Z node 94 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:28:16.702996Z node 94 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:28:16.703004Z node 94 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-03T10:28:16.703104Z node 94 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:16.703156Z node 94 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/USER_0" took 79us result status StatusPathDoesNotExist 2025-06-03T10:28:16.703204Z node 94 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirA/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/DirA\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/DirA/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/DirA" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:28:16.703261Z node 94 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:16.703290Z node 94 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 
describe path "/MyRoot/DirA" took 30us result status StatusSuccess 2025-06-03T10:28:16.703367Z node 94 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA" PathDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_serverless/unittest >> TSchemeShardServerLess::StorageBillingLabels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:26:58.425805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:26:58.425842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:58.425848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:26:58.425854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:26:58.425866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:26:58.425871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:26:58.425887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:26:58.425902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:26:58.426027Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:26:58.426116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:26:58.478894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:26:58.478934Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:58.488740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:26:58.488943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:26:58.488988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:26:58.492424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:26:58.492516Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:26:58.492683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.492758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:26:58.493685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:58.493789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:26:58.494220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:58.494238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:26:58.494249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:26:58.494267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:58.494275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:26:58.494305Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.496531Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:26:58.599050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-03T10:26:58.599162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.599255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:26:58.599320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:26:58.599339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.607988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.608051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:26:58.608146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.608163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:26:58.608173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:26:58.608182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:26:58.610030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.610064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:26:58.610076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:26:58.610856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.610873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:26:58.610883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.610893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:26:58.611937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:26:58.612625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:26:58.612686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:26:58.612953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:26:58.612993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:26:58.613004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.613088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:26:58.613099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:26:58.613144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:26:58.613159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:26:58.618177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:26:58.618205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:26:58.618358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 FAKE_COORDINATOR: Erasing txId 105 2025-06-03T10:26:58.845760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:58.845775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:26:58.845780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:26:58.845787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 5 2025-06-03T10:26:58.845794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:26:58.845812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-03T10:26:58.846793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5814: Handle TEvSyncTenantSchemeShard, at schemeshard: 72057594046678944, msg: DomainSchemeShard: 72057594046678944 DomainPathId: 3 TabletID: 72075186233409549 Generation: 2 EffectiveACLVersion: 0 SubdomainVersion: 2 UserAttributesVersion: 2 TenantHive: 18446744073709551615 TenantSysViewProcessor: 18446744073709551615 TenantRootACL: "" TenantStatisticsAggregator: 18446744073709551615 TenantGraphShard: 18446744073709551615 2025-06-03T10:26:58.846814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:26: TTxSyncTenant DoExecute, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:26:58.846839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:567: DoUpdateTenant no hasChanges, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], tenantLink: TSubDomainsLinks::TLink { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 3], Generation: 2, ActorId:[1:564:2501], EffectiveACLVersion: 0, SubdomainVersion: 2, UserAttributesVersion: 2, TenantHive: 18446744073709551615, TenantSysViewProcessor: 18446744073709551615, TenantStatisticsAggregator: 18446744073709551615, TenantGraphShard: 18446744073709551615, TenantRootACL: }, subDomain->GetVersion(): 2, actualEffectiveACLVersion: 0, actualUserAttrsVersion: 2, tenantHive: 18446744073709551615, tenantSysViewProcessor: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:26:58.847003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409549 2025-06-03T10:26:58.847010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409549, txId: 0, path id: [OwnerId: 72075186233409549, LocalPathId: 1] 2025-06-03T10:26:58.847061Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409549
2025-06-03T10:26:58.847066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:661:2572], at schemeshard: 72075186233409549, txId: 0, path id: 1
2025-06-03T10:26:58.847270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409549, msg: Owner: 72075186233409549 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72075186233409549, cookie: 0
2025-06-03T10:26:58.847289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105
2025-06-03T10:26:58.847300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__sync_update_tenants.cpp:36: TTxSyncTenant DoComplete, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944
TestModificationResult got TxId: 105, wait until txId: 105
TestWaitNotification wait txId: 105
2025-06-03T10:26:58.847380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion
2025-06-03T10:26:58.847390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105
2025-06-03T10:26:58.847485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944
2025-06-03T10:26:58.847503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult
2025-06-03T10:26:58.847509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:743:2634]
TestWaitNotification: OK eventTxId 105
... waiting for metering
2025-06-03T10:27:03.846892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs
2025-06-03T10:27:03.846938Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:27:03.912468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs
2025-06-03T10:27:03.912512Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:27:03.960652Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs
2025-06-03T10:27:03.960696Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:27:24.946380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:27:24.946461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:90: TTxServerlessStorageBilling: initiate at first time, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:01:00.000000Z, set LastBillTime: 1970-01-01T00:01:00.000000Z, next retry at: 1970-01-01T00:02:00.000000Z
2025-06-03T10:27:24.952780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:27:25.084764Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944
2025-06-03T10:27:25.084836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944
2025-06-03T10:27:25.084864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944
2025-06-03T10:27:25.177559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546
2025-06-03T10:27:25.177632Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546
2025-06-03T10:27:25.177658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546
2025-06-03T10:27:25.233607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549
2025-06-03T10:27:25.233682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549
2025-06-03T10:27:25.233708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549
2025-06-03T10:27:51.077840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:27:51.077904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:121: TTxServerlessStorageBilling: too soon call, wait until current period ends, schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:02:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, next retry at: 1970-01-01T00:03:00.000000Z
2025-06-03T10:27:51.077928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:27:51.195748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72057594046678944
2025-06-03T10:27:51.195801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72057594046678944
2025-06-03T10:27:51.195823Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72057594046678944
2025-06-03T10:27:51.269481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546
2025-06-03T10:27:51.269542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546
2025-06-03T10:27:51.269564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546
2025-06-03T10:27:51.328095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549
2025-06-03T10:27:51.328152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549
2025-06-03T10:27:51.328173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549
2025-06-03T10:28:17.083811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:28:17.084041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:191: TTxServerlessStorageBilling: make a bill, record: '{"usage":{"start":120,"quantity":59,"finish":179,"type":"delta","unit":"byte*second"},"tags":{"ydb_size":0},"id":"72057594046678944-3-120-179-0","cloud_id":"CLOUD_ID_VAL","source_wt":180,"source_id":"sless-docapi-ydb-storage","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.v1","labels":{"k":"v"},"folder_id":"FOLDER_ID_VAL","version":"1.0.0"} ', schemeshardId: 72075186233409549, domainId: [OwnerId: 72057594046678944, LocalPathId: 3], now: 1970-01-01T00:03:00.000000Z, LastBillTime: 1970-01-01T00:01:00.000000Z, lastBilled: 1970-01-01T00:01:00.000000Z--1970-01-01T00:01:59.000000Z, toBill: 1970-01-01T00:02:00.000000Z--1970-01-01T00:02:59.000000Z, next retry at: 1970-01-01T00:04:00.000000Z
2025-06-03T10:28:17.085716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
... blocking NKikimr::NMetering::TEvMetering::TEvWriteMeteringJson from FLAT_SCHEMESHARD_ACTOR to TFakeMetering cookie 0
... waiting for metering (done)
>> TExportToS3Tests::EnableChecksumsPersistance
>> KqpPg::Returning+useSink [GOOD]
>> KqpPg::Returning-useSink
>> SystemView::QueryStats [GOOD]
>> SystemView::QueryStatsFields
>> TExportToS3Tests::ShouldSucceedOnConcurrentExport [GOOD]
>> TExportToS3Tests::ShouldSucceedOnConcurrentImport
>> KqpPg::PgAggregate-useSink [GOOD]
>> KqpPg::MkqlTerminate
>> TExportToS3Tests::EnableChecksumsPersistance [GOOD]
>> TExportToS3Tests::EncryptedExport
>> TExportToS3Tests::CheckItemProgress [GOOD]
>> KqpPg::InsertNoTargetColumns_ColumnOrder-useSink [GOOD]
>> KqpPg::InsertNoTargetColumns_NotOneSize+useSink
>> Cdc::KeysOnlyLog[YdsRunner] [GOOD]
>> Cdc::KeysOnlyLog[TopicRunner]
>> KqpPg::AlterColumnSetDefaultFromSequenceWithSchemaname [GOOD]
>> KqpPg::CheckPgAutoParams+useSink
>> TExportToS3Tests::CompletedExportEndTime
>> TExportToS3Tests::ShouldSucceedOnConcurrentImport [GOOD]
>> TExportToS3Tests::ShouldCheckQuotasExportsLimited
>> SystemView::AuthGroups_TableRange [GOOD]
>> SystemView::AuthOwners
>> TExportToS3Tests::ShouldRetryAtFinalStage
>> KqpPg::MkqlTerminate [GOOD]
>> KqpPg::NoSelectFullScan
>> TExportToS3Tests::DropSourceTableBeforeTransferring
>> TExportToS3Tests::EncryptedExport [GOOD]
|63.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore
|63.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore
|63.8%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_incremental_restore/ydb-core-tx-schemeshard-ut_incremental_restore
>> TExportToS3Tests::ShouldCheckQuotasExportsLimited [GOOD]
>> KqpPg::InsertNoTargetColumns_NotOneSize+useSink [GOOD]
>> KqpPg::Returning-useSink [GOOD]
>> KqpPg::SelectIndex+useSink
>> KqpPg::InsertNoTargetColumns_NotOneSize-useSink
>> KqpPg::TableSelect-useSink [GOOD]
>> KqpPg::TableInsert+useSink
>> TExportToS3Tests::ShouldCheckQuotasChildrenLimited
>> TExportToS3Tests::ShouldOmitNonStrictStorageSettings
>> TExportToS3Tests::CompletedExportEndTime [GOOD]
>> SystemView::QueryStatsFields [GOOD]
>> SystemView::PartitionStatsTtlFields
>> TExportToS3Tests::RebootDuringCompletion
>> KqpPg::V1CreateTable [GOOD]
>> KqpPg::ValuesInsert+useSink
>> TExportToS3Tests::Checksums
>> TExportToS3Tests::DropSourceTableBeforeTransferring [GOOD]
>> Cdc::KeysOnlyLog[TopicRunner] [GOOD]
>> Cdc::KeysOnlyLogDebezium
>> TExportToS3Tests::DropCopiesBeforeTransferring1
>> KqpPg::NoSelectFullScan [GOOD]
>> KqpPg::LongDomainName
>> TExportToS3Tests::ShouldCheckQuotasChildrenLimited [GOOD]
>> TExportToS3Tests::Checksums [GOOD]
>> TExportToS3Tests::ChecksumsWithCompression
>> TExportToS3Tests::SchemaMappingEncryption
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::EncryptedExport [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:28:16.796220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:28:16.796251Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:16.796257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:16.796262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:16.796275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:16.796279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:16.796290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:16.796303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:16.796408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:16.796490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:16.811898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:16.811926Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:16.816448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:16.816594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:16.816642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:16.819035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:16.819104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:16.819246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.819315Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:16.820111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:16.820164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:16.820497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:16.820511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: 
TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:16.820521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:16.820531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:16.820537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:16.820561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.822159Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:16.843910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:16.843998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.844067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:16.844120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:16.844167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.844983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.845013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:16.845077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.845090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:16.845096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:16.845102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:16.845759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at 
schemeshard: 72057594046678944 2025-06-03T10:28:16.845780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:16.845787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:16.846268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.846281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.846287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.846295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:16.847017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:16.847498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:16.847539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:16.847762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.847790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:16.847798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.847870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:16.847879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.847910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:16.847925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:16.848386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:16.848395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:16.848441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 0:28:18.992692Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:18.992698Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-03T10:28:18.992704Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:28:18.992728Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-03T10:28:18.993066Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:18.993328Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993340Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-03T10:28:18.993348Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993508Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-03T10:28:18.993552Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:28:18.993646Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-03T10:28:18.993777Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993808Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 133 
RawX2: 17179871339 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993817Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993855Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.993869Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-03T10:28:18.993879Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:18.993886Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-03T10:28:18.993889Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:18.993904Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:18.993917Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:18.993923Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-03T10:28:18.993933Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:18.993938Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-03T10:28:18.993943Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710763:0 2025-06-03T10:28:18.993958Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:28:18.993965Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-03T10:28:18.993970Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-03T10:28:18.993974Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:28:18.994369Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:18.994791Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:18.994805Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 
72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:18.994863Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:18.994895Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:18.994901Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-03T10:28:18.994907Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-03T10:28:18.995129Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:18.995149Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:18.995155Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:18.995160Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-03T10:28:18.995167Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:28:18.995571Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:18.995588Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:18.995595Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:18.995600Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:28:18.995607Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 
2025-06-03T10:28:18.995624Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1
2025-06-03T10:28:18.995630Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2151]
2025-06-03T10:28:18.996349Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763
2025-06-03T10:28:18.996741Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763
2025-06-03T10:28:18.996790Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763
2025-06-03T10:28:18.996810Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710763
2025-06-03T10:28:18.996826Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute
2025-06-03T10:28:18.996833Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763
2025-06-03T10:28:18.996840Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295
2025-06-03T10:28:18.997470Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete
2025-06-03T10:28:18.997510Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-06-03T10:28:18.997520Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:1132:3008]
TestWaitNotification: OK eventTxId 103
>> TExportToS3Tests::DropCopiesBeforeTransferring1 [GOOD]
>> TExportToS3Tests::DropCopiesBeforeTransferring2
>> KqpPg::InsertNoTargetColumns_NotOneSize-useSink [GOOD]
>> KqpPg::InsertNoTargetColumns_Alter+useSink
>> TExportToS3Tests::RebootDuringCompletion [GOOD]
>> TExportToS3Tests::RebootDuringAbortion
>> TExportToS3Tests::ChecksumsWithCompression [GOOD]
>> KqpPg::SelectIndex+useSink [GOOD]
>> KqpPg::SelectIndex-useSink
>> ShowCreateView::WithSingleQuotedTablePathPrefix [GOOD]
>> ShowCreateView::WithTwoTablePathPrefixes
>> KqpPg::LongDomainName [GOOD]
>> TExportToS3Tests::Changefeeds
>> SystemView::TopPartitionsByCpuFields [GOOD]
>> SystemView::TopPartitionsByCpuFollowers
>> TExportToS3Tests::SchemaMappingEncryption [GOOD]
>> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey
>> KqpPg::CheckPgAutoParams+useSink [GOOD]
>> KqpPg::CheckPgAutoParams-useSink
>> TExportToS3Tests::DropCopiesBeforeTransferring2 [GOOD]
>> SystemView::PartitionStatsFields [GOOD]
>> SystemView::ConcurrentScans
>> TExportToS3Tests::RebootDuringAbortion [GOOD]
>> SystemView::AuthOwners [GOOD]
>> SystemView::AuthOwners_Access
>> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false [GOOD]
>> TExportToS3Tests::CorruptedDyNumber
>> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::LongDomainName [GOOD]
Test command err: Trying to start YDB, gRPC: 1245, MsgBus: 11905
2025-06-03T10:27:59.220101Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667931516882450:2078];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:59.220635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002430/r3tmp/tmpVBJgOp/pdisk_1.dat 2025-06-03T10:27:59.297160Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1245, node 1 2025-06-03T10:27:59.320360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:59.320394Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:59.321797Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:59.329435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:59.329449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:59.329452Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:59.329506Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11905 TClient is connected to server localhost:11905 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:59.551756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:27:59.565210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:27:59.955617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667931516883057:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:59.955623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667931516883069:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:59.955651Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:59.956512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:27:59.962870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667931516883071:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:00.046581Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667935811850418:2325] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 20496, MsgBus: 28644 2025-06-03T10:28:00.348200Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667935424060860:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:00.348224Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002430/r3tmp/tmp6p9aBb/pdisk_1.dat 2025-06-03T10:28:00.368246Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20496, node 2 2025-06-03T10:28:00.389039Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:00.389052Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:00.389054Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:00.389110Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28644 TClient is connected to server localhost:28644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:00.448588Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:00.448615Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:00.449727Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:00.452184Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:00.461958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:00.926241Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667935424061467:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.926268Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.926417Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667935424061479:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.927161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:00.933999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:00.934110Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511667935424061481:2332], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:01.006276Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511667939719028828:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 27579, MsgBus: 7938 2025-06-03T10:28:01.405117Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667940842736678:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:01.405139Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002430/r3tmp/tmpm5fddL/pdisk_1.dat 2025-06-03T10:28:01.438154Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27579, node 3 2025-06-03T10:28:01.461454Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:01.461465Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe ... d 2025-06-03T10:28:18.947196Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:18.947486Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:18.948223Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:19.258776Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511668019929565787:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:19.258799Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511668019929565775:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:19.258810Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:19.259724Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:19.262536Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511668019929565804:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:19.321512Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511668019929565855:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:19.328736Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"No estimate","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":2}],"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (null, 3)","aid [7, 7]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["aid (null, 3)","aid [7, 7]"],"Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadRangesKeys":["aid"],"ReadColumns":["abalance"],"E-Cost":"No estimate","ReadRangesExpectedSize":2}],"Node Type":"TableRangeScan"}],"Operators":[{"SortBy":"input.abalance","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["pgbench_accounts"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"No estimate","Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/pgbench_accounts","E-Rows":"No estimate","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/pgbench_accounts","reads":[{"columns":["abalance"],"scan_by":["aid (4, 3)"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Operators":[{"Scan":"Parallel","ReadRange":["aid (4, 3)"],"E-Size":"No estimate","Name":"TableRangeScan","Path":"\/Root\/pgbench_accounts","E-Rows":"No 
estimate","Table":"pgbench_accounts","ReadColumns":["abalance"],"E-Cost":"No estimate"}],"Node Type":"TableRangeScan"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} Trying to start YDB, gRPC: 21660, MsgBus: 27027 2025-06-03T10:28:19.780336Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7511668017233156321:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:19.780659Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002430/r3tmp/tmpvN28O9/pdisk_1.dat 2025-06-03T10:28:19.794735Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21660, node 11 2025-06-03T10:28:19.823547Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:19.823565Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:19.823568Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:19.823645Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27027 TClient is connected to server localhost:27027 WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'... TClient::Ls request: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_D... (TRUNCATED) WaitRootIsUp 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' success. 
2025-06-03T10:28:19.878515Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:19.878558Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:19.879614Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:19.882580Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:19.884667Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:20.190837Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511668021528124166:2327], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:20.190868Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:20.191005Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511668021528124193:2330], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:20.191969Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:20.194130Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511668021528124195:2331], DatabaseId: /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:20.257329Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511668021528124246:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:20.263361Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 >> TExportToS3Tests::ExportStartTime >> Cdc::KeysOnlyLogDebezium [GOOD] >> Cdc::NewAndOldImagesLog[PqRunner] >> TExportToS3Tests::Changefeeds [GOOD] >> TExportToS3Tests::CorruptedDyNumber [GOOD] >> KqpPg::InsertNoTargetColumns_Alter+useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Alter-useSink >> TExportToS3Tests::DisableAutoDropping |63.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |63.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut |63.8%| [LD] {RESULT} $(B)/ydb/library/ycloud/impl/ut/ydb-library-ycloud-impl-ut >> SystemView::ShowCreateTablePartitionSettings [GOOD] >> SystemView::ShowCreateTableReadReplicas >> TExportToS3Tests::ExportStartTime [GOOD] >> TExportToS3Tests::ShouldOmitNonStrictStorageSettings [GOOD] >> TExportToS3Tests::SchemaMapping >> SystemView::AuthUsers_LockUnlock [GOOD] >> SystemView::AuthUsers_Access >> KqpPg::SelectIndex-useSink [GOOD] >> KqpPg::TableDeleteAllData+useSink >> TExportToS3Tests::DisableAutoDropping [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_extsubdomain_reboots/unittest >> TSchemeShardTestExtSubdomainReboots::SchemeLimits-AlterDatabaseCreateHiveFirst-false [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:27:37.322675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:37.322704Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:37.322711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:37.322716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:37.322723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:37.322728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:37.322739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:37.322754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:37.322865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:37.322950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:37.338575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:27:37.338603Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:37.338720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:27:37.342936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:37.342980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:37.343031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:37.344589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:37.344645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:37.344779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:37.344893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:37.346117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:37.346174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] 
Stop 2025-06-03T10:27:37.346484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:37.346500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:37.346540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:37.346551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:37.346559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:37.346586Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:27:37.348211Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:27:37.369619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:37.369710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.369783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:37.369833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:37.369844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.370723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:37.370756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:37.370830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.370842Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:37.370850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:37.370857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:37.371360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.371375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:37.371381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:37.371761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.371773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:37.371780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:37.371788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:37.372527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:37.372939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:37.372987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:37.373203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:37.373233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:37.373241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:37.373329Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... ntPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-03T10:28:20.653033Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-03T10:28:20.653070Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72075186233409546, cookie: 1005 2025-06-03T10:28:20.653075Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1005 2025-06-03T10:28:20.653079Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1005, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 2 2025-06-03T10:28:20.653084Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-03T10:28:20.653096Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-03T10:28:20.653465Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1005:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1005 msg type: 269090816 2025-06-03T10:28:20.653513Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 4294967295, tablet: 72075186233409547 2025-06-03T10:28:20.654102Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 2025-06-03T10:28:20.654128Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestModificationResults wait txId: 1006 2025-06-03T10:28:20.654686Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "B" } } TxId: 1006 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-03T10:28:20.654722Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_0/B, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-03T10:28:20.654745Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72075186233409546, LocalPathId: 1], parent name: MyRoot/USER_0, child name: B, child id: [OwnerId: 72075186233409546, LocalPathId: 3], at schemeshard: 72075186233409546 2025-06-03T10:28:20.654771Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 0 2025-06-03T10:28:20.654785Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:130: IgniteOperation, opId: 1006:1, propose status:StatusAccepted, reason: , at schemeshard: 72075186233409546 2025-06-03T10:28:20.654829Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-03T10:28:20.654837Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 1 2025-06-03T10:28:20.655265Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1006, response: Status: StatusAccepted TxId: 1006 SchemeshardId: 72075186233409546 PathId: 3, at schemeshard: 72075186233409546 2025-06-03T10:28:20.655297Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1006, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/B 2025-06-03T10:28:20.655340Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:28:20.655347Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:28:20.655378Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 1006, path id: [OwnerId: 72075186233409546, LocalPathId: 3] 2025-06-03T10:28:20.655395Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:28:20.655416Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:543:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 1 2025-06-03T10:28:20.655421Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [96:543:2476], at schemeshard: 72075186233409546, txId: 1006, path id: 3 2025-06-03T10:28:20.655477Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72075186233409546 2025-06-03T10:28:20.655485Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 1006:0 ProgressState, at schemeshard: 72075186233409546 2025-06-03T10:28:20.655494Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1006 ready parts: 1/1 2025-06-03T10:28:20.655522Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72075186233409547 message:Transaction { AffectedSet { TabletId: 72075186233409546 Flags: 2 } ExecLevel: 0 TxId: 1006 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409547 2025-06-03T10:28:20.655688Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-03T10:28:20.655704Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 6 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-03T10:28:20.655708Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-03T10:28:20.655714Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 6 2025-06-03T10:28:20.655720Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 6 2025-06-03T10:28:20.655859Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-03T10:28:20.655876Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72075186233409546, cookie: 1006 2025-06-03T10:28:20.655881Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 1006 2025-06-03T10:28:20.655886Z node 96 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 1006, pathId: [OwnerId: 72075186233409546, LocalPathId: 3], version: 2 2025-06-03T10:28:20.655891Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 3] was 2 2025-06-03T10:28:20.655903Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1006, ready parts: 0/1, is published: true 2025-06-03T10:28:20.656593Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1006:4294967295 from tablet: 72075186233409546 to tablet: 72075186233409547 cookie: 0:1006 msg type: 269090816 2025-06-03T10:28:20.656629Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1006, partId: 4294967295, tablet: 72075186233409547 2025-06-03T10:28:20.656959Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 2025-06-03T10:28:20.657019Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestModificationResults wait txId: 1007 2025-06-03T10:28:20.657791Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpMkDir MkDir { Name: "C" } } TxId: 1007 TabletId: 72075186233409546 , at schemeshard: 72075186233409546 2025-06-03T10:28:20.657846Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: 
/MyRoot/USER_0/C, operationId: 1007:0, at schemeshard: 72075186233409546 2025-06-03T10:28:20.657870Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1007:1, propose status:StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, at schemeshard: 72075186233409546 2025-06-03T10:28:20.658417Z node 96 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1007, response: Status: StatusResourceExhausted Reason: "Check failed: path: \'/MyRoot/USER_0/C\', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" TxId: 1007 SchemeshardId: 72075186233409546, at schemeshard: 72075186233409546 2025-06-03T10:28:20.658448Z node 96 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1007, database: /MyRoot/USER_0, subject: , status: StatusResourceExhausted, reason: Check failed: path: '/MyRoot/USER_0/C', error: paths count limit exceeded, limit: 2, paths: 2, delta: 1, source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, operation: CREATE DIRECTORY, path: /MyRoot/USER_0/C TestModificationResult got TxId: 1007, wait until txId: 1007 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMappingEncryptionIncorrectKey [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:18.701535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:18.701565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:18.701569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:18.701574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:18.701585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:18.701588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:18.701596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:18.701612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, 
CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:18.701727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:18.701826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:18.715552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:18.715577Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:18.720034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:18.720206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:18.720274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:18.723549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:18.723645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:18.723822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:18.723908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:18.724884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:18.724948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:18.725381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:18.725398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:18.725412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:18.725423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:18.725431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:18.725462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.727244Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:18.757847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } 
StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:18.757945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.758021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:18.758082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:18.758117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.759137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:18.759181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:18.759262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.759276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:18.759284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:18.759291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:18.759910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.759926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:18.759934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:18.760473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.760489Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:18.760496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:18.760504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:18.761382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 
72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:18.761847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:18.761896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:18.762142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:18.762175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:18.762187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:18.762273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:18.762284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:18.762320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:18.762335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:18.762928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:18.762945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:18.763002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
hemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710758, at schemeshard: 72057594046678944 2025-06-03T10:28:20.930996Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710758:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710758 msg type: 269090816 2025-06-03T10:28:20.931026Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710758, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:28:20.931075Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 FAKE_COORDINATOR: Add transaction: 281474976710758 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710758 at step: 5000005 2025-06-03T10:28:20.931308Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931336Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710758 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871339 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931345Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710758:0, step: 5000005, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931374Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710758:0, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931385Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-03T10:28:20.931390Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-03T10:28:20.931396Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710758:0 progress is 1/1 2025-06-03T10:28:20.931403Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-03T10:28:20.931412Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:20.931423Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:20.931429Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710758, ready parts: 1/1, is published: false 2025-06-03T10:28:20.931435Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710758 ready parts: 1/1 2025-06-03T10:28:20.931439Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710758:0 
2025-06-03T10:28:20.931442Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710758:0 2025-06-03T10:28:20.931449Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:20.931453Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710758, publications: 2, subscribers: 1 2025-06-03T10:28:20.931456Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:28:20.931459Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710758, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:28:20.931550Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.931879Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931890Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:20.931921Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710758, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:20.931946Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:20.931951Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 1 2025-06-03T10:28:20.931955Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710758, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710758 2025-06-03T10:28:20.932118Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.932133Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.932142Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-03T10:28:20.932147Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 
2025-06-03T10:28:20.932153Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:28:20.932256Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.932268Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.932272Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710758 2025-06-03T10:28:20.932277Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710758, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:28:20.932281Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:20.932294Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710758, subscribers: 1 2025-06-03T10:28:20.932299Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2151] 2025-06-03T10:28:20.932331Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:28:20.932337Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:28:20.932348Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:20.932937Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.933185Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710758 2025-06-03T10:28:20.933208Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710758 2025-06-03T10:28:20.933222Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710758 2025-06-03T10:28:20.933232Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:20.933237Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758 2025-06-03T10:28:20.933247Z node 4 :EXPORT DEBUG: 
schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710758, id# 103, itemIdx# 4294967295 2025-06-03T10:28:20.933351Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:28:20.933681Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 103 2025-06-03T10:28:20.933736Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:28:20.933745Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:28:20.933821Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:28:20.933844Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:28:20.933850Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:551:2509] TestWaitNotification: OK eventTxId 103 >> KqpPg::InsertNoTargetColumns_Alter-useSink [GOOD] >> KqpPg::InsertNoTargetColumns_Serial+useSink >> TExportToS3Tests::CancelUponCreatingExportDirShouldSucceed [GOOD] >> TExportToS3Tests::SchemaMapping [GOOD] >> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::Changefeeds [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:17.266612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:17.266641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:17.266646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:17.266651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:17.266662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:17.266666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:17.266676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:17.266688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:17.266800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:17.266883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:17.280848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:17.280882Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:17.286536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:17.286653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:17.286686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:17.291057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:17.291120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:17.291245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:17.291301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:17.291923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:17.291967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:17.292239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:17.292250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:17.292260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:17.292271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:17.292277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:17.292303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.293730Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:17.319276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:17.319391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.319477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:17.319541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:17.319576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.321805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:17.321856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:17.321957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.321974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:17.321981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:17.321989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:17.322767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.322787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:17.322796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:17.323496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.323509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:17.323517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:17.323525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:28:17.324408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:17.324974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:17.325028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:17.325277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:17.325350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:17.325367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:17.325464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:17.325474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:17.325520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:17.325536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:17.326171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:17.326184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:17.326249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
10:28:21.190253Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.190257Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 7 2025-06-03T10:28:21.190261Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-03T10:28:21.190270Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-03T10:28:21.191216Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:21.191261Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191268Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-03T10:28:21.191275Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191297Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-03T10:28:21.191326Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000010 2025-06-03T10:28:21.191472Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191501Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 21474838635 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191510Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191542Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-03T10:28:21.191553Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:28:21.191558Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.191564Z node 5 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:28:21.191567Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.191577Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:21.191588Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-03T10:28:21.191593Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-03T10:28:21.191600Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.191606Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-03T10:28:21.191610Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710761:0 2025-06-03T10:28:21.191624Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-03T10:28:21.191630Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-03T10:28:21.191635Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 12 2025-06-03T10:28:21.191639Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 9], 18446744073709551615 2025-06-03T10:28:21.191732Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.191751Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.192156Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:21.192170Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:21.192208Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 9] 2025-06-03T10:28:21.192234Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:21.192240Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: 
[5:205:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-03T10:28:21.192245Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 9 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-03T10:28:21.192407Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.192420Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.192425Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.192431Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 12 2025-06-03T10:28:21.192437Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:21.192534Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.192548Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.192553Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.192557Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-03T10:28:21.192562Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-03T10:28:21.192573Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-03T10:28:21.192578Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:126:2151] 2025-06-03T10:28:21.193366Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.193483Z 
node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.193506Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:28:21.193520Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:28:21.193531Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:21.193537Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:28:21.193544Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 105, itemIdx# 4294967295 2025-06-03T10:28:21.193971Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:21.193997Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:28:21.194006Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [5:1379:3167]
TestWaitNotification: OK eventTxId 105
>> Cdc::NewAndOldImagesLog[PqRunner] [GOOD]
>> Cdc::NewAndOldImagesLog[YdsRunner]
>> SystemView::ConcurrentScans [GOOD]
>> SystemView::PDisksFields
>> TExportToS3Tests::ShouldSucceedOnSingleShardTable
>> KqpPg::CheckPgAutoParams-useSink [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::DisableAutoDropping [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:19.107879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:19.107915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.107923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:19.107929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:19.107943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:19.107948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:19.107961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.107981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:19.108130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:19.108244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:19.124588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:19.124618Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:19.129502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:19.129635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:19.129685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:19.133365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:19.133444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:19.133656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.133736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:19.134694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.134780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:19.135209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.135224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.135241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:19.135251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.135259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:19.135280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.137094Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:19.163079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:19.163172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.163243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:19.163300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:19.163331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.164453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.164492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:19.164551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.164562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:19.164568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:19.164574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:19.165013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.165025Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:19.165032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:19.165462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.165477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.165483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.165491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:28:19.166246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:19.166775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:19.166828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:19.167071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.167101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:19.167111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.167194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:19.167203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.167239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:19.167253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:19.167751Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.167762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.167815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
T10:28:21.820800Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.820804Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 7 2025-06-03T10:28:21.820810Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:28:21.820824Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-03T10:28:21.822921Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:21.822971Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:21.822980Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 0/1, is published: true 2025-06-03T10:28:21.822986Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:21.823201Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710761:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710761 msg type: 269090816 2025-06-03T10:28:21.823245Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710761, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:28:21.823502Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Add transaction: 281474976710761 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000007 2025-06-03T10:28:21.823609Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:21.823642Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 21474838635 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:21.823652Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000007, at schemeshard: 72057594046678944 2025-06-03T10:28:21.823698Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-03T10:28:21.823712Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:28:21.823717Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.823724Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:28:21.823727Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.823744Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:21.823758Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:28:21.823765Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-03T10:28:21.823774Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:28:21.823779Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-03T10:28:21.823784Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710761:0 2025-06-03T10:28:21.823811Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:28:21.823817Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-03T10:28:21.823822Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:28:21.823826Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:28:21.824035Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.824639Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:21.824659Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:21.824723Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:28:21.824756Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:21.824763Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at 
schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-03T10:28:21.824773Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [5:205:2206], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 3 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-03T10:28:21.825067Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.825095Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.825103Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.825112Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:28:21.825121Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:21.825326Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.825352Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.825360Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:28:21.825368Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:28:21.825376Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:28:21.825395Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-03T10:28:21.825404Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [5:126:2151] 2025-06-03T10:28:21.829238Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.829503Z node 5 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:21.829534Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:28:21.829580Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:28:21.829604Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:21.829612Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:28:21.829620Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-03T10:28:21.830383Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:21.830411Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:28:21.830418Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:612:2567]
TestWaitNotification: OK eventTxId 102
>> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD]
>> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::SchemaMapping [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:19.666471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:19.666494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.666499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:19.666504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:19.666517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:19.666521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:19.666530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.666544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s,
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:19.666650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:19.666727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:19.683646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:19.683674Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:19.688346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:19.688467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:19.688509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:19.691229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:19.691298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:19.691449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.691519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:19.692384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.692433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:19.692763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.692776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.692789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:19.692798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.692804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:19.692825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.694393Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:19.717817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { 
Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:19.717908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.717980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:19.718045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:19.718075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.718960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.718992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:19.719058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.719070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:19.719076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:19.719082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:19.719611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.719624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:19.719631Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:19.720254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.720275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.720282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.720290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:19.721130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 
message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:19.721744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:19.721792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:19.722085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.722123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:19.722132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.722213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:19.722222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.722257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:19.722270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:19.723066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.723080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.723139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
10:28:22.246590Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:22.246598Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 11 2025-06-03T10:28:22.246606Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:28:22.246630Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-03T10:28:22.247233Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:22.247309Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247317Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 0/1, is published: true 2025-06-03T10:28:22.247324Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710763, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247475Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976710763:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:281474976710763 msg type: 269090816 2025-06-03T10:28:22.247511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976710763, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:28:22.247592Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 FAKE_COORDINATOR: Add transaction: 281474976710763 at step: 5000010 FAKE_COORDINATOR: advance: minStep5000010 State->FrontStep: 5000009 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710763 at step: 5000010 2025-06-03T10:28:22.247663Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000010, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247692Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710763 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871339 } } Step: 5000010 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247703Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710763:0, step: 5000010, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247739Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710763:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.247753Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-03T10:28:22.247758Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:22.247765Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710763:0 progress is 1/1 2025-06-03T10:28:22.247769Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:22.247782Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:22.247795Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:22.247802Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710763, ready parts: 1/1, is published: false 2025-06-03T10:28:22.247809Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710763 ready parts: 1/1 2025-06-03T10:28:22.247819Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710763:0 2025-06-03T10:28:22.247824Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710763:0 2025-06-03T10:28:22.247837Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:28:22.247845Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710763, publications: 2, subscribers: 1 2025-06-03T10:28:22.247850Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-03T10:28:22.247855Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710763, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:28:22.249152Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.249632Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:22.249648Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:22.249714Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710763, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:22.249752Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:22.249759Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at 
schemeshard: 72057594046678944, txId: 281474976710763, path id: 1 2025-06-03T10:28:22.249766Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710763, path id: 4 FAKE_COORDINATOR: Erasing txId 281474976710763 2025-06-03T10:28:22.249985Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.250000Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.250006Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:22.250012Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-03T10:28:22.250019Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:28:22.250110Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.250124Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.250129Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710763 2025-06-03T10:28:22.250134Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710763, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:28:22.250139Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:22.250149Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710763, subscribers: 1 2025-06-03T10:28:22.250155Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2151] 2025-06-03T10:28:22.251726Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.251833Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710763 2025-06-03T10:28:22.251855Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710763 2025-06-03T10:28:22.251873Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710763 2025-06-03T10:28:22.251888Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:22.251896Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763 2025-06-03T10:28:22.251903Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710763, id# 103, itemIdx# 4294967295 2025-06-03T10:28:22.252340Z node 4 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:22.252369Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:28:22.252378Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:852:2780] TestWaitNotification: OK eventTxId 103 >> SystemView::ShowCreateTableColumn [GOOD] >> SystemView::ShowCreateTableKeyBloomFilter >> TExportToS3Tests::ShouldSucceedOnSingleShardTable [GOOD] >> TExportToS3Tests::ShouldSucceedOnMultiShardTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::CheckPgAutoParams-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 2501, MsgBus: 14549 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023ca/r3tmp/tmp2BCxE8/pdisk_1.dat 2025-06-03T10:28:00.489414Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:00.514163Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:00.514255Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667934433763490:2079] 1748946480357489 != 1748946480357492 TServer::EnableGrpc on GrpcPort 2501, node 1 2025-06-03T10:28:00.564503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:00.564532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:00.565538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:00.565551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:00.565553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:00.565599Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:00.566009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14549 TClient is connected to server localhost:14549 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:00.703385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:00.714021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:01.162519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.234259Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-03T10:28:01.254760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.297759Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill \x62797465612030 \x62797465612030 \x62797465612031 \x62797465612031 \x62797465612032 \x62797465612032 \x62797465612033 \x62797465612033 \x62797465612034 \x62797465612034 \x62797465612035 \x62797465612035 \x62797465612036 \x62797465612036 \x62797465612037 \x62797465612037 \x62797465612038 \x62797465612038 \x62797465612039 \x62797465612039 2025-06-03T10:28:01.321693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.345558Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} 
{"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-03T10:28:01.364920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.387585Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {"\\x6130","\\x623130"} {"\\x6130","\\x623130"} {"\\x6131","\\x623131"} {"\\x6131","\\x623131"} {"\\x6132","\\x623132"} {"\\x6132","\\x623132"} {"\\x6133","\\x623133"} {"\\x6133","\\x623133"} {"\\x6134","\\x623134"} {"\\x6134","\\x623134"} {"\\x6135","\\x623135"} {"\\x6135","\\x623135"} {"\\x6136","\\x623136"} {"\\x6136","\\x623136"} {"\\x6137","\\x623137"} {"\\x6137","\\x623137"} {"\\x6138","\\x623138"} {"\\x6138","\\x623138"} {"\\x6139","\\x623139"} {"\\x6139","\\x623139"} 2025-06-03T10:28:01.400739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.417961Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-06-03T10:28:01.433455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.457918Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill f f t t 2025-06-03T10:28:01.473449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.555302Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-06-03T10:28:01.570610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715686:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.598654Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {f,f} {f,f} {t,t} {t,t} 2025-06-03T10:28:01.618324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.639259Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-03T10:28:01.678041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.716564Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-03T10:28:01.735353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715698:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.751146Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-06-03T10:28:01.763323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715702:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.822695Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-06-03T10:28:01.835883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715706:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.850250Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-03T10:28:01.860710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715710:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.874968Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 2025-06-03T10:28:01.889985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715714:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.906607Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-06-03T10:28:01.918158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715718:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.931204Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill {0,0} {0,0} {1,1} {1,1} {2,2} {2,2} {3,3} {3,3} {4,4} {4,4} {5,5} {5,5} {6,6} {6,6} {7,7} {7,7} {8,8} {8,8} {9,9} {9,9} 2025-06-03T10:28:01.938190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation typ ... 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:21.761546Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:21.761608Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11386 2025-06-03T10:28:21.821918Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:21.821954Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:21.823821Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:28:21.841515Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:21.843222Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:22.142862Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511668030617198562:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.142886Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511668030617198589:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.142912Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.144115Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:22.146888Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7511668030617198591:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:22.242310Z node 14 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [14:7511668030617198642:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:22.253893Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:22.403153Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:22.527833Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:468: Get parsing result with error, self: [14:7511668030617198967:2390], owner: [14:7511668030617198551:2319], statement id: 0 2025-06-03T10:28:22.527921Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=14&id=NTcyOGQ3MGQtMjg0MzNhMWUtZjc4OGM5MGMtZDM4NzJkNDU=, ActorId: [14:7511668030617198965:2389], ActorState: ExecuteState, TraceId: 01jwtnbgvyb2tw5d1q905n8dge, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:22.559426Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7511668030617198992:2400], status: GENERIC_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
<main>: Error: At tuple
<main>:1:1: Error: At function: PgWhere, At lambda
<main>:2:55: Error: At function: PgOp
<main>:2:55: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-03T10:28:22.560070Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=14&id=MjBmYzgzNWItY2Q2MGExM2ItYjAwYmY1NzAtNTk4ZGZlMzY=, ActorId: [14:7511668030617198989:2398], ActorState: ExecuteState, TraceId: 01jwtnbgwv01sqwjrbq17n1kcy, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:22.566259Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7511668030617199004:2406], status: GENERIC_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:1: Error: At function: RemovePrefixMembers, At function: PgSelect, At tuple, At tuple, At tuple, At function: PgSetItem, At tuple
<main>: Error: At tuple
<main>:1:1: Error: At function: PgWhere, At lambda
<main>:2:57: Error: At function: PgAnd
<main>:2:67: Error: At function: PgOp
<main>:2:67: Error: Unable to find an overload for operator = with given argument type(s): (text,int4) 2025-06-03T10:28:22.567204Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=14&id=OWNhMmYzMTctMTU5ZTQ4YTYtNGQxMTY4M2QtZjE3MzRjYQ==, ActorId: [14:7511668030617199001:2404], ActorState: ExecuteState, TraceId: 01jwtnbgx1bfwh4dp4byqve6np, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:22.571777Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtnbgx916wke5f17ygjy8qp, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OTgxMjVjODYtNjlmYjM4MmUtODU1ODcwN2EtZmM4ZWEwNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-03T10:28:22.571887Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=OTgxMjVjODYtNjlmYjM4MmUtODU1ODcwN2EtZmM4ZWEwNDU=, ActorId: [14:7511668030617199013:2410], ActorState: ExecuteState, TraceId: 01jwtnbgx916wke5f17ygjy8qp, Create QueryResponse for error on request, msg: 2025-06-03T10:28:22.580311Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:28:22.601643Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:22.624632Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7511668030617199180:2435], status: GENERIC_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:1: Error: At function: KiWriteTable!
<main>:1:1: Error: values have 3 columns, INSERT INTO expects: 2 2025-06-03T10:28:22.625433Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=14&id=ZjBlMTM1YjMtNjZiYWE0NjgtZjI0ZTYzNjgtZWFlODg2NTY=, ActorId: [14:7511668030617199177:2433], ActorState: ExecuteState, TraceId: 01jwtnbgyt4jbsjvqg54hbsmkj, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:22.630121Z node 14 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [14:7511668030617199193:2441], status: GENERIC_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:1:1: Error: At function: KiWriteTable!
<main>:1:1: Error: Failed to convert type: List> to List>
<main>:1:1: Error: Failed to convert 'id': pgunknown to Optional
<main>:1:1: Error: Row type mismatch for table: db.[/Root/PgTable2] 2025-06-03T10:28:22.630650Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=14&id=ODU0NGU0MTUtZmY2ODg3YWQtNTdhNGM0ZTUtODVjNWY0YTg=, ActorId: [14:7511668030617199190:2439], ActorState: ExecuteState, TraceId: 01jwtnbgz2apwx1yp3wndmgpvn, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:22.692402Z node 14 :KQP_EXECUTER CRIT: kqp_literal_executer.cpp:112: ActorId: [0:0:0] TxId: 0. Ctx: { TraceId: 01jwtnbgz74e4pt4gyybphbejy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=MzIzODI3Ni1kNTk1NTY4Ny1jNTg4MWNhNy05MjViN2E1, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. TKqpLiteralExecuter, unexpected exception caught: (NKikimr::NMiniKQL::TTerminateException) Terminate was called, reason(51): ERROR: invalid input syntax for type integer: "a" 2025-06-03T10:28:22.692572Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=MzIzODI3Ni1kNTk1NTY4Ny1jNTg4MWNhNy05MjViN2E1, ActorId: [14:7511668030617199202:2445], ActorState: ExecuteState, TraceId: 01jwtnbgz74e4pt4gyybphbejy, Create QueryResponse for error on request, msg: 2025-06-03T10:28:22.701474Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:28:22.794063Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037892 not found 2025-06-03T10:28:22.794629Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::InsertNoTargetColumns_Serial+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 18395, MsgBus: 11987 2025-06-03T10:27:59.872496Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667930158308117:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:59.872756Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023ff/r3tmp/tmpALS5xq/pdisk_1.dat 2025-06-03T10:28:00.069729Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:00.070665Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667930158307948:2079] 1748946479861057 != 1748946479861060 2025-06-03T10:28:00.076837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:00.076870Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:00.078133Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18395, node 1 2025-06-03T10:28:00.113572Z node 1 :NET_CLASSIFIER WARN:
net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:00.113586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:00.113589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:00.113635Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11987 TClient is connected to server localhost:11987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:00.259257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:00.265732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 16 2025-06-03T10:28:00.446306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.501551Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:00.503341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.522471Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:00.530663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667934453276073:2346], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.530685Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.530826Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667934453276085:2349], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:00.531791Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:00.538868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-03T10:28:00.538976Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667934453276087:2350], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:00.628784Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667934453276138:2434] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } f f t t 18 2025-06-03T10:28:00.716549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.758244Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:00.759312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.777817Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 21 2025-06-03T10:28:00.888370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.958318Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:00.959290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.999990Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 23 2025-06-03T10:28:01.078821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.105062Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:01.106095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.123928Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 20 2025-06-03T10:28:01.205901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.224007Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:01.225167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId:
281474976715686:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.244529Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 700 2025-06-03T10:28:01.405329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.438254Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:01.439065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.466144Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0.5 0.5 1.5 1.5 2.5 2.5 3.5 3.5 4.5 4.5 5.5 5.5 6.5 6.5 7.5 7.5 8.5 8.5 9.5 9.5 701 2025-06-03T10:28:01.551873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715696:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.603357Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:01.604522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715698:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.623334Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 0.5 0.5 1.5 1.5 2.5 2.5 3.5 3.5 4.5 4.5 5.5 5.5 6.5 6.5 7.5 7.5 8.5 8.5 9.5 9.5 25 2025-06-03T10:28:01.693741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715702:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.728304Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:01.729391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsaf ... 
hard: 72057594046644480 Trying to start YDB, gRPC: 7816, MsgBus: 9272 2025-06-03T10:28:21.207811Z node 11 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[11:7511668026404907860:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:21.207877Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023ff/r3tmp/tmpZXKTQN/pdisk_1.dat 2025-06-03T10:28:21.245569Z node 11 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7816, node 11 2025-06-03T10:28:21.263473Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:21.263490Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:21.263492Z node 11 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:21.263543Z node 11 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9272 2025-06-03T10:28:21.318893Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:21.318929Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:21.319832Z node 11 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(11, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9272 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:21.344528Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:21.353878Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:21.712067Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511668026404908473:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:21.712099Z node 11 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [11:7511668026404908484:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:21.712110Z node 11 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:21.713212Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:21.718992Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:21.719109Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [11:7511668026404908487:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:21.775589Z node 11 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [11:7511668026404908538:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:21.780391Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:21.849778Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 15703, MsgBus: 10035 2025-06-03T10:28:22.148565Z node 12 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[12:7511668031418207923:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:22.148871Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023ff/r3tmp/tmpzcfJKm/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15703, node 12 2025-06-03T10:28:22.181136Z node 12 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:22.193584Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:22.193600Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:22.193604Z node 12 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:22.193670Z node 12 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10035 2025-06-03T10:28:22.254743Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:22.254782Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:22.255937Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(12, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10035 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:22.284402Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:22.286444Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:22.615609Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7511668031418208445:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.615633Z node 12 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.615781Z node 12 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [12:7511668031418208480:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:22.616635Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:22.619263Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:22.619444Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [12:7511668031418208482:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:22.698160Z node 12 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [12:7511668031418208533:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:22.703417Z node 12 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 >> SystemView::AuthOwners_Access [GOOD] >> SystemView::AuthOwners_ResultOrder >> Cdc::DocApi[PqRunner] >> TExportToS3Tests::UidAsIdempotencyKey >> TExportToS3Tests::ShouldSucceedOnMultiShardTable [GOOD] >> TExportToS3Tests::ShouldPreserveIncrBackupFlag [GOOD] >> Cdc::NewAndOldImagesLog[YdsRunner] [GOOD] >> Cdc::NewAndOldImagesLog[TopicRunner] >> TExportToS3Tests::ShouldSucceedOnManyTables >> TExportToS3Tests::ShouldExcludeBackupTableFromStats >> TExportToS3Tests::UidAsIdempotencyKey [GOOD] >> Cdc::UuidExchange[PqRunner] >> ShowCreateView::WithTwoTablePathPrefixes [GOOD] >> SystemView::AuthGroups >> SystemView::TopPartitionsByCpuTables [GOOD] >> SystemView::TopPartitionsByCpuRanges >> TExportToS3Tests::UserSID >> SystemView::AuthUsers_Access [GOOD] >> SystemView::AuthUsers_ResultOrder >> KqpPg::TableInsert+useSink [GOOD] >> KqpPg::TableInsert-useSink >> TExportToS3Tests::UserSID [GOOD] >> TExportToS3Tests::Topics >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWoIndexes [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex >> TEvLocalSyncDataTests::SqueezeBlocks1 [GOOD] >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] >> TExportToS3Tests::ShouldSucceedOnManyTables [GOOD] >> TExportToS3Tests::Topics [GOOD] >> TExportToS3Tests::TopicsWithPermissions |63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview_reboots/unittest >> TExportToS3Tests::TablePermissions >> TExportToS3Tests::TopicsWithPermissions [GOOD] >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots |63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/syncer/ut/unittest >> TEvLocalSyncDataTests::SqueezeBlocks2 [GOOD] |63.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |63.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut |63.9%| [LD] {RESULT} $(B)/ydb/core/http_proxy/ut/ydb-core-http_proxy-ut >> TExportToS3Tests::TablePermissions [GOOD] >> Cdc::NewAndOldImagesLog[TopicRunner] [GOOD] >> Cdc::NewAndOldImagesLogDebezium >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleDataShardReboot [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithSyncIndex [GOOD] >> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex >> SystemView::AuthOwners_ResultOrder [GOOD] >> SystemView::AuthOwners_TableRange >> Cdc::UuidExchange[PqRunner] [GOOD] >> Cdc::UuidExchange[YdsRunner] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TopicsWithPermissions [GOOD] Test command err: Leader
for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:24.074846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:24.074872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:24.074878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:24.074884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:24.074896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:24.074900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:24.074909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:24.074922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:24.075036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:24.075118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:24.090246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:24.090275Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:24.095437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:24.095535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:24.095564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:24.099505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:24.099579Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:24.099709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:24.099771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:24.101066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:24.101122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:24.101468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:24.101484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:24.101498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:24.101508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:24.101515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:24.101540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.103186Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:24.125402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:24.125493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.125558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:24.125609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:24.125637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.126871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:24.126909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:24.126979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:28:24.126991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:24.126997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:24.127003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:24.127858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.127875Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:24.127881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:24.128645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.128663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:24.128671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:24.128680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:24.129381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:24.129866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:24.129914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:24.130137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:24.130174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:24.130182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:24.130261Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:24.130270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:24.130304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:24.130317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:24.130849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:24.130860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:24.130909Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... RD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:25.177346Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710757, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:25.177389Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710757, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:28:25.177405Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:25.177410Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 1 2025-06-03T10:28:25.177417Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710757, path id: 3 2025-06-03T10:28:25.177550Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710757:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.177562Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710757:0 ProgressState 2025-06-03T10:28:25.177581Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-03T10:28:25.177588Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-03T10:28:25.177596Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710757:0 progress is 1/1 2025-06-03T10:28:25.177605Z node 4 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-03T10:28:25.177612Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710757, ready parts: 1/1, is published: false 2025-06-03T10:28:25.177621Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710757 ready parts: 1/1 2025-06-03T10:28:25.177629Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710757:0 2025-06-03T10:28:25.177634Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710757:0 2025-06-03T10:28:25.177651Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:28:25.177657Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710757, publications: 2, subscribers: 1 2025-06-03T10:28:25.177661Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:28:25.177665Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710757, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:28:25.177868Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.177883Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.177890Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710757 2025-06-03T10:28:25.177895Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:28:25.177901Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:25.178028Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.178039Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.178044Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710757 
2025-06-03T10:28:25.178048Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710757, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:28:25.178052Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:28:25.178066Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710757, subscribers: 1 2025-06-03T10:28:25.178071Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [4:126:2151] 2025-06-03T10:28:25.178950Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.179218Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710757 2025-06-03T10:28:25.179244Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710757 2025-06-03T10:28:25.179258Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710757 2025-06-03T10:28:25.181328Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: PathId: 2 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:25.181868Z node 4 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:44: Tablet 72057594046678944 describe pathId 2 took 64us result status StatusSuccess 2025-06-03T10:28:25.182018Z node 4 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Topic" PathDescription { Self { Name: "Topic" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 1 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "Topic" PathId: 2 TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { LifetimeSeconds: 10 } YdbDatabasePath: "/MyRoot" } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } Partitions { PartitionId: 1 TabletId: 72075186233409546 Status: Active } AlterVersion: 1 BalancerTabletID: 72075186233409548 NextPartitionId: 2 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 
UsedReserveSize: 0 } } PQPartitionsInside: 2 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
TestWaitNotification wait txId: 102
2025-06-03T10:28:25.186019Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-06-03T10:28:25.186039Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
2025-06-03T10:28:25.186159Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944
2025-06-03T10:28:25.186167Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944
REQUEST: PUT /create_topic.pb HTTP/1.1
HEADERS:
Host: localhost:4586
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 44C18413-67B3-405B-806C-8D500D698801
amz-sdk-request: attempt=1
content-length: 468
content-md5: eolrX6cGdcMGCBM8sb+6PQ==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /create_topic.pb / / 468
REQUEST: PUT /permissions.pb HTTP/1.1
HEADERS:
Host: localhost:4586
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 9FEFF3BB-D7A3-44D2-9EB8-A5A8F811D6E7
amz-sdk-request: attempt=1
content-length: 43
content-md5: JIqMFsQjXF0c+sG0y+coog==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /permissions.pb / / 43
REQUEST: PUT /metadata.json HTTP/1.1
HEADERS:
Host: localhost:4586
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: D4EC6D66-7E68-4E1B-B73C-595D116E5C32
amz-sdk-request: attempt=1
content-length: 31
content-md5: NIbLWVScnysfZNPAOZgBoA==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
x-amz-storage-class: STANDARD
S3_MOCK::HttpServeWrite: /metadata.json / / 31
2025-06-03T10:28:25.191879Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-06-03T10:28:25.191900Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:621:2549]
TestWaitNotification: OK eventTxId 102
>> KqpPg::ValuesInsert+useSink [GOOD]
>> KqpPg::ValuesInsert-useSink
>> SystemView::ShowCreateTableReadReplicas [GOOD]
>> SystemView::ShowCreateTableTtlSettings
>> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots
>> TExportToS3Tests::CancelUponCopyingTablesShouldSucceed [GOOD]
>> TExportToS3Tests::AuditCompletedExport
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::TablePermissions [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:28:23.086040Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:23.086071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:23.086077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:23.086083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:23.086096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:23.086100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:23.086110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:23.086123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:23.086231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:23.086309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:23.101170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:23.101199Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:23.105809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:23.105937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:23.105982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:23.108851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:23.108919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:23.109068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:23.109131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:23.109988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:23.110053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: 
[RootDataErasureManager] Stop 2025-06-03T10:28:23.110367Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:23.110380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:23.110403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:23.110412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:23.110418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:23.110439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.111947Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:23.132614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:23.132700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.132757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:23.132797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:23.132821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.133731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:23.133766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:23.133849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.133861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:23.133868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 
ProgressState no shards to create, do next state 2025-06-03T10:28:23.133873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:23.134487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.134519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:23.134527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:23.135005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.135018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:23.135025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:23.135033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:23.135810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:23.136379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:23.136428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:23.136662Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:23.136707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:23.136720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:23.136796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:23.136804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:23.136838Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:23.136853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:23.137359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:23.137366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:23.137403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710759 at step: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72075186233409547 for txId: 281474976710759 at step: 5000005 2025-06-03T10:28:25.505190Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:25.505217Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710759 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 17179871339 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:25.505227Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 281474976710759:0 HandleReply TEvOperationPlan, stepId: 5000005, at schemeshard: 72057594046678944 2025-06-03T10:28:25.505283Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710759:0 128 -> 129 2025-06-03T10:28:25.505353Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:8791 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 04DD17E8-5C6D-4541-AF19-9A54225CE082 amz-sdk-request: attempt=1 content-length: 73 content-md5: q/ySd5GvS6I/qOVxS/4Thg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 73 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-03T10:28:25.516626Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:25.516652Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:25.516773Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944
2025-06-03T10:28:25.516781Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4
2025-06-03T10:28:25.516974Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-06-03T10:28:25.516990Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 281474976710759
2025-06-03T10:28:25.518089Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759
2025-06-03T10:28:25.518133Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759
2025-06-03T10:28:25.518142Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759
2025-06-03T10:28:25.518151Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3
2025-06-03T10:28:25.518162Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4
2025-06-03T10:28:25.518202Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true
REQUEST: PUT /permissions.pb HTTP/1.1
HEADERS:
Host: localhost:8791
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: DBBDA96F-94E6-4F41-A6B9-A1ACCC7CE60A
amz-sdk-request: attempt=1
content-length: 137
content-md5: WeIr3D5bqIjvqMGEjx2JrA==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /permissions.pb / / 137
2025-06-03T10:28:25.520748Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759
REQUEST: PUT /scheme.pb HTTP/1.1
HEADERS:
Host: localhost:8791
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 656B62F9-3988-469B-9479-BC84BDE308DA
amz-sdk-request: attempt=1
content-length: 355
content-md5: 4DhJNWgTpoG3PVvZ0uCHUA==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /scheme.pb / / 355
REQUEST: PUT /data_00.csv HTTP/1.1
HEADERS:
Host: localhost:8791
Accept: */*
Connection: Upgrade, HTTP2-Settings
Upgrade: h2c
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA
amz-sdk-invocation-id: 7A377979-1C17-45BF-8BD9-73BAE1B4C328
amz-sdk-request: attempt=1
content-length: 0
content-md5: 1B2M2Y8AsgTpgAmY7PhCfg==
content-type: binary/octet-stream
user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8
S3_MOCK::HttpServeWrite: /data_00.csv / / 0
2025-06-03T10:28:25.535850Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871604 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-06-03T10:28:25.535894Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0
2025-06-03T10:28:25.535942Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 452 RawX2: 17179871604 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-06-03T10:28:25.535962Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 452 RawX2: 17179871604 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 0 RowsProcessed: 0 }
2025-06-03T10:28:25.535984Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944
2025-06-03T10:28:25.535991Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-06-03T10:28:25.535997Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944
2025-06-03T10:28:25.536008Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710759:0 129 -> 240
2025-06-03T10:28:25.536083Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:28:25.537123Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-06-03T10:28:25.537209Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944
2025-06-03T10:28:25.537222Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710759:0 ProgressState
2025-06-03T10:28:25.537271Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1
2025-06-03T10:28:25.537277Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-06-03T10:28:25.537290Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1
2025-06-03T10:28:25.537312Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-06-03T10:28:25.537318Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true
2025-06-03T10:28:25.537346Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2151] message: TxId: 281474976710759
2025-06-03T10:28:25.537359Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1
2025-06-03T10:28:25.537365Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0
2025-06-03T10:28:25.537371Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710759:0
2025-06-03T10:28:25.537427Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3
2025-06-03T10:28:25.538959Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759
2025-06-03T10:28:25.539001Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710759
2025-06-03T10:28:25.539852Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-06-03T10:28:25.539874Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [4:482:2442]
TestWaitNotification: OK eventTxId 103
|63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest
>> SystemView::AuthGroups [GOOD]
>> SystemView::AuthGroupMembers
>> Cdc::DocApi[PqRunner] [GOOD]
>> Cdc::DocApi[YdsRunner]
>> Cdc::NewAndOldImagesLogDebezium [GOOD]
>> Cdc::OldImageLogDebezium
>> AsyncIndexChangeExchange::SenderShouldBeActivatedOnTableWithAsyncIndex [GOOD]
>> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce
>> SystemView::AuthUsers_ResultOrder [GOOD]
>> SystemView::AuthUsers_TableRange
>> Cdc::UuidExchange[YdsRunner] [GOOD]
>> Cdc::UuidExchange[TopicRunner]
|63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest
>> TExportToS3Tests::AuditCompletedExport [GOOD]
>> TExportToS3Tests::AuditCancelledExport
>> Cdc::OldImageLogDebezium [GOOD]
>> Cdc::NewImageLogDebezium
>> TExportToS3Tests::AuditCancelledExport [GOOD]
>> AsyncIndexChangeExchange::SenderShouldShakeHandsOnce [GOOD]
>> TExportToS3Tests::AutoDropping
>> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice
>> SystemView::PDisksFields [GOOD]
>> SystemView::GroupsFields
>> SystemView::ShowCreateTableKeyBloomFilter [GOOD]
>> SystemView::ShowCreateTable
>> FolderServiceTest::TFolderServiceTransitional
>> Cdc::UuidExchange[TopicRunner] [GOOD]
>> Cdc::UpdatesLog[PqRunner]
>> TPQTest::TestPQReadAhead [GOOD]
>> TPQTest::TestOwnership
>> TExportToS3Tests::AutoDropping [GOOD]
>> TAccessServiceTest::Authenticate
>> SystemView::AuthOwners_TableRange [GOOD]
>> SystemView::AuthPermissions
>> SystemView::AuthGroupMembers [GOOD]
>> SystemView::AuthGroupMembers_Access
>> Cdc::DocApi[YdsRunner] [GOOD]
>> Cdc::DocApi[TopicRunner]
>> KqpPg::TableDeleteAllData+useSink [GOOD]
>> KqpPg::TableDeleteAllData-useSink
>> SystemView::AuthUsers_TableRange [GOOD]
>> SystemView::AuthPermissions_ResultOrder
>> TAccessServiceTest::Authenticate [GOOD]
>> Cdc::NewImageLogDebezium [GOOD]
>> Cdc::NaN[PqRunner]
|63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest
>> AsyncIndexChangeExchange::SenderShouldShakeHandsTwice [GOOD]
>> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::AutoDropping [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046678944 is [1:123:2148] sender: [1:128:2058] recipient: [1:110:2141]
2025-06-03T10:28:22.015259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:28:22.015285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:28:22.015291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:28:22.015297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:28:22.015309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:28:22.015314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:28:22.015324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:28:22.015338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:28:22.015466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:28:22.015553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:28:22.031042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:28:22.031070Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:22.037828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:22.038234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:22.038283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:22.041542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:22.041596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:22.041743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:22.042031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:22.043035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:22.043082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:22.043379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:22.043390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:22.043429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:22.043438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:22.043444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:22.043463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.045065Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:123:2148] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:28:22.067691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:22.067767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.067825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for 
pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:22.067874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:22.067904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.068624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:22.068670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:22.068724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.068734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:22.068740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:22.068746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:22.069464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.069480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:22.069488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:22.070010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.070030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:22.070037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:22.070045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:22.070864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:22.071507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:22.071558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:22.071818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:22.071854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 136 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:22.071862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:22.071942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:22.071950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:22.071986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:22.072000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:22.072504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:22.072513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:22.072557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
-03T10:28:29.091228Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:28:29.091245Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:28:29.091262Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:28:29.091275Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:29.091282Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:28:29.091289Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-03T10:28:29.091758Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:29.091783Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:28:29.091792Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:469:2429] TestWaitNotification: OK eventTxId 102 2025-06-03T10:28:29.092065Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:29.092112Z node 5 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 66us result status StatusSuccess 2025-06-03T10:28:29.092273Z node 5 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 desc: 1 2025-06-03T10:28:29.092359Z node 5 :EXPORT DEBUG: schemeshard_export__forget.cpp:79: TExport::TTxForget, dropping export tables, info: { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Done WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-03T10:28:29.092861Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:29.092877Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:732: TExport::TTxProgress: Resume: id# 102 2025-06-03T10:28:29.092891Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:530: TExport::TTxProgress: Allocate txId: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 } 2025-06-03T10:28:29.092901Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:29.092923Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 102, at schemeshard: 72057594046678944 2025-06-03T10:28:29.092929Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:29.092935Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:852: TExport::TTxProgress: OnAllocateResult: txId# 281474976710762, id# 102 2025-06-03T10:28:29.092942Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:522: TExport::TTxProgress: Drop propose: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 0 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, txId# 281474976710762 2025-06-03T10:28:29.092960Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:29.093848Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "export-102" } Internal: true } TxId: 281474976710762 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:29.093888Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/export-102, pathId: 0, opId: 281474976710762:0, at schemeshard: 72057594046678944 2025-06-03T10:28:29.093929Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710762:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37, at schemeshard: 72057594046678944 2025-06-03T10:28:29.094529Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976710762, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 
72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:29.094591Z node 5 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710762, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/export-102', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37, operation: DROP DIRECTORY, path: /MyRoot/export-102 2025-06-03T10:28:29.094631Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6702: Handle: TEvModifySchemeTransactionResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-03T10:28:29.094643Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6704: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-03T10:28:29.094654Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:29.094660Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:913: TExport::TTxProgress: OnModifyResult: txId# 281474976710762, status# StatusPathDoesNotExist 2025-06-03T10:28:29.094669Z node 5 :EXPORT TRACE: schemeshard_export__create.cpp:914: Message: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/export-102\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000007, drop txId: 281474976710761, source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37" TxId: 281474976710762 SchemeshardId: 72057594046678944 PathId: 3 PathDropTxId: 281474976710761 2025-06-03T10:28:29.094700Z node 5 :EXPORT INFO: schemeshard_export__create.cpp:1095: TExport::TTxProgress: Wait for completion: info# { Id: 102 Uid: '' Kind: S3 DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1] ExportPathId: [OwnerId: 72057594046678944, LocalPathId: 3] UserSID: '(empty maybe)' PeerName: '' State: Dropping WaitTxId: 281474976710761 Issue: '' Items: 1 PendingItems: 0 PendingDropItems: 0 }, itemIdx# 4294967295, txId# 281474976710761 2025-06-03T10:28:29.095127Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete 2025-06-03T10:28:29.095162Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710761, at schemeshard: 72057594046678944 2025-06-03T10:28:29.095184Z node 5 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:28:29.095197Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:28:29.095203Z node 5 :EXPORT DEBUG: 
schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:28:29.095208Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:28:29.095214Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 102, itemIdx# 4294967295 2025-06-03T10:28:29.095630Z node 5 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 102 2025-06-03T10:28:29.095701Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:28:29.095712Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:28:29.095818Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:28:29.095840Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:28:29.095846Z node 5 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [5:682:2637] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::Authenticate [GOOD] Test command err: 2025-06-03T10:28:29.192620Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668059257528685:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:29.192638Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f32/r3tmp/tmp155qZl/pdisk_1.dat 2025-06-03T10:28:29.279179Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668059257528666:2079] 1748946509192467 != 1748946509192470 2025-06-03T10:28:29.281045Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:5239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:29.338665Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:29.343607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:28:29.347531Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:29.347571Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:29.348471Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [50c13c580330] Connect to grpc://localhost:8224 2025-06-03T10:28:29.348960Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [50c13c580330] Request AuthenticateRequest { iam_token: "**** (047D44F1)" } 2025-06-03T10:28:29.349759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:29.358444Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [50c13c580330] Status 7 Permission Denied 2025-06-03T10:28:29.358736Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [50c13c580330] Request AuthenticateRequest { iam_token: "**** (342498C1)" } 2025-06-03T10:28:29.360021Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [50c13c580330] Response AuthenticateResponse { subject { user_account { id: "1234" } } } >> FolderServiceTest::TFolderServiceTransitional [GOOD] >> Cdc::UpdatesLog[PqRunner] [GOOD] >> Cdc::UpdatesLog[YdsRunner] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceTransitional [GOOD] Test command err: 2025-06-03T10:28:28.969975Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668054781765316:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:28.970058Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f34/r3tmp/tmp1tjXkQ/pdisk_1.dat 2025-06-03T10:28:29.030677Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668054781765144:2079] 1748946508969203 != 1748946508969206 2025-06-03T10:28:29.032634Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:63390 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:29.072487Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:29.072523Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:29.073666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:29.104197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:29.107614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:29.108377Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [519fbedf2330] Connect to grpc://localhost:31449 2025-06-03T10:28:29.110826Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [519fbedf2330] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-03T10:28:29.113755Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [519fbedf2330] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:31449: Failed to connect to remote host: Connection refused 2025-06-03T10:28:29.115120Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [519fbedf2330] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-03T10:28:29.115350Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [519fbedf2330] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:31449: Failed to connect to remote host: Connection refused 2025-06-03T10:28:30.116923Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [519fbedf2330] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-03T10:28:30.118286Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [519fbedf2330] Status 5 Not Found 2025-06-03T10:28:30.118549Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [519fbedf2330] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-03T10:28:30.120308Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [519fbedf2330] Response ListFoldersResponse { result { cloud_id: "response_cloud_id" } } >> SystemView::AuthPermissions [GOOD] >> SystemView::AuthPermissions_Access >> Cdc::UpdatesLog[YdsRunner] [GOOD] >> Cdc::UpdatesLog[TopicRunner] >> AsyncIndexChangeExchange::SenderShouldShakeHandsAfterAddingIndex [GOOD] >> 
AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable >> SystemView::AuthGroupMembers_Access [GOOD] >> SystemView::AuthGroupMembers_ResultOrder >> Cdc::DocApi[TopicRunner] [GOOD] >> Cdc::HugeKey[PqRunner] >> Cdc::NaN[PqRunner] [GOOD] >> Cdc::NaN[YdsRunner] >> FolderServiceTest::TFolderService >> SystemView::AuthPermissions_ResultOrder [GOOD] >> SystemView::AuthPermissions_Selects |63.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |63.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |63.9%| [LD] {RESULT} $(B)/ydb/core/viewer/ut/ydb-core-viewer-ut |63.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter >> SystemView::TopPartitionsByCpuFollowers [GOOD] >> SystemView::SystemViewFailOps >> Cdc::UpdatesLog[TopicRunner] [GOOD] >> Cdc::VirtualTimestamps[PqRunner] |63.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |63.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost |63.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/cost/ydb-core-kqp-ut-cost >> TUserAccountServiceTest::Get >> FolderServiceTest::TFolderServiceAdapter [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnFreshTable [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable >> Cdc::HugeKey[PqRunner] [GOOD] >> Cdc::HugeKey[YdsRunner] >> SystemView::AuthPermissions_Access [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderServiceAdapter [GOOD] Test command err: 2025-06-03T10:28:32.913630Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668075416392694:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:32.913962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f1e/r3tmp/tmp6dYE5J/pdisk_1.dat 2025-06-03T10:28:32.980635Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668075416392640:2079] 1748946512912393 != 1748946512912396 2025-06-03T10:28:32.980805Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:5562 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:28:33.014421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:33.014453Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:33.015617Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:33.052946Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:33.058447Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5050bd153930] Connect to grpc://localhost:4413 2025-06-03T10:28:33.058646Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5050bd153930] Request ListFoldersRequest { id: "i_am_exists" } 2025-06-03T10:28:33.060770Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5050bd153930] Response ListFoldersResponse { result { cloud_id: "cloud_from_old_service" } } 2025-06-03T10:28:33.061011Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [5050bd15ab70] Connect to grpc://localhost:17783 2025-06-03T10:28:33.061104Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5050bd15ab70] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-03T10:28:33.062757Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [5050bd15ab70] Response ResolveFoldersResponse { resolved_folders { cloud_id: "cloud_from_new_service" } } 2025-06-03T10:28:33.062907Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5050bd15ab70] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-03T10:28:33.063430Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5050bd15ab70] Status 5 Not Found 2025-06-03T10:28:33.063545Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [5050bd153930] Request ListFoldersRequest { id: "i_am_not_exists" } 2025-06-03T10:28:33.064049Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [5050bd153930] Status 5 Not Found >> TUserAccountServiceTest::Get [GOOD] >> SystemView::AuthGroupMembers_ResultOrder [GOOD] >> SystemView::AuthGroupMembers_TableRange >> KqpPg::TableInsert-useSink [GOOD] >> KqpPg::TempTablesSessionsIsolation ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TUserAccountServiceTest::Get [GOOD] Test command err: 2025-06-03T10:28:33.411061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668076414540017:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:33.411087Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f12/r3tmp/tmpmMMbuy/pdisk_1.dat 2025-06-03T10:28:33.490793Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668076414539995:2079] 1748946513410830 != 1748946513410833 2025-06-03T10:28:33.492982Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:3927 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:28:33.514237Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:33.514274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:33.515388Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:33.553156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
>> SystemView::SystemViewFailOps [GOOD] >> SystemView::TabletsFields >> Cdc::VirtualTimestamps[PqRunner] [GOOD] >> Cdc::VirtualTimestamps[YdsRunner] >> SystemView::AuthPermissions_Selects [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Access [GOOD] Test command err: 2025-06-03T10:28:11.725475Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667982255117134:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.725598Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002aef/r3tmp/tmpkM7nuB/pdisk_1.dat 2025-06-03T10:28:11.861652Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:11.862750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.862767Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.864603Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25777, node 1 2025-06-03T10:28:11.919319Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:11.919340Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:11.919342Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:11.919402Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15396 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:11.980453Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:11.999550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:12.028132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "Tenant1" } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:12.028227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_extsubdomain.cpp:58: TCreateExtSubDomain Propose, path/Root/Tenant1, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.028246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: Tenant1, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:28:12.028320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-03T10:28:12.028370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 1 2025-06-03T10:28:12.028402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:28:12.028407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.028427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:28:12.028439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 2 2025-06-03T10:28:12.038451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715658, response: Status: StatusAccepted TxId: 281474976715658 SchemeshardId: 72057594046644480 PathId: 2, at schemeshard: 72057594046644480 2025-06-03T10:28:12.038496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715658, database: /Root, subject: root@builtin, status: StatusAccepted, operation: CREATE DATABASE, path: /Root/Tenant1 2025-06-03T10:28:12.038568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:28:12.038577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:28:12.038624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 281474976715658, path id: 
[OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-03T10:28:12.038648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:28:12.038657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7511667982255117581:2384], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 1 2025-06-03T10:28:12.038661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:7511667982255117581:2384], at schemeshard: 72057594046644480, txId: 281474976715658, path id: 2 2025-06-03T10:28:12.038672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.038677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.038683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 281474976715658:0, at tablet# 72057594046644480 2025-06-03T10:28:12.038690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976715658 ready parts: 1/1 2025-06-03T10:28:12.039565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 281474976715658 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:12.040121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-03T10:28:12.040144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-03T10:28:12.040146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-03T10:28:12.040151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 1], version: 4 2025-06-03T10:28:12.040158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-03T10:28:12.040227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-03T10:28:12.040240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: 
Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715658 2025-06-03T10:28:12.040241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976715658 2025-06-03T10:28:12.040243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 2 2025-06-03T10:28:12.040245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 3 2025-06-03T10:28:12.040252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976715658, ready parts: 0/1, is published: true waiting... 2025-06-03T10:28:12.040903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976715658:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:281474976715658 msg type: 269090816 2025-06-03T10:28:12.040943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976715658, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:28:12.040975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715658 2025-06-03T10:28:12.040983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, ... 
94046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user6tenant1admin },{ Sid: user2 },{ Sid: user1rootadmin }] Groups: [] } Children [.metadata,Dir1,Dir2,Table0,Tenant1,Tenant2] }] } 2025-06-03T10:28:32.879926Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 4, finished: 0 2025-06-03T10:28:32.879944Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.880143Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata TableId: [72057594046644480:5:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [workload_manager] }] } 2025-06-03T10:28:32.880159Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 0, finished: 0 2025-06-03T10:28:32.880232Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.881783Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager TableId: [72057594046644480:6:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [pools] }] } 2025-06-03T10:28:32.881805Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 0, finished: 0 2025-06-03T10:28:32.881987Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.885745Z node 41 :SYSTEM_VIEWS 
TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools TableId: [72057594046644480:7:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [default] }] } 2025-06-03T10:28:32.885763Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 0, finished: 0 2025-06-03T10:28:32.885888Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.889439Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/.metadata/workload_manager/pools/default TableId: [72057594046644480:8:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindResourcePool DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:28:32.889478Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 6, finished: 0 2025-06-03T10:28:32.889506Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.890144Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-03T10:28:32.890168Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 1, finished: 0 2025-06-03T10:28:32.890184Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 
ResultSet [{ Path: Root/Dir2 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.890479Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir2 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-03T10:28:32.890493Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 0, finished: 0 2025-06-03T10:28:32.890502Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:32.890627Z node 41 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table0 TableId: [72057594046644480:4:1] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:28:32.890638Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668072569734411:2417], row count: 0, finished: 0 2025-06-03T10:28:32.890648Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668072569734411:2417], owner: [41:7511668072569734407:2415], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-06-03T10:28:32.890943Z node 41 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [41:7511668068274765097:2078], database# , query hash# 12107705915200741666, cpu time# 26557 2025-06-03T10:28:32.891108Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946512878, txId: 281474976715692] shutting down 2025-06-03T10:28:32.894862Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 44 2025-06-03T10:28:32.895094Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(44, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:32.895324Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 42 2025-06-03T10:28:32.895367Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(42, (0,0,0,0)) 
VolatileState: Connected -> Disconnected 2025-06-03T10:28:32.895656Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 45 2025-06-03T10:28:32.895776Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(45, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:32.895883Z node 41 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 43 2025-06-03T10:28:32.895983Z node 41 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(43, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:32.897170Z node 43 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:32.895782Z node 45 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:32.899489Z node 41 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[43:7511668071478423891:2109], Type=268959746 2025-06-03T10:28:32.899503Z node 41 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[45:7511668069103670864:2106], Type=268959746 >> FolderServiceTest::TFolderService [GOOD] >> KqpPg::ValuesInsert-useSink [GOOD] >> PgCatalog::PgType >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots [GOOD] >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> FolderServiceTest::TFolderService [GOOD] Test command err: 2025-06-03T10:28:32.477725Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668075140680358:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:32.477770Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f20/r3tmp/tmpvF1OcF/pdisk_1.dat 2025-06-03T10:28:32.546575Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668075140680337:2079] 1748946512477547 != 1748946512477550 2025-06-03T10:28:32.548567Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:26088 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:32.618266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:32.618305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:32.619274Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:32.619310Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:28:32.623722Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [13133c550070] Connect to grpc://localhost:31608 2025-06-03T10:28:32.626167Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13133c550070] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-03T10:28:32.628001Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13133c550070] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:31608: Failed to connect to remote host: Connection refused 2025-06-03T10:28:32.628542Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13133c550070] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-03T10:28:32.628746Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13133c550070] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:31608: Failed to connect to remote host: Connection refused 2025-06-03T10:28:33.628966Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13133c550070] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-03T10:28:33.629249Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13133c550070] Status 14 failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:31608: Failed to connect to remote host: Connection refused 2025-06-03T10:28:34.630731Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13133c550070] Request ResolveFoldersRequest { folder_ids: "i_am_not_exists" } 2025-06-03T10:28:34.631909Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13133c550070] Status 5 Not Found 2025-06-03T10:28:34.632831Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13133c550070] Request ResolveFoldersRequest { folder_ids: "i_am_exists" } 2025-06-03T10:28:34.635775Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13133c550070] Response ResolveFoldersResponse { resolved_folders { cloud_id: "response_cloud_id" } } >> KqpPg::TempTablesSessionsIsolation [GOOD] >> KqpPg::TempTablesDrop >> SystemView::TabletsFields [GOOD] >> SystemView::TabletsShards >> DbCounters::TabletsSimple [GOOD] >> LabeledDbCounters::OneTablet >> Cdc::HugeKey[YdsRunner] [GOOD] >> Cdc::HugeKey[TopicRunner] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthPermissions_Selects [GOOD] Test command err: 2025-06-03T10:28:09.417343Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667973154685849:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:09.417360Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b01/r3tmp/tmpd8ywxv/pdisk_1.dat 2025-06-03T10:28:09.491713Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6898, node 1 2025-06-03T10:28:09.510071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:09.510082Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:09.510084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:09.510131Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:09.518607Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.518642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.519884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8950 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:09.564518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:09.568967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:09.910364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667973154686541:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.910400Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.910510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667973154686553:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.911444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:09.914440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:28:09.914565Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667973154686555:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:10.006415Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667977449653902:2377] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:10.075161Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnb4hn8rz2zragxhkadec2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGM2MDU1YzQtMTAwNDViZDAtM2Q0Y2JmYWItZjc4MmFjMzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.119697Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnb4r55q968ndtzfqtf1jh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGMyOTMwZWItNGFhYzgzZmUtM2QxNmNlMzItNjRjNTk4OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.148255Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511667977449653991:2356] TxId: 281474976715664. Ctx: { TraceId: 01jwtnb4ra3624p2w85aqqdtfx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdhODkxMGItNGVhOGZjOTYtYzFlYTlhZDUtMzRhNGNhZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:28:10.148336Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtnb4ra3624p2w85aqqdtfx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdhODkxMGItNGVhOGZjOTYtYzFlYTlhZDUtMzRhNGNhZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:10.148959Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667977449653998:2361], owner: [1:7511667977449653994:2359], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:28:10.149853Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667977449653998:2361], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:10.150000Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667977449653998:2361], row count: 2, finished: 1 2025-06-03T10:28:10.150008Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667977449653998:2361], owner: [1:7511667977449653994:2359], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:28:10.151249Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946490147, txId: 281474976715663] shutting down 2025-06-03T10:28:10.327886Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667978067493952:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:10.327899Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b01/r3tmp/tmpuRasGl/pdisk_1.dat 2025-06-03T10:28:10.349604Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29915, node 2 2025-06-03T10:28:10.371322Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:10.371338Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:10.371340Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:10.371399Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32104 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:10.428252Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:10.428286Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:10.429463Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:10.435590Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:10.439585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:10.441758Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:10.793180Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511667978067494635:2330], Datab ... Name: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.619368Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-03T10:28:33.619383Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602232:2412], row count: 0, finished: 0 2025-06-03T10:28:33.619395Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.642568Z node 35 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:33.645885Z node 37 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:33.619503Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath 
DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-03T10:28:33.619517Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602232:2412], row count: 0, finished: 0 2025-06-03T10:28:33.619531Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.619569Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-03T10:28:33.619581Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602232:2412], row count: 2, finished: 0 2025-06-03T10:28:33.619600Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [33:7511668078263602232:2412], owner: [33:7511668078263602228:2410], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-06-03T10:28:33.620922Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7511668073968632988:2097], database# , query hash# 3187945588805523718, cpu time# 17742 2025-06-03T10:28:33.621065Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946513618, txId: 281474976715687] shutting down 2025-06-03T10:28:33.639199Z node 33 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715690. Ctx: { TraceId: 01jwtnbvpncwd7mhcq846a6jnj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=33&id=YTgyNGQ1ZGQtYzQ0NGFmODItZjZiZGE0OTMtZDc4YjgwYmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:33.639671Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [33:7511668078263602267:2421], owner: [33:7511668078263602263:2419], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-06-03T10:28:33.639923Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [33:7511668078263602267:2421], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:33.639929Z node 33 :SYSTEM_VIEWS DEBUG: auth_scan_base.h:99: ProceedToScan, tenant name: /Root tenant owner: root@builtin subject sid: empty require admin access: 0 is admin: 1 2025-06-03T10:28:33.639946Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.640006Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [{ Sid: user2 },{ Sid: user1 }] Groups: [] } Children [.metadata,Dir1,Table0,Tenant1,Tenant2] }] } 2025-06-03T10:28:33.640016Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602267:2421], row count: 0, finished: 0 2025-06-03T10:28:33.640027Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.640135Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1 TableId: [72057594046644480:9:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [SubDir1,SubDir2] }] } 2025-06-03T10:28:33.640142Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602267:2421], row count: 0, finished: 0 2025-06-03T10:28:33.640151Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:209: Navigate { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: 
[18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:28:33.640281Z node 33 :SYSTEM_VIEWS TRACE: auth_scan_base.h:170: Got navigate: { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Dir1/SubDir1 TableId: [72057594046644480:10:0] RequestType: ByPath Operation: OpList RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } Children [] }] } 2025-06-03T10:28:33.640299Z node 33 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [33:7511668078263602267:2421], row count: 1, finished: 0 2025-06-03T10:28:33.640320Z node 33 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [33:7511668078263602267:2421], owner: [33:7511668078263602263:2419], scan id: 0, table id: [72057594046644480:1:0:auth_permissions] 2025-06-03T10:28:33.641021Z node 33 :SYSTEM_VIEWS TRACE: sysview_service.cpp:900: Collect query stats: service id# [33:7511668073968632988:2097], database# , query hash# 15123460272068726277, cpu time# 16955 2025-06-03T10:28:33.641162Z node 33 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946513638, txId: 281474976715689] shutting down 2025-06-03T10:28:33.642614Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-03T10:28:33.642783Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:33.642815Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-03T10:28:33.642857Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:33.643069Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-03T10:28:33.643108Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:33.643125Z node 33 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 37 2025-06-03T10:28:33.643198Z node 33 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:33.957223Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:33.958479Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; >> AsyncIndexChangeExchange::ShouldDeliverChangesOnAlteredTable [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex 
>> Cdc::NaN[YdsRunner] [GOOD] >> Cdc::NaN[TopicRunner] >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> PgCatalog::PgType [GOOD] >> PgCatalog::InformationSchema >> SystemView::GroupsFields [GOOD] >> SystemView::Describe ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:28:26.481598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:26.481633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:26.481641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:26.481647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:26.481656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:26.481661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:26.481672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:26.481689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:26.481866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:26.481963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:26.499826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { 
AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:28:26.499856Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:26.499971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:28:26.504077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:26.504199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:26.504233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:26.506399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:26.506480Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:26.506617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:26.506690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:26.507215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:26.507267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:26.507542Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:26.507553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:26.507570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:26.507580Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:26.507587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:26.507630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:28:26.509187Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:28:26.532647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:26.532745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.532817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:26.532862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:26.532873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.533742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:26.533776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:26.533835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.533845Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:26.533851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:26.533858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:26.534320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.534332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:26.534339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:26.534720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.534733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:26.534740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:26.534748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:26.535560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:26.536026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:26.536070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:26.536294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:26.536322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:26.536330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:26.536403Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
Board Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:34.999118Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:28:34.999123Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-03T10:28:34.999128Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:34.999139Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-03T10:28:34.999978Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1005:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1005 msg type: 269090816 2025-06-03T10:28:35.000028Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1005, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1005 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1005 at step: 5000006 2025-06-03T10:28:35.000158Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:35.000187Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1005 Coordinator: 72057594046316545 AckTo { RawX1: 124 RawX2: 137438955621 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:35.000198Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1005:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-03T10:28:35.000237Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:35.000258Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1005:0 128 -> 240 2025-06-03T10:28:35.000305Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:35.000322Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:35.000557Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.000623Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 
2025-06-03T10:28:35.001128Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:35.001139Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:35.001208Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:35.001243Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:35.001250Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [32:204:2205], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-03T10:28:35.001256Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [32:204:2205], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-03T10:28:35.001331Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:28:35.001343Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-03T10:28:35.001362Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:28:35.001367Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.001373Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:28:35.001376Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.001386Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-03T10:28:35.001393Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.001399Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:28:35.001404Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:28:35.001425Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:35.001432Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1005, publications: 2, subscribers: 0 2025-06-03T10:28:35.001437Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-03T10:28:35.001441Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: 
Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:28:35.001562Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.001576Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.001583Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:28:35.001589Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:28:35.001594Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:35.001655Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:28:35.001662Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:28:35.001674Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:35.001703Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.001713Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.001717Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:28:35.001726Z node 32 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-03T10:28:35.001730Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:35.001739Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:28:35.002553Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 
2025-06-03T10:28:35.002581Z node 32 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:28:35.002593Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:28:35.002664Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:28:35.002675Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:28:35.002768Z node 32 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:28:35.002794Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:28:35.002800Z node 32 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [32:406:2396] TestWaitNotification: OK eventTxId 1005 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source_reboots/unittest >> TExternalDataSourceTestReboots::CreateDroppedExternalDataSourceAndDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:28:25.308294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:25.308328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:25.308336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:25.308343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:25.308348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-03T10:28:25.308351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:25.308360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:25.308374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:25.308550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:25.308652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:25.330224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:28:25.330254Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:25.330388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:28:25.334951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:25.335004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:25.335048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:25.336863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:25.336966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:25.337129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:25.337247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:25.338471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:25.338548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:25.338829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:25.338840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:25.338881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:25.338891Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:25.338898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:25.338927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:28:25.341215Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:28:25.367162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:25.367266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.367344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:25.367401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:25.367415Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.368393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:25.368430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:25.368504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.368517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:25.368524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:25.368530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:25.369130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:28:25.369145Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:25.369153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:25.369583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.369597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:25.369604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:25.369611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:25.370406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:25.370912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:25.370962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:25.371195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:25.371230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:25.371238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:25.371328Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1005 at step: 5000006 2025-06-03T10:28:35.034155Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:35.034218Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1005 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 141733922922 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:35.034232Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 1005:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-03T10:28:35.034273Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:35.034306Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1005:0 128 -> 240 2025-06-03T10:28:35.034343Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:35.034353Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:35.034634Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.035029Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-03T10:28:35.035447Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:35.035458Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:35.035507Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:35.035538Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:35.035543Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:207:2208], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-03T10:28:35.035549Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [33:207:2208], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-03T10:28:35.035631Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:28:35.035641Z node 33 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-03T10:28:35.035659Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:28:35.035665Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.035671Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:28:35.035674Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.035685Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-03T10:28:35.035691Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:28:35.035697Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:28:35.035702Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:28:35.035721Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:28:35.035728Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1005, publications: 2, subscribers: 0 2025-06-03T10:28:35.035732Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-03T10:28:35.035736Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:28:35.035851Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.035864Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.035869Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:28:35.035874Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:28:35.035879Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:28:35.035927Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 
72057594046678944 2025-06-03T10:28:35.035933Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:28:35.035945Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:28:35.035974Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.035984Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.035988Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:28:35.035995Z node 33 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-03T10:28:35.035999Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:28:35.036007Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:28:35.036936Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:28:35.036966Z node 33 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:28:35.036978Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:28:35.037046Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:28:35.037055Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:28:35.037137Z node 33 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:28:35.037161Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:28:35.037166Z node 33 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [33:407:2397] TestWaitNotification: OK eventTxId 1005 2025-06-03T10:28:35.037279Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:28:35.037340Z node 33 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 76us result status StatusPathDoesNotExist 2025-06-03T10:28:35.037386Z node 33 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |64.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |64.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector |64.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/ydb-core-tx-datashard-ut_change_collector >> TPQTest::TestOwnership [GOOD] >> TPQTest::TestPQCacheSizeManagement >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> KqpPg::TempTablesDrop [GOOD] >> KqpPg::TempTablesWithCache >> SystemView::ShowCreateTable [GOOD] >> SystemView::ShowCreateTableChangefeeds >> Cdc::VirtualTimestamps[YdsRunner] [GOOD] >> Cdc::VirtualTimestamps[TopicRunner] >> SystemView::TabletsShards [GOOD] >> SystemView::TabletsFollowers |64.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson_ReceiveMessageResult [GOOD] >> TServiceAccountServiceTest::IssueToken [GOOD] >> TAccessServiceTest::PassRequestId >> PgCatalog::InformationSchema [GOOD] >> PgCatalog::CheckSetConfig >> SystemView::AuthGroupMembers_TableRange [GOOD] >> SystemView::AuthEffectivePermissions |64.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoSingleValue [GOOD] >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> SystemView::TabletsFollowers [GOOD] >> SystemView::TabletsRanges |64.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest |64.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoMap [GOOD] >> KqpPg::TableDeleteAllData-useSink [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink >> JsonProtoConversion::JsonToProtoMap [GOOD] >> SystemView::TopPartitionsByCpuRanges [GOOD] >> SystemView::TopPartitionsByTliFields >> TAccessServiceTest::PassRequestId [GOOD] >> Cdc::HugeKey[TopicRunner] [GOOD] >> Cdc::HugeKeyDebezium ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::IssueToken [GOOD] Test command err: 2025-06-03T10:28:35.387822Z node 1 
:METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668086790002152:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:35.387847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f02/r3tmp/tmpDnCF11/pdisk_1.dat 2025-06-03T10:28:35.458240Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668086790002135:2079] 1748946515387650 != 1748946515387653 2025-06-03T10:28:35.460613Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:62950 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:35.528124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:35.528168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:35.529092Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:35.529096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:35.903285Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668086767177286:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:35.903596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f02/r3tmp/tmpfPkQOx/pdisk_1.dat 2025-06-03T10:28:35.919376Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:35.919607Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668086767177264:2079] 1748946515903098 != 1748946515903101 TClient is connected to server localhost:29333 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:36.008861Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:36.008898Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:36.009488Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:36.010061Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:28:36.011768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 >> KqpPg::TempTablesWithCache [GOOD] >> KqpPg::TableDeleteWhere+useSink >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterDroppingIndex [GOOD] >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> SystemView::ShowCreateTableTtlSettings [GOOD] >> SystemView::ShowCreateTableTemporary >> SystemView::Describe [GOOD] >> SystemView::DescribeSystemFolder |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoMap [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TAccessServiceTest::PassRequestId [GOOD] Test command err: 2025-06-03T10:28:36.255957Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668089332098306:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:36.256135Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ef1/r3tmp/tmpP5UhFo/pdisk_1.dat 2025-06-03T10:28:36.332757Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668089332098281:2079] 1748946516255596 != 1748946516255599 2025-06-03T10:28:36.335938Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:20526 
WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:28:36.359170Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:36.359207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:36.360145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:36.403796Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:36.409603Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [72d47d171bf0]{trololo} Connect to grpc://localhost:20210 2025-06-03T10:28:36.410086Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [72d47d171bf0]{trololo} Request AuthenticateRequest { iam_token: "**** (717F937C)" } 2025-06-03T10:28:36.412679Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [72d47d171bf0]{trololo} Response AuthenticateResponse { subject { user_account { id: "1234" } } } >> Cdc::VirtualTimestamps[TopicRunner] [GOOD] >> Cdc::Write[PqRunner] |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldNotCompactAfterDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:43.994338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:43.994373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.994379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:43.994386Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:43.994403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:43.994408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:43.994419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.994435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:43.994555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:43.994627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:44.010635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:44.010661Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:44.015333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:44.015476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:44.015553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:44.017701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:44.017784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:44.017910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.017983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:44.018671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:44.018730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:44.019094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:44.019107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:44.019117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:44.019126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:44.019132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:44.019159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.020706Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:44.043072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:44.043176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.043256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:44.043351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:44.043365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.044299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.044330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:44.044396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.044408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:44.044415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:44.044422Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:44.044902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.044913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:44.044919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:44.045232Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.045240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:44.045247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:44.045255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:44.046037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:44.046475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:44.046522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:44.046738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:44.046764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:44.046787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:44.046882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:44.046891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:44.046930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:44.046942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:44.047372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:44.047382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: 
TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:44.047435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... rId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-03T10:28:36.247266Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-03T10:28:36.247287Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:18.000000Z at schemeshard 72057594046678944 2025-06-03T10:28:36.247313Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-03T10:28:36.247340Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:28:36.257593Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.257625Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.257634Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:28:36.595999Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:28:36.596033Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:28:36.596052Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:315:2301]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-03T10:28:36.596085Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [3:126:2151], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:28:36.596090Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:28:36.636889Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-03T10:28:36.636965Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:31: [BackgroundCompaction] [Start] Compacting for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, compactionInfo# {72057594046678944:1, SH# 1, Rows# 100, Deletes# 0, Compaction# 1970-01-01T00:00:18.000000Z}, next wakeup in# 0.000000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-03T10:28:36.637015Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue 
set wakeup after delta# 30 seconds 2025-06-03T10:28:36.637108Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553210, Sender [3:126:2151], Recipient [3:315:2301]: NKikimrTxDataShard.TEvCompactTable PathId { OwnerId: 72057594046678944 LocalId: 2 } CompactSinglePartedShards: true 2025-06-03T10:28:36.637213Z node 3 :TX_DATASHARD INFO: datashard__compaction.cpp:141: Started background compaction# 7 of 72075186233409546 tableId# 2 localTid# 1001, requested from [3:126:2151], partsCount# 1, memtableSize# 0, memtableWaste# 0, memtableRows# 0 2025-06-03T10:28:36.637599Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 6, ts 1970-01-01T00:00:19.154000Z 2025-06-03T10:28:36.637618Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 6, front# 7 2025-06-03T10:28:36.638808Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435080, Sender [3:1265:3200], Recipient [3:315:2301]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvAsyncTableStats 2025-06-03T10:28:36.638847Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3437: TEvPeriodicTableStats from datashard 72075186233409546, FollowerId 0, tableId 2 2025-06-03T10:28:36.639094Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553162, Sender [3:315:2301], Recipient [3:126:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409546 TableLocalId: 2 Generation: 2 Round: 5 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 5432 Memory: 124088 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 42 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-03T10:28:36.639108Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:28:36.639130Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.5432 2025-06-03T10:28:36.639154Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 19 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 
LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:28:36.639164Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-03T10:28:36.640012Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [3:304:2292], Recipient [3:315:2301]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-03T10:28:36.641410Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:203: CompactionComplete of tablet# 72075186233409546, table# 1001, finished edge# 7, ts 1970-01-01T00:00:20.154000Z 2025-06-03T10:28:36.641442Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:240: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001, finished edge# 7, front# 7 2025-06-03T10:28:36.641456Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:260: ReplyCompactionWaiters of tablet# 72075186233409546, table# 1001 sending TEvCompactTableResult to# [3:126:2151]pathId# [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:28:36.641647Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553211, Sender [3:315:2301], Recipient [3:126:2151]: NKikimrTxDataShard.TEvCompactTableResult TabletId: 72075186233409546 PathId { OwnerId: 72057594046678944 LocalId: 2 } Status: OK 2025-06-03T10:28:36.641659Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4958: StateWork, processing event TEvDataShard::TEvCompactTableResult 2025-06-03T10:28:36.641684Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 0 seconds 2025-06-03T10:28:36.641705Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_compaction.cpp:112: [BackgroundCompaction] [Finished] Compaction completed for pathId# [OwnerId: 72057594046678944, LocalPathId: 2], datashard# 72075186233409546, shardIdx# 72057594046678944:1 in# 4 ms, with status# 0, next wakeup in# 0.996000s, rate# 1, in queue# 1 shards, waiting after compaction# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-03T10:28:36.642345Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268828683, Sender [3:304:2292], Recipient [3:315:2301]: NKikimr::TEvTablet::TEvFollowerGcApplied 2025-06-03T10:28:36.653513Z node 3 :TX_DATASHARD DEBUG: datashard__compaction.cpp:189: Updated last full compaction of tablet# 72075186233409546, tableId# 2, last full compaction# 1970-01-01T00:00:20.154000Z 2025-06-03T10:28:36.704597Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.704643Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.704651Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-03T10:28:36.704699Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-03T10:28:36.704706Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-03T10:28:36.704757Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 13940 row count 100 2025-06-03T10:28:36.704788Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-03T10:28:36.704797Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-03T10:28:36.704837Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__background_compaction.cpp:219: [BackgroundCompaction] [Update] Updated shard# 72057594046678944:1 with partCount# 1, rowCount# 100, searchHeight# 1, lastFullCompaction# 1970-01-01T00:00:19.000000Z at schemeshard 72057594046678944 2025-06-03T10:28:36.704869Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-03T10:28:36.704916Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:28:36.715196Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.715235Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:28:36.715243Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 >> TPQTest::TestPQCacheSizeManagement [GOOD] >> TPQTest::TestOffsetEstimation [GOOD] >> KqpPg::PgUpdateCompoundKey+useSink [GOOD] >> KqpPg::PgUpdateCompoundKey-useSink >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] >> SystemView::TabletsRanges [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled >> Cdc::Write[PqRunner] [GOOD] >> Cdc::Write[YdsRunner] |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::NlohmannJsonToProtoArray [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/unittest >> TPQTest::TestOffsetEstimation [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:104:2057] recipient: [1:102:2135] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:109:2057] recipient: [1:102:2135] 2025-06-03T10:27:28.219137Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.219173Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [1:150:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927938 is [1:154:2174] sender: [1:155:2057] recipient: [1:148:2170] Leader for TabletID 72057594037927937 is [1:108:2139] sender: [1:180:2057] recipient: [1:14:2061] 2025-06-03T10:27:28.224228Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.227812Z node 1 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 
72057594037927937] Config applied version 1 actor [1:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 1 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 1 ReadRuleGenerations: 1 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 1 Important: false } Consumers { Name: "aaa" Generation: 1 Important: true } 2025-06-03T10:27:28.228182Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [1:186:2198] 2025-06-03T10:27:28.228895Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 2 [1:186:2198] 2025-06-03T10:27:28.229799Z node 1 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [1:187:2199] 2025-06-03T10:27:28.230457Z node 1 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 2 [1:187:2199] 2025-06-03T10:27:28.233503Z node 1 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|b144e095-96e68620-556e0782-7c100ebf_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [1:178:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:104:2057] recipient: [2:102:2135] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:109:2057] recipient: [2:102:2135] 2025-06-03T10:27:28.589991Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.590034Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [2:150:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927938 is [2:154:2174] sender: [2:155:2057] recipient: [2:148:2170] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:180:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:108:2139]) on event NKikimr::TEvPersQueue::TEvUpdateConfigBuilder ! 
Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:182:2057] recipient: [2:100:2134] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:185:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:108:2139] sender: [2:186:2057] recipient: [2:184:2195] Leader for TabletID 72057594037927937 is [2:187:2196] sender: [2:188:2057] recipient: [2:184:2195] 2025-06-03T10:27:28.638026Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:28.638064Z node 2 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [2:108:2139]) rebooted! !Reboot 72057594037927937 (actor [2:108:2139]) tablet resolver refreshed! new actor is[2:187:2196] Leader for TabletID 72057594037927937 is [2:187:2196] sender: [2:267:2057] recipient: [2:14:2061] 2025-06-03T10:27:30.503900Z node 2 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:30.504163Z node 2 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 2 actor [2:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 2 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 2 ReadRuleGenerations: 2 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 2 Important: false } Consumers { Name: "aaa" Generation: 2 Important: true } 2025-06-03T10:27:30.504404Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [2:273:2258] 2025-06-03T10:27:30.505202Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [2:273:2258] 2025-06-03T10:27:30.506142Z node 2 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [2:274:2259] 2025-06-03T10:27:30.506740Z node 2 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [2:274:2259] 2025-06-03T10:27:30.515725Z node 2 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|172fe936-6b9b90a4-fadf7486-4eda3234_0 generated for partition 0 topic 'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [2:178:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:104:2057] recipient: [3:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:104:2057] recipient: [3:102:2135] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:109:2057] recipient: [3:102:2135] 2025-06-03T10:27:31.072840Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in 
BillingMeteringConfig 2025-06-03T10:27:31.072878Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:150:2057] recipient: [3:148:2170] IGNORE Leader for TabletID 72057594037927938 is [0:0:0] sender: [3:150:2057] recipient: [3:148:2170] Leader for TabletID 72057594037927938 is [3:154:2174] sender: [3:155:2057] recipient: [3:148:2170] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:180:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:108:2139]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:182:2057] recipient: [3:100:2134] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:185:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:108:2139] sender: [3:186:2057] recipient: [3:184:2195] Leader for TabletID 72057594037927937 is [3:187:2196] sender: [3:188:2057] recipient: [3:184:2195] 2025-06-03T10:27:31.103765Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:31.103801Z node 3 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info !Reboot 72057594037927937 (actor [3:108:2139]) rebooted! !Reboot 72057594037927937 (actor [3:108:2139]) tablet resolver refreshed! new actor is[3:187:2196] Leader for TabletID 72057594037927937 is [3:187:2196] sender: [3:267:2057] recipient: [3:14:2061] 2025-06-03T10:27:32.960034Z node 3 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:27:32.960276Z node 3 :PERSQUEUE INFO: pq_impl.cpp:1487: [PQ: 72057594037927937] Config applied version 3 actor [3:178:2192] txId 12345 config: CacheSize: 10485760 PartitionConfig { MaxCountInPartition: 20000000 MaxSizeInPartition: 104857600 LifetimeSeconds: 0 ImportantClientId: "aaa" LowWatermark: 6291456 SourceIdLifetimeSeconds: 3600 MaxWriteInflightSize: 90000000 } PartitionIds: 0 PartitionIds: 1 TopicName: "rt3.dc1--asdfgs--topic" Version: 3 LocalDC: true Topic: "topic" TopicPath: "/Root/PQ/rt3.dc1--asdfgs--topic" Partitions { PartitionId: 0 } Partitions { PartitionId: 1 } ReadRuleGenerations: 3 ReadRuleGenerations: 3 MeteringMode: METERING_MODE_RESERVED_CAPACITY AllPartitions { PartitionId: 0 } AllPartitions { PartitionId: 1 } Consumers { Name: "user" ReadFromTimestampsMs: 0 Generation: 3 Important: false } Consumers { Name: "aaa" Generation: 3 Important: true } 2025-06-03T10:27:32.960472Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [3:273:2258] 2025-06-03T10:27:32.961165Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 3 [3:273:2258] 2025-06-03T10:27:32.961923Z node 3 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [3:274:2259] 2025-06-03T10:27:32.962423Z node 3 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 3 [3:274:2259] 2025-06-03T10:27:32.979116Z node 3 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie default|53be18b1-8e5ade55-ab7268b4-a333b7eb_0 generated for partition 0 topic 
'rt3.dc1--asdfgs--topic' owner default Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 1 } Cookie: 123 } via pipe: [3:178:2192] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:104:2057] recipient: [4:102:2135] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:104:2057] recipient: [4:102:2135] Leader for TabletID 72057594037927937 is [4:108:2139] sender: [4:109:2057] recipient: [4:102 ... on 0 generation 7 [77:657:2549] 2025-06-03T10:28:37.301413Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [77:176:2190] Leader for TabletID 72057594037927937 is [77:600:2500] sender: [77:688:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:600:2500] sender: [77:691:2057] recipient: [77:100:2134] Leader for TabletID 72057594037927937 is [77:600:2500] sender: [77:694:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:600:2500] sender: [77:695:2057] recipient: [77:693:2570] Leader for TabletID 72057594037927937 is [77:696:2571] sender: [77:697:2057] recipient: [77:693:2570] 2025-06-03T10:28:37.313368Z node 77 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:28:37.313485Z node 77 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:28:37.313683Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [77:755:2622] 2025-06-03T10:28:37.314364Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [77:756:2623] 2025-06-03T10:28:37.316772Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.316798Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 8 [77:756:2623] 2025-06-03T10:28:37.321513Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.321556Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 8 [77:755:2622] 2025-06-03T10:28:37.331665Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [77:176:2190] Leader for TabletID 72057594037927937 is [77:696:2571] sender: [77:786:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:696:2571] sender: [77:789:2057] recipient: [77:100:2134] Leader for TabletID 72057594037927937 is [77:696:2571] sender: [77:792:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:696:2571] sender: [77:793:2057] recipient: [77:791:2643] Leader for TabletID 72057594037927937 is [77:794:2644] sender: [77:795:2057] recipient: [77:791:2643] 2025-06-03T10:28:37.342076Z node 77 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:28:37.342105Z node 77 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:28:37.342255Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [77:855:2697] 2025-06-03T10:28:37.342971Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [77:856:2698] 2025-06-03T10:28:37.345312Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.345340Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 9 [77:856:2698] 2025-06-03T10:28:37.349359Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.349398Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 9 [77:855:2697] 2025-06-03T10:28:37.361752Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [77:176:2190] Leader for TabletID 72057594037927937 is [77:794:2644] sender: [77:886:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:794:2644] sender: [77:889:2057] recipient: [77:100:2134] Leader for TabletID 72057594037927937 is [77:794:2644] sender: [77:892:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:794:2644] sender: [77:893:2057] recipient: [77:891:2718] Leader for TabletID 72057594037927937 is [77:894:2719] sender: [77:895:2057] recipient: [77:891:2718] 2025-06-03T10:28:37.377282Z node 77 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:28:37.377334Z node 77 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:28:37.377510Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [77:957:2774] 2025-06-03T10:28:37.378397Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [77:958:2775] 2025-06-03T10:28:37.381732Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.381764Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 10 [77:958:2775] 2025-06-03T10:28:37.386597Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.386643Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 10 [77:957:2774] 2025-06-03T10:28:37.395873Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [77:176:2190] Leader for TabletID 72057594037927937 is [77:894:2719] sender: [77:990:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:894:2719] sender: [77:993:2057] recipient: [77:100:2134] Leader for TabletID 72057594037927937 is [77:894:2719] sender: [77:996:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:894:2719] sender: [77:997:2057] recipient: [77:995:2797] Leader for TabletID 72057594037927937 is [77:998:2798] sender: [77:999:2057] recipient: [77:995:2797] 2025-06-03T10:28:37.408699Z node 77 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:28:37.408731Z node 77 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:28:37.408920Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [77:1063:2855] 2025-06-03T10:28:37.409756Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [77:1064:2856] 2025-06-03T10:28:37.412277Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.412304Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 11 [77:1064:2856] 2025-06-03T10:28:37.416664Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.416709Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 11 [77:1063:2855] 2025-06-03T10:28:37.425822Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 Send read request: PartitionRequest { Partition: 0 CmdRead { ClientId: "user" SessionId: "" Offset: 0 Count: 1 Bytes: 104857600 } Cookie: 123 } via pipe: [77:176:2190] Leader for TabletID 72057594037927937 is [77:998:2798] sender: [77:1096:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:998:2798] sender: [77:1099:2057] recipient: [77:100:2134] Leader for TabletID 72057594037927937 is [77:998:2798] sender: [77:1102:2057] recipient: [77:14:2061] Leader for TabletID 72057594037927937 is [77:998:2798] sender: [77:1103:2057] recipient: [77:1101:2878] Leader for TabletID 72057594037927937 is [77:1104:2879] sender: [77:1105:2057] recipient: [77:1101:2878] 2025-06-03T10:28:37.438390Z node 77 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:28:37.438422Z node 77 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:28:37.438633Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 0, State: StateInit] bootstrapping 0 [77:1171:2938] 2025-06-03T10:28:37.439372Z node 77 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72057594037927937, Partition: 1, State: StateInit] bootstrapping 1 [77:1172:2939] 2025-06-03T10:28:37.442031Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.442061Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 1, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 1 generation 12 [77:1172:2939] 2025-06-03T10:28:37.446251Z node 77 :PERSQUEUE INFO: partition_init.cpp:774: [rt3.dc1--asdfgs--topic:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:28:37.446288Z node 77 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72057594037927937, Partition: 0, State: StateInit] init complete for topic 'rt3.dc1--asdfgs--topic' partition 0 generation 12 [77:1171:2938] 2025-06-03T10:28:37.454745Z node 77 :PERSQUEUE WARN: pq_l2_cache.cpp:94: PQ Cache (L2). Same blob insertion. 
Tablet '72057594037927937' partition 0 offset 0 partno 0 count 7 parts 16 size 8364507 |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> BootstrapperTest::LoneBootstrapper |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> Cdc::NaN[TopicRunner] [GOOD] >> Cdc::RacyRebootAndSplitWithTxInflight >> BootstrapperTest::LoneBootstrapper [GOOD] >> BootstrapperTest::MultipleBootstrappers >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink >> Cdc::HugeKeyDebezium [GOOD] >> Cdc::Drop[PqRunner] >> KqpCost::IndexLookupJoin-StreamLookupJoin >> Cdc::Write[YdsRunner] [GOOD] >> Cdc::Write[TopicRunner] >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> AsyncIndexChangeExchange::ShouldRemoveRecordsAfterCancelIndexBuild [GOOD] >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> Cdc::Drop[PqRunner] [GOOD] >> Cdc::Drop[YdsRunner] >> Cdc::Write[TopicRunner] [GOOD] >> Cdc::UpdateStream >> SystemView::DescribeSystemFolder [GOOD] >> SystemView::DescribeAccessDenied >> AsyncIndexChangeCollector::UpsertToSameKey ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::PgUpdateCompoundKey-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 7142, MsgBus: 2027 2025-06-03T10:28:00.319345Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667935539261056:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:00.327926Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00240d/r3tmp/tmpe9t3wS/pdisk_1.dat 2025-06-03T10:28:00.387064Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667935539260899:2079] 1748946480315787 != 1748946480315790 2025-06-03T10:28:00.399898Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7142, node 1 2025-06-03T10:28:00.420646Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:00.420661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:00.420663Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:00.420710Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:00.433890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:00.433919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:00.434995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2027 TClient is connected to server localhost:2027 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:00.535839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:00.539319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 16 2025-06-03T10:28:00.927183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:00.996813Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '0'::int2, ARRAY ['false'::bool, 'false'::bool] ); 2025-06-03T10:28:01.000101Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667935539261655:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.000130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.000274Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667935539261667:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.001213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:28:01.005079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-03T10:28:01.005183Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667935539261669:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:28:01.084284Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667939834229016:2383] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } --!syntax_pg INSERT INTO Pg1000_b (key, value) VALUES ( '1'::int2, ARRAY ['true'::bool, 'true'::bool] ); 18 2025-06-03T10:28:01.179998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.202899Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::"char", '0'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::"char", '1'::"char"] ); --!syntax_pg INSERT INTO Pg1002_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::"char", '2'::"char"] ); 21 2025-06-03T10:28:01.345960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.368708Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int2, '0'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int2, '1'::int2] ); --!syntax_pg INSERT INTO Pg1005_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int2, '2'::int2] ); 23 2025-06-03T10:28:01.524070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.545669Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int4, '0'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int4, '1'::int4] ); --!syntax_pg INSERT INTO Pg1007_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int4, '2'::int4] ); 20 2025-06-03T10:28:01.659742Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.679870Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '0'::int2, ARRAY ['0'::int8, '0'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '1'::int2, ARRAY ['1'::int8, '1'::int8] ); --!syntax_pg INSERT INTO Pg1016_b (key, value) VALUES ( '2'::int2, ARRAY ['2'::int8, '2'::int8] ); 700 2025-06-03T10:28:01.785489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation 
type: ESchemeOpCreateTable, opId: 281474976715689:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.800475Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float4, '0.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float4, '1.5'::float4] ); --!syntax_pg INSERT INTO Pg1021_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float4, '2.5'::float4] ); 701 2025-06-03T10:28:01.909882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480 2025-06-03T10:28:01.926043Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '0'::int2, ARRAY ['0.5'::float8, '0.5'::float8] ); --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '1'::int2, ARRAY ['1.5'::float8, '1.5'::float8] ); --!syntax_pg INSERT INTO Pg1022_b (key, value) VALUES ( '2'::int2, ARRAY ['2.5'::float8, '2.5'::float8] ); 25 2025-06-03T10:28:02.067405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715701:0, at schemeshard: 72057594046644480 2025-06-03T10:28:02.099412Z node 1 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '0'::int2, ARRAY ['text 0'::text, 'text 0'::text] ); --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '1'::int2, ARRAY ['text 1'::text, 'text 1'::text] ); --!syntax_pg INSERT INTO Pg1009_b (key, value) VALUES ( '2'::i ... oChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:36.855063Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:37.226827Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511668096125459272:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:37.226857Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:37.230509Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:37.252020Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511668096125459375:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:37.252064Z node 9 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:37.252198Z node 9 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [9:7511668096125459381:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:37.252963Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:37.261432Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [9:7511668096125459383:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:37.334369Z node 9 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [9:7511668096125459434:2381] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:37.371736Z node 9 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [9:7511668096125459478:2355], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-03T10:28:37.372411Z node 9 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=9&id=NDU4YjQ2NjYtZjZhMGZmYjItZmRkNjg1OWUtNzZiZmY2Nzk=, ActorId: [9:7511668096125459471:2351], ActorState: ExecuteState, TraceId: 01jwtnbzbj7709r3pd3bhndfgp, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:37.383552Z node 9 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 waiting... Trying to start YDB, gRPC: 27622, MsgBus: 22250 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00240d/r3tmp/tmpmP5TMS/pdisk_1.dat 2025-06-03T10:28:37.729389Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:37.733525Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27622, node 10 2025-06-03T10:28:37.758607Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:37.758621Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:37.758623Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:37.758680Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22250 2025-06-03T10:28:37.809360Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:37.809389Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:37.810412Z node 10 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(10, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22250 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:37.816447Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:38.141965Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511668098227654842:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.141999Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.144624Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.159270Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511668098227654945:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.159314Z node 10 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.159373Z node 10 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [10:7511668098227654950:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.160147Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:38.166595Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [10:7511668098227654952:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:38.240212Z node 10 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [10:7511668098227655003:2385] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:38.289442Z node 10 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [10:7511668098227655067:2362], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:1: Error: At function: KiWriteTable!
:1:1: Error: Cannot update primary key column: key1
:1:1: Error: Cannot update primary key column: key2 2025-06-03T10:28:38.289526Z node 10 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=10&id=NDc4ZWRiMjgtMTExZTVkOWMtNmZmNWRhNzgtM2VhYzBjZDM=, ActorId: [10:7511668098227655060:2358], ActorState: ExecuteState, TraceId: 01jwtnc08d9c8p6dw66kdrm6p5, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:28:38.291050Z node 10 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 waiting... ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::TabletsRangesPredicateExtractDisabled [GOOD] Test command err: 2025-06-03T10:28:05.590036Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667959566269214:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:05.590059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b38/r3tmp/tmpCIgAIY/pdisk_1.dat 2025-06-03T10:28:05.703288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:05.703331Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:05.704689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 28956, node 1 2025-06-03T10:28:05.720786Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:05.737562Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:05.737578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:05.737580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:05.737632Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64033 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success.
2025-06-03T10:28:05.777634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:05.780219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:06.047121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667963861237109:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:06.047145Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667963861237128:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:06.047156Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:06.048279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:06.050638Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667963861237138:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:06.143617Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667963861237189:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:06.209411Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511667963861237217:2324] TxId: 281474976715661. Ctx: { TraceId: 01jwtnb0gt3ea9v9hjmzs0mk8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODY5MmRjN2EtZjJmMjgxOTItOGI1NmZjODctYzAxN2Y2NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:28:06.209510Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnb0gt3ea9v9hjmzs0mk8v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODY5MmRjN2EtZjJmMjgxOTItOGI1NmZjODctYzAxN2Y2NTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:06.215950Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667963861237223:2337], owner: [1:7511667963861237220:2335], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:06.221520Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667963861237223:2337], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:06.225511Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667963861237223:2337], row count: 0, finished: 1 2025-06-03T10:28:06.225528Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667963861237223:2337], owner: [1:7511667963861237220:2335], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:06.227693Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946486208, txId: 281474976715660] shutting down 2025-06-03T10:28:07.276450Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtnb1xycw09jjncfw7j880v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmYwMjdhYzktNmNkODJjZTItYTM1YTJhMmUtNTkzY2FhMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}.
Database not set, use /Root 2025-06-03T10:28:07.277356Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667968156204571:2351], owner: [1:7511667968156204568:2349], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:07.281726Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667968156204571:2351], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:07.281866Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667968156204571:2351], row count: 0, finished: 1 2025-06-03T10:28:07.281880Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667968156204571:2351], owner: [1:7511667968156204568:2349], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:07.290406Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946487264, txId: 281474976715662] shutting down 2025-06-03T10:28:08.323254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnb2zp03t3ttcjbmwds2k3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjU5YWQ0YTAtMTgwOWM5YjAtZTVlOWM1MTItMWNjZjZmM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:08.323859Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667972451171904:2362], owner: [1:7511667972451171901:2360], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:08.325585Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667972451171904:2362], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:08.325663Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667972451171904:2362], row count: 0, finished: 1 2025-06-03T10:28:08.325679Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667972451171904:2362], owner: [1:7511667972451171901:2360], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:08.326590Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946488322, txId: 281474976715664] shutting down 2025-06-03T10:28:09.344324Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnb3zjb4hpss7xf5g3rwee, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWQzM2JmZWUtZGZmZmFjNzYtZTk3NzFlNWEtZDI0MjRlYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}.
Database not set, use /Root 2025-06-03T10:28:09.344758Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667976746139238:2373], owner: [1:7511667976746139234:2371], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:09.344951Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667976746139238:2373], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:09.345034Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667976746139238:2373], row count: 0, finished: 1 2025-06-03T10:28:09.345049Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667976746139238:2373], owner: [1:7511667976746139234:2371], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:09.345616Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946489343, txId: 281474976715666] shutting down 2025-06-03T10:28:10.365042Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnb4zcdkwrq314gmy90vsx, Database: , DatabaseId: /Root, SessionId: ydb://sess ... can_actor_base_impl.h:321: Scan prepared, actor: [20:7511668095510620581:2425], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:37.441477Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [20:7511668095510620581:2425], row count: 3, finished: 1 2025-06-03T10:28:37.441491Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [20:7511668095510620581:2425], owner: [20:7511668095510620577:2423], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:37.442296Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946517439, txId: 281474976715675] shutting down 2025-06-03T10:28:37.462014Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. Ctx: { TraceId: 01jwtnbze3djp2db30vhc3ewm8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=ZTNmZDYyMmItMjAyZGY1ZDEtNWQyODQxY2EtNTM2NzBkNDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}.
Database not set, use /Root 2025-06-03T10:28:37.462624Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [20:7511668095510620612:2434], owner: [20:7511668095510620609:2432], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:37.465773Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [20:7511668095510620612:2434], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:37.466808Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [20:7511668095510620612:2434], row count: 4, finished: 1 2025-06-03T10:28:37.466826Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [20:7511668095510620612:2434], owner: [20:7511668095510620609:2432], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:37.467778Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946517461, txId: 281474976715677] shutting down 2025-06-03T10:28:37.488152Z node 20 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jwtnbzew7f9t4e58fjgjtht7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=20&id=YjE2ODgxYmEtMTg1YTgwYzQtNmM4ZGViMDYtN2FhNGVkZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:37.488905Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [20:7511668095510620644:2443], owner: [20:7511668095510620641:2441], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:37.489232Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [20:7511668095510620644:2443], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:37.489388Z node 20 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [20:7511668095510620644:2443], row count: 4, finished: 1 2025-06-03T10:28:37.489409Z node 20 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [20:7511668095510620644:2443], owner: [20:7511668095510620641:2441], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:37.490252Z node 20 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946517484, txId: 281474976715679] shutting down 2025-06-03T10:28:37.776070Z node 21 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[21:7511668095276581784:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:37.776091Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b38/r3tmp/tmpQEC5b4/pdisk_1.dat 2025-06-03T10:28:37.810102Z node 21 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26415, node 21 2025-06-03T10:28:37.827853Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty,
broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:37.827870Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:37.827876Z node 21 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:37.827935Z node 21 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14218 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:37.876730Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:37.876769Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:37.878199Z node 21 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(21, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:37.884179Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:37.888616Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:38.222619Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.239597Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7511668099571549959:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.239608Z node 21 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [21:7511668099571549950:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.239626Z node 21 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.240466Z node 21 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:38.242413Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [21:7511668099571549964:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:38.341631Z node 21 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [21:7511668099571550015:2479] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:38.371603Z node 21 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnc06y6y2g4bezw5kbp2hf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=21&id=MjBmODhiYzgtYTZmZGY2MTMtNmUyYTc5MzYtYTE0YmY0NDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:38.372189Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [21:7511668099571550051:2361], owner: [21:7511668099571550050:2360], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:38.372403Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [21:7511668099571550051:2361], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:38.372601Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [21:7511668099571550051:2361], row count: 4, finished: 1 2025-06-03T10:28:38.372614Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [21:7511668099571550051:2361], owner: [21:7511668099571550050:2360], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:38.372655Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [21:7511668099571550057:2364], owner: [21:7511668099571550050:2360], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:38.373229Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [21:7511668099571550057:2364], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:38.373359Z node 21 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [21:7511668099571550057:2364], row count: 4, finished: 1 2025-06-03T10:28:38.373370Z node 21 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [21:7511668099571550057:2364], owner: [21:7511668099571550050:2360], scan id: 0, table id: [72057594046644480:1:0:hive_tablets] 2025-06-03T10:28:38.374120Z node 21 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946518370, txId: 281474976715661] shutting down >> SystemView::AuthEffectivePermissions [GOOD] >> CdcStreamChangeCollector::InsertSingleRow >> AsyncIndexChangeCollector::InsertSingleRow >> CdcStreamChangeCollector::UpsertToSameKey >> Cdc::Drop[YdsRunner] [GOOD] >> Cdc::Drop[TopicRunner] >> Cdc::UpdateStream [GOOD] >> Cdc::UpdateShardCount >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::AuthEffectivePermissions [GOOD] Test command err: 
2025-06-03T10:28:12.615570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667987857961173:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:12.615878Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002aea/r3tmp/tmp8XmPFb/pdisk_1.dat 2025-06-03T10:28:12.781511Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31764, node 1 2025-06-03T10:28:12.838854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:12.838870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:12.838873Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:12.838930Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14700 TClient is connected to server localhost:14700 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:12.933658Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:12.933689Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:12.942506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:13.005668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:13.209001Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667992152929532:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.209006Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667992152929524:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.209031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:13.209910Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:13.215760Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667992152929538:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:13.297230Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667992152929617:2744] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:13.624489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:28:13.762769Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:13.847058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:28:13.946738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:28:14.036956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:14.107771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:28:14.187771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:28:14.211479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:28:14.802720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715697:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.391731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715710:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.432615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:3, at schemeshard: 72057594046644480 2025-06-03T10:28:15.482968Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511668000742866756:2885], 
owner: [1:7511668000742866753:2883], scan id: 0, table id: [1:0:0:show_create] 2025-06-03T10:28:15.482999Z node 1 :SYSTEM_VIEWS INFO: show_create.cpp:106: Scan prepared, actor: [1:7511668000742866756:2885] 2025-06-03T10:28:15.488818Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511668000742866756:2885], row count: 1, finished: 1 2025-06-03T10:28:15.488849Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511668000742866756:2885], owner: [1:7511668000742866753:2883], scan id: 0, table id: [1:0:0:show_create] 2025-06-03T10:28:16.656414Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511668005534346425:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:16.656439Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002aea/r3tmp/tmpGRWnGr/pdisk_1.dat 2025-06-03T10:28:16.682230Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15387, node 6 2025-06-03T10:28:16.720009Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:16.720022Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:16.720024Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:16.720099Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19344 2025-06-03T10:28:16.756836Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:16.756869Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:16.758339Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19344 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
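The KQP_WORKLOAD_SERVICE warnings in the node 1 block above show the workload manager bootstrapping its `default` resource pool on first use: TPoolFetcherActor reports NOT_FOUND, a TPoolCreatorActor proposes ESchemeOpCreateResourcePool, and the later TX_PROXY "path exist, request accepts it" message is the apparently benign race when two concurrent fetchers both trigger creation. A minimal YQL sketch of creating such a pool explicitly is below; the setting names and values are illustrative assumptions, not taken from this log:

    CREATE RESOURCE POOL default WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- assumed setting name/value
        QUEUE_SIZE = 100              -- assumed setting name/value
    );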
2025-06-03T10:28:16.772038Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:17.080857Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors. ... , at schemeshard: 72075186224037888, txId: 0, path id: [OwnerId: 72075186224037888, LocalPathId: 2] 2025-06-03T10:28:36.892183Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 1 2025-06-03T10:28:37.790617Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186224037888, txId: 0, path id: [OwnerId: 72075186224037888, LocalPathId: 1] 2025-06-03T10:28:36.892192Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 5 2025-06-03T10:28:37.791544Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976715675, response: Status: StatusSuccess TxId: 281474976715675 SchemeshardId: 72075186224037888, at schemeshard: 72075186224037888 2025-06-03T10:28:36.892195Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 5 2025-06-03T10:28:37.791589Z node 44 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976715675, database: /Root/Tenant1, subject: , status: StatusSuccess, operation: MODIFY ACL, path: /Root/Tenant1/Dir2, add access: +(SR):user2 2025-06-03T10:28:37.791623Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186224037888 2025-06-03T10:28:37.791627Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [44:7511668089104378900:2300], at schemeshard: 72075186224037888, txId: 0, path id: 3 2025-06-03T10:28:37.791639Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [44:7511668089104378900:2300], at schemeshard: 72075186224037888, txId: 0, path id: 2 2025-06-03T10:28:37.791642Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [44:7511668089104378900:2300], at schemeshard: 72075186224037888, txId: 0, path id: 1 2025-06-03T10:28:37.791657Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186224037888 2025-06-03T10:28:37.791661Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186224037888, txId: 281474976715675, path id: [OwnerId: 72075186224037888, LocalPathId: 3] 2025-06-03T10:28:37.791675Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186224037888, txId: 281474976715675, path id: [OwnerId: 
72075186224037888, LocalPathId: 1] 2025-06-03T10:28:37.791694Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186224037888 2025-06-03T10:28:37.791698Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [44:7511668089104378900:2300], at schemeshard: 72075186224037888, txId: 281474976715675, path id: 3 2025-06-03T10:28:37.791701Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [44:7511668089104378900:2300], at schemeshard: 72075186224037888, txId: 281474976715675, path id: 1 2025-06-03T10:28:36.892197Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 6 2025-06-03T10:28:36.892200Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 1 2025-06-03T10:28:36.892203Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 6 2025-06-03T10:28:37.792606Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 3 Version: 5 PathOwnerId: 72075186224037888, cookie: 0 2025-06-03T10:28:36.892205Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 7 2025-06-03T10:28:36.892207Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 5 2025-06-03T10:28:36.892213Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 7 2025-06-03T10:28:36.892217Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 8 2025-06-03T10:28:36.892219Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [41:7511668089760428917:2371], at schemeshard: 72057594046644480, txId: 281474976715663, path id: 6 2025-06-03T10:28:37.792614Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 3 Version: 5 PathOwnerId: 72075186224037888, cookie: 281474976715675 2025-06-03T10:28:36.892228Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715663:3, at schemeshard: 72057594046644480 2025-06-03T10:28:36.892236Z node 41 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_create_resource_pool.cpp:45: [72057594046644480] TCreateResourcePool TPropose, operationId: 281474976715663:3, ProgressState 2025-06-03T10:28:36.892246Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976715663 ready parts: 1/4 2025-06-03T10:28:37.792627Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 3 Version: 5 PathOwnerId: 72075186224037888, cookie: 281474976715675 2025-06-03T10:28:36.892309Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715663:1, at schemeshard: 72057594046644480 2025-06-03T10:28:37.792631Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72075186224037888, txId: 281474976715675 2025-06-03T10:28:36.892314Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 281474976715663:1 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:28:36.892317Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976715663 ready parts: 2/4 2025-06-03T10:28:37.792636Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186224037888, txId: 281474976715675, pathId: [OwnerId: 72075186224037888, LocalPathId: 3], version: 5 2025-06-03T10:28:36.892327Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:36.892329Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 281474976715663:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:28:36.892332Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 281474976715663 ready parts: 3/4 2025-06-03T10:28:36.892342Z node 41 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976715663:2, at schemeshard: 72057594046644480 2025-06-03T10:28:36.892345Z node 41 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:63: MkDir::TPropose operationId# 281474976715663:2 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:28:37.792642Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186224037888, LocalPathId: 3] was 1 2025-06-03T10:28:37.792730Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 2 Version: 4 PathOwnerId: 72075186224037888, cookie: 0 2025-06-03T10:28:37.792736Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 1 Version: 11 PathOwnerId: 72075186224037888, cookie: 0 2025-06-03T10:28:37.792741Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 1 Version: 11 PathOwnerId: 72075186224037888, cookie: 281474976715675 
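The node 44 audit record a little further up ("MODIFY ACL, path: /Root/Tenant1/Dir2, add access: +(SR):user2") is a select-row grant being proposed and then pushed out through the scheme board: the TTxPublishToSchemeBoard / TEvUpdateAck exchanges surrounding it are the per-path publications being acknowledged (in-flight count 2, then 1) until "Publication complete" is logged. In YQL the equivalent grant would look roughly like the sketch below; the permission keyword is an assumption inferred from the "(SR)" flag in the audit line:

    GRANT SELECT ROW ON `/Root/Tenant1/Dir2` TO user2;  -- permission name assumed from '(SR)'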
2025-06-03T10:28:37.792749Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186224037888, msg: Owner: 72075186224037888 Generation: 1 LocalPathId: 1 Version: 11 PathOwnerId: 72075186224037888, cookie: 281474976715675 2025-06-03T10:28:37.792750Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72075186224037888, txId: 281474976715675 2025-06-03T10:28:37.792753Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186224037888, txId: 281474976715675, pathId: [OwnerId: 72075186224037888, LocalPathId: 1], version: 11 2025-06-03T10:28:37.792755Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186224037888, LocalPathId: 1] was 9 2025-06-03T10:28:37.792763Z node 44 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72075186224037888, txId: 281474976715675, subscribers: 0 2025-06-03T10:28:37.794710Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186224037888, cookie: 281474976715675 2025-06-03T10:28:37.794747Z node 44 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186224037888, cookie: 281474976715675 2025-06-03T10:28:37.846457Z node 44 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 >> AsyncIndexChangeCollector::UpsertSingleRow >> PgCatalog::CheckSetConfig [GOOD] >> PgCatalog::PgDatabase+useSink >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] >> Cdc::Drop[TopicRunner] [GOOD] >> Cdc::DescribeStream >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 22240, MsgBus: 29763 2025-06-03T10:28:38.422270Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668099922390742:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:38.422337Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f87/r3tmp/tmpeuCcuB/pdisk_1.dat 2025-06-03T10:28:38.516851Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:38.517560Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668099922390719:2079] 1748946518422071 != 1748946518422074 TServer::EnableGrpc on GrpcPort 22240, node 1 2025-06-03T10:28:38.541575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:38.541593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:38.541595Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty 
maybe) 2025-06-03T10:28:38.541646Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29763 2025-06-03T10:28:38.580098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:38.580136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:38.581130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:29763 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:28:38.614688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.618174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:28:38.622708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.694865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.720712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.735758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.897136Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668099922392365:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.897188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:38.947653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.957792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.971846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:38.985784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.042476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.055869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.070145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.090360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668104217360315:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.090398Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.090455Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668104217360320:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.091545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:39.101873Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668104217360322:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:28:39.165202Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668104217360373:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:39.369819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> Cdc::UpdateShardCount [GOOD] >> Cdc::UpdateRetentionPeriod >> CdcStreamChangeCollector::UpsertManyRows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 28095, MsgBus: 6730 2025-06-03T10:28:38.601912Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668097805599185:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:38.601936Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f83/r3tmp/tmpk1Y2iO/pdisk_1.dat 2025-06-03T10:28:38.694054Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:38.694102Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668097805599167:2079] 1748946518601766 != 1748946518601769 TServer::EnableGrpc on GrpcPort 28095, node 1 2025-06-03T10:28:38.714228Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:38.714242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:38.714244Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:38.714305Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6730 2025-06-03T10:28:38.764942Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:38.764976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:38.765986Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6730 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:38.789032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.802352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.823084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.847434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:38.860338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:39.040354Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668102100568092:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.040402Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.100494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.109822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.166202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.223443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.233581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.245794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.259927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.281881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668102100568751:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.281903Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668102100568756:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.281911Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:39.283326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:39.287745Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668102100568758:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:39.377548Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668102100568809:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:39.579212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.590639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.601907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> AsyncIndexChangeCollector::DeleteNothing >> AsyncIndexChangeCollector::UpsertToSameKey [GOOD] >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue >> PgCatalog::PgDatabase+useSink [GOOD] >> PgCatalog::PgDatabase-useSink >> Cdc::RacyRebootAndSplitWithTxInflight [GOOD] >> Cdc::RacyActivateAndEnqueue >> Cdc::DescribeStream [GOOD] >> Cdc::DecimalKey >> AsyncIndexChangeCollector::InsertSingleRow [GOOD] >> AsyncIndexChangeCollector::InsertManyRows >> Cdc::UpdateRetentionPeriod [GOOD] >> Cdc::SupportedTypes >> CdcStreamChangeCollector::InsertSingleRow [GOOD] >> CdcStreamChangeCollector::InsertSingleUuidRow |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> CdcStreamChangeCollector::UpsertToSameKey [GOOD] >> CdcStreamChangeCollector::UpsertToSameKeyWithImages >> BasicStatistics::SimpleGlobalIndex [GOOD] >> AsyncIndexChangeCollector::UpsertSingleRow [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows >> SystemView::DescribeAccessDenied [GOOD] >> SystemView::CollectScriptingQueries >> PgCatalog::PgDatabase-useSink [GOOD] >> PgCatalog::PgRoles |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> SystemView::ShowCreateTableChangefeeds [FAIL] >> SystemView::ShowCreateTableColumnAlterColumn >> KqpPg::TableDeleteWhere+useSink [GOOD] >> KqpPg::TableDeleteWhere-useSink >> Viewer::JsonAutocompleteEmpty >> SystemView::ShowCreateTableTemporary [GOOD] >> SystemView::ShowCreateTableSequences >> AsyncIndexChangeCollector::CoveredIndexUpdateCoveredColumn [GOOD] >> AsyncIndexChangeCollector::CoveredIndexUpsert ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::SimpleGlobalIndex [GOOD] Test command err: 2025-06-03T10:26:25.541718Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:25.541758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:25.541767Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c92/r3tmp/tmpYxaCbn/pdisk_1.dat 2025-06-03T10:26:25.647711Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23866, node 1 2025-06-03T10:26:25.760140Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:25.760166Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:25.760171Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:25.760237Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:25.760954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:25.839961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:25.840006Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:25.852481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32520 2025-06-03T10:26:26.210507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:26.952840Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:26.959888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:26.959930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.013578Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:27.014356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:27.174134Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174322Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174493Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
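For comparison, the KqpCost::IndexLookupJoin-StreamLookupJoin run that finished just before this block reports "/Root/Join1_2 1 19" and "/Root/Join1_1 8 136", which look like per-table access counts and byte totals for a lookup join executed with stream lookup disabled. A join of the rough shape such a test would drive (table names are from the log; column names and the join key are assumptions):

    SELECT a.Key, b.Value
    FROM Join1_1 AS a
    INNER JOIN Join1_2 AS b ON a.Fk = b.Key;  -- planned by KQP as a lookup join in this mode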
2025-06-03T10:26:27.174585Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174637Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174668Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174686Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174708Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.174731Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.327590Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.327638Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.338862Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:27.370147Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:27.383612Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:27.383659Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:27.395351Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:27.395418Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:27.395451Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:27.395458Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:27.395465Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:27.395472Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:27.395479Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:27.395488Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:27.395648Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:27.409442Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:27.409473Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:27.411367Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:27.412100Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
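The node 2 block around here also shows the statistics aggregator (tablet 72075186224037894) initializing and its "_statistics updater" creating /Root/Database/.metadata/_statistics, the internal table where per-path statistics land. The traversal at the end of this log cleans that table with the parameterized query from the RunDataQuery line; reformatted for readability, it is:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DELETE FROM `.metadata/_statistics`
    WHERE owner_id = $owner_id
      AND local_path_id = $local_path_id;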
2025-06-03T10:26:27.412199Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:27.414271Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:27.417640Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:27.417666Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:27.417675Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:27.421461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:27.423605Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:27.423651Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:27.528718Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:27.608248Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:27.661402Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:28.187744Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2214:3059], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.187790Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.190820Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:28.319146Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2425:3104], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.319188Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:28.324390Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2430:3108]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:28.324431Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:28.324439Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2432:3110] 2025-06-03T10:26:28.324447Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2432:3110] 2025-06-03T10:26:28.324603Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2433:2917] 2025-06-03T10:26:28.324672Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2432:3110], server id = [2:2433:2917], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:28.324718Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2433:2917], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:28.324730Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:26:28.324781Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:26:28.324788Z node 1 :STATISTICS DEBU ... StatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:6720:4770]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:34.536825Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-03T10:28:34.536835Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6720:4770], StatRequests.size() = 1 2025-06-03T10:28:35.188942Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:35.188983Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:35.188998Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-03T10:28:35.189005Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:28:35.189156Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:35.196630Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:35.198257Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6743:4789], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:35.198295Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6754:4794], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:35.198313Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:35.201237Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:28:35.216090Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6757:4797], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:28:35.377425Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6855:4845] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:35.409502Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6884:4860]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:35.409561Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:28:35.409567Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6884:4860], StatRequests.size() = 1 2025-06-03T10:28:35.440934Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NWU2OTQ0MTQtMmU3ZjZhYTItOGYxM2M3NWMtMmQ4YzM0NmE=, TxId: 2025-06-03T10:28:35.440970Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NWU2OTQ0MTQtMmU3ZjZhYTItOGYxM2M3NWMtMmQ4YzM0NmE=, TxId: 2025-06-03T10:28:35.442741Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:35.455991Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:28:35.456021Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:35.910994Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6912:4876]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:35.911108Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:28:35.911115Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6912:4876], StatRequests.size() = 1 2025-06-03T10:28:37.104767Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6947:4894]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:37.104902Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:28:37.104913Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6947:4894], StatRequests.size() = 1 2025-06-03T10:28:37.753068Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:28:37.763486Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:37.763527Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:37.763540Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 6] is data table. 
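The RunDataQuery records above and below repeat the same statistics-cleanup statement on every scheduled traversal. Reformatted for readability (the text itself is reproduced verbatim from the log), the YQL the aggregator issues is:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;
    DELETE FROM `.metadata/_statistics`
    WHERE owner_id = $owner_id AND local_path_id = $local_path_id;

The two parameters are presumably bound to the OwnerId/LocalPathId pair named in the surrounding ScheduleNextTraversal records (for example [OwnerId: 72075186224037897, LocalPathId: 6]).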
2025-06-03T10:28:37.763546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-03T10:28:37.763648Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:37.764538Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:37.768777Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ODQ1YTdlNjYtYTJiYzc3MDItMjRkMWVhZWQtN2IwYmMzOWE=, TxId: 2025-06-03T10:28:37.768806Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ODQ1YTdlNjYtYTJiYzc3MDItMjRkMWVhZWQtN2IwYmMzOWE=, TxId: 2025-06-03T10:28:37.768945Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:37.782068Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 6] 2025-06-03T10:28:37.782099Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:38.329485Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7011:4930]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:38.329616Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-03T10:28:38.329626Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7011:4930], StatRequests.size() = 1 2025-06-03T10:28:39.634941Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7050:4950]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:39.635088Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-03T10:28:39.635101Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7050:4950], StatRequests.size() = 1 2025-06-03T10:28:40.290943Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:28:40.291150Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:28:40.291262Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:28:40.301881Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:40.301923Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:40.301937Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-03T10:28:40.301945Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. 
Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:28:40.302097Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:40.303099Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:40.307146Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWM3NjllMWMtNmY5ZDliZTktOTY3MDE3ZmUtM2U1NjExNTk=, TxId: 2025-06-03T10:28:40.307182Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWM3NjllMWMtNmY5ZDliZTktOTY3MDE3ZmUtM2U1NjExNTk=, TxId: 2025-06-03T10:28:40.307467Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:40.321204Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:28:40.321237Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:40.869858Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7109:4983]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:40.869980Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-03T10:28:40.869989Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7109:4983], StatRequests.size() = 1 >> KqpCost::OltpWriteRow-isSink >> AsyncIndexChangeCollector::DeleteNothing [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow >> AsyncIndexChangeCollector::UpsertWithoutIndexedValue [GOOD] >> CdcStreamChangeCollector::DeleteNothing >> AsyncIndexChangeExchange::ShouldDeliverChangesOnSplitMerge [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount >> Cdc::DecimalKey [GOOD] >> CdcStreamChangeCollector::UpsertManyRows [GOOD] >> CdcStreamChangeCollector::UpsertIntoTwoStreams >> Cdc::DropColumn >> PgCatalog::PgRoles [GOOD] >> PgCatalog::PgTables >> AsyncIndexChangeCollector::InsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow >> Cdc::SupportedTypes [GOOD] >> Cdc::SplitTopicPartition_TopicAutoPartitioning >> SystemView::CollectScriptingQueries [GOOD] |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> CdcStreamChangeCollector::UpsertToSameKeyWithImages [GOOD] >> CdcStreamChangeCollector::UpsertModifyDelete >> CdcStreamChangeCollector::InsertSingleUuidRow [GOOD] >> CdcStreamChangeCollector::IndexAndStreamUpsert >> BootstrapperTest::MultipleBootstrappers [GOOD] >> AsyncIndexChangeCollector::UpsertManyRows [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn |64.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup+useSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::CollectScriptingQueries [GOOD] Test command err: 2025-06-03T10:28:11.242733Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667981578958855:2219];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.242828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002af8/r3tmp/tmpAivHRI/pdisk_1.dat 2025-06-03T10:28:11.321810Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6295, node 1 2025-06-03T10:28:11.343427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:11.343442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:11.343444Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:11.343508Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5980 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:11.380885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.380920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.382200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:11.389146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:11.533786Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:11.552687Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511667981983480694:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.557247Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:11.560333Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.560361Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.561549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.561575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.561983Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-03T10:28:11.562527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:11.563153Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:11.570539Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-03T10:28:11.571898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:11.645544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:11.665789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:28:11.654079Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667982848684512:2157];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:11.666529Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 
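The KQP_WORKLOAD_SERVICE warnings that recur throughout these runs ("Resource pool default not found or you don't have access permissions", followed by TPoolCreatorActor's "Scheduled retry" and a later "path exist, request accepts it" message) trace the lazy creation of the default workload-manager pool on first use: the fetch fails with NOT_FOUND, a creator actor proposes ESchemeOpCreateResourcePool, and a racing proposal is then accepted because the path already exists. A minimal sketch of creating a pool explicitly — the CREATE RESOURCE POOL statement is real YDB YQL, but the pool name and limits here are illustrative assumptions, not taken from the log:

    -- Hypothetical pool; limits chosen only for illustration.
    CREATE RESOURCE POOL demo_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,
        QUEUE_SIZE = 100
    );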
2025-06-03T10:28:11.682770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.682804Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.682829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:11.682845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:11.684370Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:28:11.684387Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:28:11.686647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:11.686775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:12.104102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.202091Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667985873927258:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.202096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667985873927270:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.202122Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:12.203018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715663:3, at schemeshard: 72057594046644480 2025-06-03T10:28:12.216557Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667985873927272:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715663 completed, doublechecking } 2025-06-03T10:28:12.311372Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667985873927350:2929] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:12.416191Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnb6s90ebzd2tv4rs91x0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDM3ZTUzOWMtMTg4ZGIzNmMtYzMwZjQ1ZWQtODgzZGQ3ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:12.426321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.488570Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnb71t3ghqe1vfhd53wdr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDM3ZTUzOWMtMTg4ZGIzNmMtYzMwZjQ1ZWQtODgzZGQ3ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:12.495131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:12.590885Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnb751fq1h4p8khtm53f4r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDM3ZTUzOWMtMTg4ZGIzNmMtYzMwZjQ1ZWQtODgzZGQ3ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:12.640778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtnb75sfep5g70greegr134, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWJhMDFhNjEtNzFjYWYwNWUtYmQ2MTIzNmItMjNmNjk2OWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:12.641490Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667985873927620:2374], owner: [1:7511667985873927617:2372], scan id: 0, table id: [72057594046644480:2:0:nodes] 2025-06-03T10:28:12.642870Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:75116 ... 2025-06-03T10:28:40.213531Z node 32 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.296051Z node 32 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnc26s8xtpf285brqyprmb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=32&id=ZDczNTE3ODgtYjdmZDdlYmEtY2U1YTU3MjEtY2FhOGI3NDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:40.307444Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root
: Error: Access denied 2025-06-03T10:28:40.311198Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1
: Error: Access denied 2025-06-03T10:28:40.314625Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys
: Error: Access denied 2025-06-03T10:28:40.318841Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys
: Error: Access denied 2025-06-03T10:28:40.324764Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/.sys/partition_stats
: Error: Access denied 2025-06-03T10:28:40.333096Z node 32 :TX_PROXY ERROR: describe.cpp:395: Access denied for user0@builtin with access DescribeSchema to path Root/Tenant1/.sys/partition_stats
: Error: Access denied 2025-06-03T10:28:40.348005Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 33 2025-06-03T10:28:40.348141Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(33, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:40.348176Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 34 2025-06-03T10:28:40.348250Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(34, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:40.348265Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 36 2025-06-03T10:28:40.348322Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(36, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:40.348460Z node 32 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 35 2025-06-03T10:28:40.348546Z node 32 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(35, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:28:40.349557Z node 32 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7511668103039457205:2104], Type=268959746 2025-06-03T10:28:40.349568Z node 32 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7511668103039457205:2104], Type=268959746 2025-06-03T10:28:40.349572Z node 32 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7511668103039457205:2104], Type=268959746 2025-06-03T10:28:40.349577Z node 32 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[35:7511668103039457205:2104], Type=268959746 2025-06-03T10:28:40.568529Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:40.568672Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:41.507312Z node 37 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[37:7511668111117461502:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:41.507340Z node 37 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002af8/r3tmp/tmpWSaac1/pdisk_1.dat 2025-06-03T10:28:41.523629Z node 37 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11581, node 37 2025-06-03T10:28:41.541629Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:41.541646Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:41.541649Z node 37 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 
2025-06-03T10:28:41.541711Z node 37 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:41.607612Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:41.607651Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:41.608767Z node 37 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(37, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:41.612168Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:41.615413Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:41.948654Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7511668111117462187:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.948676Z node 37 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [37:7511668111117462198:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.948686Z node 37 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.949590Z node 37 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:41.952510Z node 37 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [37:7511668111117462201:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:42.036121Z node 37 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [37:7511668115412429549:2377] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:42.047571Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc3tw1263csmwtkpg455m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=NDdhZDcxNGUtNzljODNkNGQtZTRmYWI5OTYtN2VjYmQwMTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:42.070164Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtnc3y99yqxf7thjktjw4b2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=ZjlkM2JkYWUtN2Y2YjQyNDktODMxMjQxMWEtYzBiM2I3MDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:42.074153Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946522113, txId: 281474976715662] shutting down 2025-06-03T10:28:42.101356Z node 37 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnc3yzfwm2397bwjkgxp7a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=37&id=YjczM2M4YzUtMWZmZjMyZGItNDY2ZWVmZGYtMTY4MzliNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:42.101992Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [37:7511668115412429672:2363], owner: [37:7511668115412429668:2361], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:28:42.102237Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [37:7511668115412429672:2363], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:42.102340Z node 37 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [37:7511668115412429672:2363], row count: 2, finished: 1 2025-06-03T10:28:42.102352Z node 37 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [37:7511668115412429672:2363], owner: [37:7511668115412429668:2361], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:28:42.103094Z node 37 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946522100, txId: 281474976715664] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> BootstrapperTest::MultipleBootstrappers [GOOD] Test command err: ... waiting for pipe to connect ... stopping current instance ... waiting for pipe to disconnect ... waiting for pipe to connect ... 
sleeping for 2 seconds 2025-06-03T10:28:38.492936Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:38.492954Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:38.492961Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:38.493072Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-03T10:28:38.493077Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8427358873417017059 2025-06-03T10:28:38.493119Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-03T10:28:38.493123Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 314095936534775797 2025-06-03T10:28:38.493127Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: NODATA, leader: [0:0:0] 2025-06-03T10:28:38.493130Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 4772764162469967008 2025-06-03T10:28:38.493332Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-03T10:28:38.493369Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: UNKNOWN 2025-06-03T10:28:38.493374Z node 3 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot 2025-06-03T10:28:38.493452Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-03T10:28:38.493457Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) 2025-06-03T10:28:38.493465Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-03T10:28:38.493468Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) ... waiting for pipe to connect ... tablet initially started on node 3 (idx 1) in gen 2 ... disconnecting other nodes ... 
sleeping for 2 seconds (tablet expected to survive) 2025-06-03T10:28:39.597174Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335025 2025-06-03T10:28:39.597198Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:39.597275Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335025 2025-06-03T10:28:39.597284Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:39.597814Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] 2025-06-03T10:28:39.597832Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] 2025-06-03T10:28:39.598192Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-03T10:28:39.598203Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting 2025-06-03T10:28:39.598267Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-03T10:28:39.598272Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... disconnecting other nodes (new tablet connections fail) ... sleeping for 2 seconds (tablet expected to survive) 2025-06-03T10:28:40.412573Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 5 2025-06-03T10:28:40.412600Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437184] NodeDisconnected NodeId# 4 2025-06-03T10:28:40.412647Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-03T10:28:40.412659Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:40.412669Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:303: tablet: 9437184, type: Dummy, disconnected 2025-06-03T10:28:40.412676Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:40.412869Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] 2025-06-03T10:28:40.412897Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] ... disconnecting nodes 1 <-> 2 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 1 <-> 3 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-03T10:28:40.413068Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-03T10:28:40.413076Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 7384299258267454889 2025-06-03T10:28:40.413104Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-03T10:28:40.413109Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 16879683490511761896 2025-06-03T10:28:40.413244Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-03T10:28:40.413253Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) 2025-06-03T10:28:40.413262Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: OWNER 2025-06-03T10:28:40.413267Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 3 (owner) ... disconnect other nodes (new owner expected) ... sleeping for 2 seconds (new tablet expected to start once) 2025-06-03T10:28:41.203103Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335028 2025-06-03T10:28:41.203136Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:41.203150Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:643: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335028 2025-06-03T10:28:41.203158Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:41.203345Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] 2025-06-03T10:28:41.203417Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [3:278:2096] ... disconnecting nodes 1 <-> 2 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 ... disconnecting nodes 1 <-> 3 (tablet connect attempt) ... blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to TABLET_ACTOR cookie 1 2025-06-03T10:28:41.203521Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-03T10:28:41.203528Z node 4 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 5581027938793353785 2025-06-03T10:28:41.203584Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: ERROR 2025-06-03T10:28:41.203589Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:330: tablet:9437184, type: Dummy, begin new round, seed: 8470239763125230813 ... disconnecting nodes 1 <-> 2 (bootstrap watch attempt) ... 
blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335030 2025-06-03T10:28:41.203626Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 5 state: UNKNOWN 2025-06-03T10:28:41.203642Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335030 2025-06-03T10:28:41.203646Z node 4 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: DISCONNECTED 2025-06-03T10:28:41.203651Z node 4 :BOOTSTRAPPER NOTICE: bootstrapper.cpp:680: tablet: 9437184, type: Dummy, boot ... disconnecting nodes 1 <-> 3 (bootstrap watch attempt) ... blocking NKikimr::TEvBootstrapper::TEvWatch from TABLET_BOOTSTRAPPER to TABLET_BOOTSTRAPPER cookie 16045690984833335030 2025-06-03T10:28:41.203773Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:403: tablet: 9437184, type: Dummy, disconnected from 3, round 16045690984833335030 2025-06-03T10:28:41.203777Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 3 state: DISCONNECTED 2025-06-03T10:28:41.203785Z node 5 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:428: tablet: 9437184, type: Dummy, apply alien 4 state: OWNER 2025-06-03T10:28:41.203790Z node 5 :BOOTSTRAPPER INFO: bootstrapper.cpp:571: tablet: 9437184, type: Dummy, become watch on node 4 (owner) 2025-06-03T10:28:41.204096Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:715: tablet: 9437184, type: Dummy, tablet dead 2025-06-03T10:28:41.204107Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:193: tablet: 9437184, type: Dummy, begin new cycle (lookup in state storage) 2025-06-03T10:28:41.205784Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:215: tablet: 9437184, type: Dummy, lookup: OK, leader: [4:423:2096] 2025-06-03T10:28:41.210002Z node 3 :BOOTSTRAPPER DEBUG: bootstrapper.cpp:266: tablet: 9437184, type: Dummy, connect: OK 2025-06-03T10:28:41.210028Z node 3 :BOOTSTRAPPER INFO: bootstrapper.cpp:277: tablet: 9437184, type: Dummy, connected to leader, waiting ... waiting for pipe to connect ... disconnecting nodes 1 <-> 0 (tablet connect attempt) ... 
blocking NKikimr::TEvTabletPipe::TEvConnect from TABLET_PIPE_CLIENT to cookie 1 >> Viewer::JsonAutocompleteEmpty [GOOD] >> Viewer::JsonAutocompleteEndOfDatabaseName >> AsyncIndexChangeCollector::CoveredIndexUpsert [GOOD] >> AsyncIndexChangeCollector::AllColumnsInPk >> KqpCost::OltpWriteRow-isSink [GOOD] >> AsyncIndexChangeCollector::DeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow >> PgCatalog::PgTables [GOOD] >> Cdc::RacyActivateAndEnqueue [GOOD] >> Cdc::RacyCreateAndSend >> CdcStreamChangeCollector::DeleteNothing [GOOD] >> CdcStreamChangeCollector::DeleteSingleRow >> AsyncIndexChangeCollector::MultiIndexedTableInsertSingleRow [GOOD] >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowByCount [GOOD] >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow-isSink [GOOD] Test command err: Trying to start YDB, gRPC: 12202, MsgBus: 65364 2025-06-03T10:28:41.920925Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668111366987456:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:41.920962Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f68/r3tmp/tmpaBlytH/pdisk_1.dat 2025-06-03T10:28:41.995981Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668111366987433:2079] 1748946521920754 != 1748946521920757 2025-06-03T10:28:41.997860Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12202, node 1 2025-06-03T10:28:42.011897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:42.011911Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:42.011913Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:42.011959Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:42.024127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:42.024164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:42.025127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:65364 TClient is connected to server localhost:65364 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:42.081669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.093956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.118514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.179562Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.193801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.377047Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668115661956380:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.377083Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.436979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.446424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.457951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.472394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.486583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.500548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.514501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.531100Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668115661957032:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.531133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.531179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668115661957037:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.532227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:42.541232Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668115661957039:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:42.604007Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668115661957090:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:42.807899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 query_phases { duration_us: 192 cpu_time_us: 192 } query_phases { duration_us: 2045 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 536 affected_shards: 1 } compilation { duration_us: 14189 cpu_time_us: 12905 } process_cpu_time_us: 343 total_duration_us: 17022 total_cpu_time_us: 13976 query_phases { duration_us: 273 cpu_time_us: 273 } query_phases { duration_us: 2293 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 626 affected_shards: 1 } compilation { duration_us: 14090 cpu_time_us: 12968 } process_cpu_time_us: 471 total_duration_us: 18297 total_cpu_time_us: 14338 2025-06-03T10:28:42.931608Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7511668115661957478:2536], TxId: 281474976715677, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=MjQ0Y2E4ZDYtYjVmNmQ3NzMtYTAxMTJiNjktYmM5ZGM2NGE=. TraceId : 01jwtnc4r22r15hdafef5m54ng. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-03T10:28:42.931853Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7511668115661957480:2537], TxId: 281474976715677, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=MjQ0Y2E4ZDYtYjVmNmQ3NzMtYTAxMTJiNjktYmM5ZGM2NGE=. CustomerSuppliedId : . TraceId : 01jwtnc4r22r15hdafef5m54ng. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7511668115661957475:2498], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:28:42.931984Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MjQ0Y2E4ZDYtYjVmNmQ3NzMtYTAxMTJiNjktYmM5ZGM2NGE=, ActorId: [1:7511668115661957313:2498], ActorState: ExecuteState, TraceId: 01jwtnc4r22r15hdafef5m54ng, Create QueryResponse for error on request, msg: query_phases { duration_us: 334 cpu_time_us: 334 } query_phases { duration_us: 1447 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 1614 affected_shards: 1 } query_phases { duration_us: 3454 cpu_time_us: 3430 } compilation { duration_us: 41037 cpu_time_us: 38659 } process_cpu_time_us: 673 total_duration_us: 49430 total_cpu_time_us: 44710 query_phases { duration_us: 283 cpu_time_us: 283 } query_phases { duration_us: 6521 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 5706 affected_shards: 1 } query_phases { duration_us: 709 cpu_time_us: 765 } query_phases { duration_us: 3279 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 597 affected_shards: 1 } compilation { duration_us: 42330 cpu_time_us: 39951 } process_cpu_time_us: 696 total_duration_us: 57107 total_cpu_time_us: 47998 query_phases { duration_us: 300 cpu_time_us: 300 } query_phases { duration_us: 1224 table_access { name: "/Root/TestTable" partitions_count: 1 } cpu_time_us: 1198 affected_shards: 1 } query_phases { duration_us: 530 cpu_time_us: 372 affected_shards: 1 } compilation { duration_us: 33131 cpu_time_us: 31287 } process_cpu_time_us: 630 total_duration_us: 36621 total_cpu_time_us: 33787 query_phases { duration_us: 187 cpu_time_us: 187 } query_phases { duration_us: 950 table_access { name: "/Root/TestTable" reads { rows: 1 bytes: 8 } partitions_count: 1 } cpu_time_us: 950 affected_shards: 1 } query_phases { duration_us: 1633 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 453 affected_shards: 1 } compilation { duration_us: 25901 cpu_time_us: 24455 } process_cpu_time_us: 422 total_duration_us: 29958 total_cpu_time_us: 26467 query_phases { duration_us: 240 cpu_time_us: 240 } query_phases { duration_us: 2060 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 525 affected_shards: 1 } compilation { duration_us: 12086 cpu_time_us: 10932 } process_cpu_time_us: 403 total_duration_us: 15025 total_cpu_time_us: 12100 query_phases { duration_us: 193 cpu_time_us: 193 } query_phases { duration_us: 1938 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 471 affected_shards: 1 } compilation { duration_us: 13616 cpu_time_us: 12164 } process_cpu_time_us: 350 total_duration_us: 16409 total_cpu_time_us: 13178 >> CdcStreamChangeCollector::UpsertIntoTwoStreams [GOOD] >> CdcStreamChangeCollector::PageFaults |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> PgCatalog::PgTables [GOOD] Test command err: Trying to start YDB, gRPC: 23962, MsgBus: 6059 2025-06-03T10:28:01.026559Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667941882922892:2090];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:01.026910Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023c8/r3tmp/tmpnmveYc/pdisk_1.dat 2025-06-03T10:28:01.141515Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667937587955518:2079] 1748946481001639 != 1748946481001642 2025-06-03T10:28:01.142794Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23962, node 1 2025-06-03T10:28:01.185338Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:01.185399Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:01.188507Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:01.209527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:01.209541Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:01.209543Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:01.209586Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6059 TClient is connected to server localhost:6059 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:01.346196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:01.353895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 1042 2025-06-03T10:28:01.862212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce_pgbpchar_17472595041006102391_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) 2025-06-03T10:28:01.911278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667941882923567:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.911302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.911446Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667941882923579:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:01.912196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:01.914880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:28:01.914950Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667941882923581:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:01.994557Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667941882923632:2385] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:02.058511Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715663] at 72075186224037888: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-03T10:28:02.060830Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715663 at tablet 72075186224037888 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-03T10:28:02.060917Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:863: ActorId: [1:7511667946177890988:2336] TxId: 281474976715663. Ctx: { TraceId: 01jwtnawqpbgma3c3xhpkah0hq, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmEwYjU0MzUtNDQwOGY3YzEtNzVlMmRjYjUtMTIwZWYyZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-03T10:28:02.062848Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZmEwYjU0MzUtNDQwOGY3YzEtNzVlMmRjYjUtMTIwZWYyZWM=, ActorId: [1:7511667941882923564:2336], ActorState: ExecuteState, TraceId: 01jwtnawqpbgma3c3xhpkah0hq, Create QueryResponse for error on request, msg:
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-03T10:28:02.068661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480
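For context on the EXEC_ERROR above: the pg suite deliberately inserts a four-character value into a character(2) column, and YDB's pg layer, like stock PostgreSQL, rejects the assignment coercion instead of silently truncating. A minimal repro of that behavior, assuming a pg-syntax endpoint (the table and column names here are illustrative, not the generated names the test uses):

--!syntax_pg
CREATE TABLE coerce_demo (
    key   int2 PRIMARY KEY,
    value character(2)
);
-- 'abcd'::bpchar carries no length modifier, so the column typmod (2)
-- is applied on assignment and the statement fails with:
--   ERROR: value too long for type character(2)
INSERT INTO coerce_demo (key, value) VALUES ('0'::int2, 'abcd'::bpchar);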
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Typemod mismatch, got type _pgbpchar for column value, type mod , but expected 2 --!syntax_pg INSERT INTO Coerce__pgbpchar_17472595041006102391_5352544928909966465 (key, value) VALUES ( '0'::int2, '{abcd,abcd}'::_bpchar )
: Error: Error executing transaction (ExecError): Execution failed
: Error: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 1042 2025-06-03T10:28:02.136539Z node 1 :TX_DATASHARD CRIT: execute_kqp_data_tx_unit.cpp:449: Exception while executing KQP transaction [0:281474976715668] at 72075186224037889: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-03T10:28:02.138613Z node 1 :TX_DATASHARD ERROR: finish_propose_unit.cpp:174: Errors while proposing transaction txid 281474976715668 at tablet 72075186224037889 status: EXEC_ERROR errors: UNKNOWN (Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ) | 2025-06-03T10:28:02.138686Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:863: ActorId: [1:7511667946177891118:2370] TxId: 281474976715668. Ctx: { TraceId: 01jwtnawxc2kxhe1w5eqv9rebh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmFlZDFhZWYtZmRmZTZiMTMtY2IxYTM1MzgtODM4NmVmNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. EXEC_ERROR: [UNKNOWN] Tx was terminated: ydb/core/tx/datashard/datashard_kqp_upsert_rows.cpp:87: Apply(): requirement !error failed. Incorrect value: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) ; 2025-06-03T10:28:02.138745Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NmFlZDFhZWYtZmRmZTZiMTMtY2IxYTM1MzgtODM4NmVmNDg=, ActorId: [1:7511667946177891078:2370], ActorState: ExecuteState, TraceId: 01jwtnawxc2kxhe1w5eqv9rebh, Create QueryResponse for error on request, msg: 2025-06-03T10:28:02.143363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480
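The "Typemod mismatch" failures in the same suite exercise the BulkUpsert path, where the incoming pg value must carry the exact type modifier the column was declared with: a bare bpchar (typmod -1) is rejected up front rather than coerced. Standard pg casting rules are also why the error only surfaces on assignment; an explicit cast truncates instead of erroring, as in this sketch of the distinction:

--!syntax_pg
SELECT 'abcd'::bpchar;        -- no length modifier, value stays 'abcd'
SELECT 'abcd'::character(2);  -- explicit cast applies typmod 2, yields 'ab'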
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_2169371982377735806_17823623939509273229' Typemod mismatch, got type pgbpchar for column value, type mod , but expected 4 --!syntax_pg INSERT INTO Coerce_pgbpchar_2169371982377735806_17823623939509273229 (key, value) VALUES ( '0'::int2, 'abcd'::bpchar ) abcd ... ize from file: (empty maybe) 2025-06-03T10:28:41.581518Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:41.581572Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24292 TClient is connected to server localhost:24292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:41.650237Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:41.650266Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:41.651414Z node 13 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:41.653652Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:41.941130Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511668110462875140:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.941172Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511668110462875124:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.941268Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:41.942190Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:41.944400Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511668110462875153:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:42.007651Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511668114757842500:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 14753, MsgBus: 28686 2025-06-03T10:28:42.270998Z node 14 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[14:7511668117086948706:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:42.271027Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0023c8/r3tmp/tmpDllXPt/pdisk_1.dat 2025-06-03T10:28:42.287488Z node 14 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14753, node 14 2025-06-03T10:28:42.304855Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:42.304869Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:42.304871Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:42.304918Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28686 TClient is connected to server localhost:28686 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
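A brief aside on the recurring WorkloadService warnings: each freshly created test database starts without a "default" resource pool, the first query triggers its lazy creation under /Root/.metadata/workload_manager/pools, and any concurrent creator then observes "path exist, request accepts it". The whole sequence is a benign startup race, which is why it is logged at WARN rather than ERROR. Pools can also be created explicitly; a sketch assuming current YQL workload-manager syntax, with an illustrative pool name and arbitrary limits:

CREATE RESOURCE POOL demo_pool WITH (
    CONCURRENT_QUERY_LIMIT = 10,
    QUEUE_SIZE = 100
);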
2025-06-03T10:28:42.371968Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:42.372000Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:42.372690Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:42.375521Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:42.792553Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511668117086949296:2327], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.792591Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.792695Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511668117086949331:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:42.793645Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:42.796788Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:28:42.796857Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7511668117086949333:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:42.865730Z node 14 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [14:7511668117086949384:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:42.875040Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:42.891896Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.308202Z node 14 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 14, TabletId: 72075186224037888 not found 2025-06-03T10:28:43.310812Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715671:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.439214Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [14:7511668121381917196:2431], TxId: 281474976715672, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=14&id=NGUzN2NkNjgtMzMyMGE3NWMtOTAzYjI1ZTItYmZmM2YzY2E=. TraceId : 01jwtnc5779jfe8x1677xrys22. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED DEFAULT_ERROR: {
: Error: Terminate was called, reason(57): ERROR: invalid input syntax for type boolean: "pg_proc" }. 2025-06-03T10:28:43.439532Z node 14 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [14:7511668121381917198:2432], TxId: 281474976715672, task: 2. Ctx: { SessionId : ydb://session/3?node_id=14&id=NGUzN2NkNjgtMzMyMGE3NWMtOTAzYjI1ZTItYmZmM2YzY2E=. TraceId : 01jwtnc5779jfe8x1677xrys22. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [14:7511668121381917193:2428], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:28:43.439693Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=NGUzN2NkNjgtMzMyMGE3NWMtOTAzYjI1ZTItYmZmM2YzY2E=, ActorId: [14:7511668121381917187:2428], ActorState: ExecuteState, TraceId: 01jwtnc5779jfe8x1677xrys22, Create QueryResponse for error on request, msg: >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] >> Cdc::DropColumn [GOOD] >> Cdc::DropIndex >> CdcStreamChangeCollector::IndexAndStreamUpsert [GOOD] >> CdcStreamChangeCollector::NewImage |64.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |64.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk |64.2%| [LD] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/ydb-core-persqueue-ut-ut_with_sdk >> KqpCost::IndexLookup+useSink [GOOD] >> KqpCost::ScanQueryRangeFullScan+SourceRead >> AsyncIndexChangeCollector::MultiIndexedTableUpdateOneIndexedColumn [GOOD] >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow >> KqpCost::OlapRangeFullScan |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> Viewer::JsonAutocompleteEndOfDatabaseName [GOOD] >> Viewer::JsonAutocompleteScheme ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::UpsertModifyDelete [GOOD] Test command err: 2025-06-03T10:28:40.062301Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:40.062377Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:40.062399Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002048/r3tmp/tmpV6y4S7/pdisk_1.dat 2025-06-03T10:28:40.189428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.209489Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:40.210530Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-03T10:28:40.210723Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946519562109 != 1748946519562113 2025-06-03T10:28:40.253290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:40.253354Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:40.264124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:40.341498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.359603Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:28:40.359699Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.370276Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:40.370326Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.370501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:40.370511Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:40.370519Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:40.370583Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.370604Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.370618Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:28:40.380962Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.386353Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 
72075186224037888 2025-06-03T10:28:40.386453Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.386485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:28:40.386491Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.386497Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:40.386504Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.386684Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:40.386712Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:40.386843Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.386878Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.386889Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:40.386896Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.386912Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:28:40.386952Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.387010Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:40.387032Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:40.387416Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.397744Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:40.397798Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.543666Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:28:40.545031Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:40.545059Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-03T10:28:40.545126Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.545137Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:40.545150Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:40.545252Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:28:40.545290Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:40.545459Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.545473Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:40.545927Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:40.546021Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.546363Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:40.546371Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.546555Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:40.546568Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.546715Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.546723Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.546730Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:40.546746Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:40.546757Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:40.546768Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.547706Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.547985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 
72057594046316545 last step 0 next step 1000 2025-06-03T10:28:40.548099Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:40.548108Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:40.552369Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.552416Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:40.552430Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:40.552436Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:40.552614Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Dis ... 24037888 state Ready 2025-06-03T10:28:43.446572Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:43.446686Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:43.450674Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:43.450721Z node 3 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:43.450735Z node 3 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:43.450741Z node 3 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:43.450866Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:43.471951Z node 3 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:43.635544Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:43.635573Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.635627Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:43.635636Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:43.635646Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation 
[1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:43.635702Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-03T10:28:43.635733Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:43.635846Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:43.636003Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:43.670339Z node 3 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-03T10:28:43.670374Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:43.670379Z node 3 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:43.670389Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.670416Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [3:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:43.670429Z node 3 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-03T10:28:43.670441Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.670897Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 2025-06-03T10:28:43.670909Z node 3 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:43.674243Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.674277Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:886:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.674287Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.675226Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:43.676117Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:43.831171Z node 3 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:43.831587Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:43.853551Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:43.868115Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc5gs4ck55h42v7xwgfhy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OGNhODI5Yy01Yzg3ZTVmYy02YWRmMDc0Ny00NmQ5MmJiMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:43.868771Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:977:2776], serverId# [3:978:2777], sessionId# [0:0:0] 2025-06-03T10:28:43.868941Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-06-03T10:28:43.869044Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946523868986 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:43.869099Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-03T10:28:43.879701Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:43.879746Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.889914Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnc5q9dg2j86mz03gnsh6n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MjZjY2YyYmYtNGRhMWZkNjUtYTBlYzU3YjktNTdkMTRjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:43.890500Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-06-03T10:28:43.890588Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946523890560 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 50b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:43.890620Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-03T10:28:43.901048Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 50 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:43.901075Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.914403Z node 3 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtnc5qy6pqx89xx6zm6pgk8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZjM2NDExZTEtMWRmMzc3ZTMtNjNhNmIwMS1iMjEyNGYzZg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:43.915115Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:5] at 72075186224037888 2025-06-03T10:28:43.915220Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1748946523915185 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:43.915254Z node 3 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:5] at 72075186224037888, row count=1 2025-06-03T10:28:43.925694Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:43.925727Z node 3 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:43.926402Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:1024:2806], serverId# [3:1025:2807], sessionId# [0:0:0] 2025-06-03T10:28:43.927453Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [3:1026:2808], serverId# [3:1027:2809], sessionId# [0:0:0] >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx0 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 2730, MsgBus: 63092 2025-06-03T10:28:43.143474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668119539928247:2065];send_to=[0:7307199536658146131:7762515]; 
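The KqpCost::IndexLookup suite, whose output begins just above, measures per-phase cost accounting (query_phases, table_access, cpu_time_us) for point reads served through a secondary index. The query under test has roughly the following shape, with illustrative table and index names, using YQL's VIEW keyword to address a secondary index:

SELECT * FROM `/Root/SecondaryKeys` VIEW Index WHERE Fk = 1;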
2025-06-03T10:28:43.143495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f57/r3tmp/tmpvqV9bo/pdisk_1.dat 2025-06-03T10:28:43.209746Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:43.209838Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668119539928223:2079] 1748946523143282 != 1748946523143285 TServer::EnableGrpc on GrpcPort 2730, node 1 2025-06-03T10:28:43.223273Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:43.223284Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:43.223286Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:43.223328Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63092 TClient is connected to server localhost:63092 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:43.278957Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:43.278990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:43.280231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:43.288222Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:43.295653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:43.367964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:43.428449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:43.441183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:43.536386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668119539929890:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.536411Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.579872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.591022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.600886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.613263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.669663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.683253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.697216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:43.713761Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668119539930543:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.713789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668119539930548:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.713797Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.714723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:43.723880Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668119539930550:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:28:43.823859Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668119539930602:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:43.979309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> Cdc::SplitTopicPartition_TopicAutoPartitioning [GOOD] >> Cdc::ShouldDeliverChangesOnSplitMerge >> AsyncIndexChangeCollector::AllColumnsInPk [GOOD] >> AsyncIndexChangeCollector::CoverIndexedColumn >> AsyncIndexChangeCollector::IndexedPrimaryKeyDeleteSingleRow [GOOD] >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx0 [GOOD] Test command err: iteration# 0 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 6 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 12 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 18 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 24 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 30 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 36 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 42 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 48 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 54 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 60 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 66 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 72 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 78 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 84 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 90 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 96 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 102 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 108 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 114 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 120 BlobsWritten# 
2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 [iterations 126-684 and 1368-1962, step 6, all repeat the same counts: BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218; the captured output is itself truncated mid-record between iteration 684 and iteration 1368] iteration# 1968 BlobsWritten# 2041
blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1974 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1980 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1986 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1992 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1998 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2004 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2010 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2016 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2022 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2028 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2034 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2040 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] >> Cdc::RacyCreateAndSend [GOOD] >> Cdc::RacySplitAndDropTable |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> Viewer::JsonAutocompleteScheme [GOOD] >> Viewer::JsonAutocompleteEmptyColumns >> AsyncIndexChangeExchange::ShouldRejectChangesOnQueueOverflowBySize [GOOD] >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::IndexedPrimaryKeyInsertSingleRow [GOOD] Test command err: 2025-06-03T10:28:39.994483Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:39.994582Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:39.994620Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002046/r3tmp/tmpLRJSJh/pdisk_1.dat 2025-06-03T10:28:40.109826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.136767Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:40.138397Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946519501973 != 1748946519501977 2025-06-03T10:28:40.182794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:40.182850Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:40.194136Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:40.273949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.298766Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:28:40.298887Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.311872Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:28:40.311960Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.313804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:40.313855Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.314066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:40.314078Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:40.314087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:40.314158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.314185Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.314201Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:28:40.314299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:28:40.314317Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.314447Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:28:40.314454Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:28:40.314461Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:28:40.314490Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.314503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.314512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:28:40.324956Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.332732Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:40.332869Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.332906Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:28:40.332914Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.332921Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:40.332929Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.333064Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.333074Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:28:40.333087Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.333098Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:28:40.333102Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:40.333107Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:28:40.333111Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.333287Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:40.333350Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:40.333399Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.333410Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:28:40.333422Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:40.333429Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.333437Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:28:40.333447Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:28:40.333479Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:28:40.333486Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.333491Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.333495Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:28:40.333501Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:40.333682Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.333756Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:40.333783Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:40.333918Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:28:40.333983Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:28:40.334016Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:40.334031Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:28:40.334418Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.334440Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:40.344985Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:40.345050Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.345317Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:28:40.345337Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.496494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:28:40.496741Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:28:40.498096Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:28:40.498143Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.498316Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.498334Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... keys extracted: 0 2025-06-03T10:28:44.726132Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:44.726193Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:44.726200Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:44.726207Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:44.726245Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:28:44.726269Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:44.726286Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:44.726303Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037889 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:44.726453Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037889 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:44.726574Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:44.727179Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:44.727205Z node 4 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 4] schema version# 1 2025-06-03T10:28:44.727311Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:44.727413Z node 4 
:TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:44.727673Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-03T10:28:44.727686Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:44.728041Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-03T10:28:44.728062Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:44.728408Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:44.728425Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.728511Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:44.728520Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.728771Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:44.728786Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:44.728794Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-03T10:28:44.728818Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:44.728833Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:44.728847Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:44.729241Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.729265Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:44.729420Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.729432Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:44.729438Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:44.729455Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:44.729465Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 
281474976715657 state Ready TxInFly 0 2025-06-03T10:28:44.729477Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.730230Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-03T10:28:44.730254Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-03T10:28:44.730389Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:44.730436Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:44.730489Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:44.730498Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:44.735214Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:779:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.735247Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:790:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.735259Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.736446Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:44.738038Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.738077Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:44.885631Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.885684Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:44.886247Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:793:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:44.918146Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:864:2698] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:44.930373Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnc6hy2mvcs1dbp4w838hd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=OGZmZDllMDEtZmRhYTU4NTktYjgwNTIyNDctNmY1ZTQ3OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:44.931135Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:933:2729], serverId# [4:934:2730], sessionId# [0:0:0] 2025-06-03T10:28:44.931319Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-06-03T10:28:44.931426Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946524931384 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:44.931464Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-03T10:28:44.941954Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:44.941989Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:44.945107Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:940:2735], serverId# [4:941:2736], sessionId# [0:0:0] 2025-06-03T10:28:44.946242Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:942:2737], serverId# [4:943:2738], sessionId# [0:0:0] >> KqpCost::OlapRangeFullScan [GOOD] >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] >> CdcStreamChangeCollector::NewImage [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::DeleteSingleRow [GOOD] Test command err: 2025-06-03T10:28:39.816140Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:39.816233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:39.816265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002049/r3tmp/tmpz47RNh/pdisk_1.dat 2025-06-03T10:28:39.950241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:39.972453Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:39.978980Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946519324370 != 1748946519324374 2025-06-03T10:28:40.023061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:40.023100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:40.033566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:40.107534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.133718Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:28:40.133804Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.146053Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:28:40.146126Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.147690Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:40.147735Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.147922Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:40.147931Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:40.147938Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:40.147996Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.148018Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.148034Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:28:40.148116Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:28:40.148131Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.148243Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:28:40.148250Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:28:40.148256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:28:40.148282Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.148293Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.148300Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:28:40.158739Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.164024Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:40.164109Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.164135Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:28:40.164142Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.164147Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:40.164154Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.164241Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.164248Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:28:40.164257Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.164268Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:28:40.164272Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:40.164276Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:28:40.164280Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.164397Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:40.164425Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:40.164460Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.164468Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:28:40.164478Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:40.164484Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.164490Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:28:40.164500Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:28:40.164524Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:28:40.164531Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.164535Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.164539Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:28:40.164544Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:40.164658Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.164713Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:40.164735Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:40.164832Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:28:40.164876Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:28:40.164901Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:40.164913Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:28:40.165242Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.165259Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:40.175808Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:40.175861Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.176037Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:28:40.176048Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.320555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:28:40.320706Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:28:40.321683Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:28:40.321713Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.321814Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.321823Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... 88 step# 1000} 2025-06-03T10:28:44.494879Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.495083Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.495095Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:44.495103Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:44.495120Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:44.495133Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:44.495146Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.495365Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.495782Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:44.495803Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:44.495979Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:44.500750Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:44.500802Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:44.500815Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme 
version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:44.500822Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:44.501100Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.522294Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:44.685688Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:44.685720Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.685787Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:44.685798Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:44.685809Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:44.685865Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-03T10:28:44.685899Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:44.685958Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:44.686083Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:44.720186Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-03T10:28:44.720224Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.720232Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.720245Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.720268Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:44.720283Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-03T10:28:44.720297Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.720833Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 
state Ready 2025-06-03T10:28:44.720848Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:44.724034Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.724056Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:886:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.724078Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.724916Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:44.726129Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.882388Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.882846Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:44.904454Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:44.913399Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc6hk63jakhy67dhb2ckw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YzQyMWIyYjAtOGMyOTA2MGYtZjhlYjViMTMtY2I1NTRmN2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:44.914024Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-06-03T10:28:44.914158Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-06-03T10:28:44.914228Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946524914198 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:44.914250Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-03T10:28:44.924638Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:44.924667Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.935242Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnc6qx1228vz2rj5tn0ym8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZjYyZTc5NWYtYTk3MDVmODktZWJjNDNlODItMTEzMTdhMzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:44.935797Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-06-03T10:28:44.935884Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946524935858 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 34b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:44.935915Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-03T10:28:44.946283Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 34 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:44.946305Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.946822Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-06-03T10:28:44.947826Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0] >> KqpPg::TableDeleteWhere-useSink [GOOD] >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |64.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::MultiIndexedTableReplaceSingleRow [GOOD] Test command err: 2025-06-03T10:28:40.507688Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:40.507790Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:40.507824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002042/r3tmp/tmpYlQk8i/pdisk_1.dat 2025-06-03T10:28:40.629841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.647767Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:40.648972Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946520019822 != 1748946520019826 2025-06-03T10:28:40.690914Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:40.690949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:40.701616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:40.775232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.794183Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:28:40.794284Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.803075Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:28:40.803135Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.804200Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:40.804228Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.804375Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:40.804382Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:40.804387Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:40.804427Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.804444Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.804453Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:28:40.804508Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:28:40.804519Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.804587Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:28:40.804591Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:28:40.804595Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:28:40.804610Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.804618Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.804623Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:28:40.814920Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.819046Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:40.819146Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.819177Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:28:40.819184Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.819190Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:40.819197Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.819300Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.819309Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:28:40.819320Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.819329Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:28:40.819336Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:40.819340Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:28:40.819344Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.819475Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:40.819507Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:40.819549Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.819557Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:28:40.819567Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:40.819574Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.819581Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:28:40.819592Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:28:40.819620Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:28:40.819629Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.819634Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.819639Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:28:40.819645Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:40.819788Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.819849Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:40.819871Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:40.819978Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:28:40.820027Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:28:40.820051Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:40.820061Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:28:40.820364Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.820378Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:40.830739Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:40.830786Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.830958Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:28:40.830969Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.975096Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:28:40.975219Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:28:40.976188Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:28:40.976208Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:40.976292Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:40.976302Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... 2025-06-03T10:28:45.112397Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.112407Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.112496Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:28:45.112501Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-03T10:28:45.112505Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-03T10:28:45.112512Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.112518Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.112527Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:28:45.113274Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.113533Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-03T10:28:45.113545Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-03T10:28:45.113592Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.113629Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 
2025-06-03T10:28:45.113692Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:45.113699Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:45.113818Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-03T10:28:45.113827Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-03T10:28:45.117527Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:828:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.117564Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:838:2692], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.117575Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.118503Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:45.119523Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.119554Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.119570Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.265902Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.265945Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.265963Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.266491Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:842:2695], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:45.298182Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:914:2736] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:45.310757Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnc6xx5fs87m3wq8fk5f8b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YzE2N2E4ZjAtYzgxMGQ2NTctYmE5OWM0MDQtYzM3ODc4MDY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:45.311374Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1020:2780], serverId# [4:1021:2781], sessionId# [0:0:0] 2025-06-03T10:28:45.311487Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-06-03T10:28:45.311567Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946525311535 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.311596Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946525311535 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.311613Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-03T10:28:45.322000Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:45.322027Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:45.331400Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc74a9bv8t70vsezf9pky, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=YmY3M2ViNTItY2VmMzUzYmMtZjM0Nzc4MWYtNjM3OTljM2Y=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:45.331962Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037889 2025-06-03T10:28:45.332039Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1748946525332015 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.332061Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 1748946525332015 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.332070Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 5 Group: 1748946525332015 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.332078Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 1748946525332015 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 24b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:45.332089Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-03T10:28:45.342468Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 5 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 24 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:45.342501Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:45.344806Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1070:2821], serverId# [4:1071:2822], sessionId# [0:0:0] 2025-06-03T10:28:45.345727Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1072:2823], serverId# [4:1073:2824], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::NewImage [GOOD] Test command err: 2025-06-03T10:28:39.934540Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for 
error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:39.934642Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:39.934676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002043/r3tmp/tmpqMzYxF/pdisk_1.dat 2025-06-03T10:28:40.057936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.076405Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:40.077763Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-03T10:28:40.078026Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946519447099 != 1748946519447103 2025-06-03T10:28:40.121949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:40.121990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:40.132570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:40.207980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:40.229106Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:28:40.229233Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:40.242528Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:40.242574Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:40.242757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:40.242766Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:40.242774Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:40.242831Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:40.242851Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:40.242865Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:28:40.253321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:40.258690Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 
72075186224037888 2025-06-03T10:28:40.258769Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:40.258793Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:28:40.258799Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.258804Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:40.258810Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.258965Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:40.258989Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:40.259095Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.259104Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.259113Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:40.259118Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.259131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:28:40.259165Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.259216Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:40.259234Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:40.259593Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.269931Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:40.269981Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:40.415982Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:28:40.417094Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:40.417120Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-03T10:28:40.417214Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.417224Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:40.417236Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:40.417324Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:28:40.417361Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:40.417531Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:40.417548Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:40.418014Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:40.418114Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:40.418501Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:40.418511Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.418739Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:40.418754Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.418935Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:40.418944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:40.418951Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:40.418970Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:40.418982Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:40.418993Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:40.420023Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:40.420337Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 
72057594046316545 last step 0 next step 1000 2025-06-03T10:28:40.420479Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:40.420489Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:40.425226Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:40.425283Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:40.425368Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:40.425377Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:40.425678Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Dis ... 88 step# 1000} 2025-06-03T10:28:44.917080Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.917267Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:44.917276Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:44.917280Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:44.917316Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:44.917330Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:44.917343Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:44.917511Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.917870Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:44.917891Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:44.918042Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:44.921497Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:44.921534Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:44.921545Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 
current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:44.921548Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:44.921734Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:44.942717Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:45.103556Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:45.103587Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.103651Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:45.103660Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:45.103670Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:45.103722Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-03T10:28:45.103745Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:45.103794Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:45.103918Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:45.137649Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-03T10:28:45.137690Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.137699Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.137712Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.137735Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.137752Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-03T10:28:45.137769Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.138391Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 
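For orientation between the two runs above (this paragraph and sketch are not part of the captured output): the trace walks a planned scheme transaction through its lifecycle twice — the shard accepts a proposal, a coordinator assigns it a plan step (1000, then 1500), the operation shows up as ready in the shard's PlanQueue, executes at its (step, txId) position, completes, and the shard notifies the schemeshard. A minimal sketch of that ordering rule, where only the PlanQueue name and the step/txId values are taken from the log and everything else is hypothetical illustration, not YDB code:

```python
# Hypothetical illustration of the ordering visible in the trace above:
# planned operations become ready and execute in (plan_step, tx_id) order.
import heapq

class PlanQueue:
    def __init__(self):
        self._heap = []  # (step, tx_id) pairs, smallest first

    def plan(self, step: int, tx_id: int) -> None:
        heapq.heappush(self._heap, (step, tx_id))

    def next_ready(self):
        return heapq.heappop(self._heap) if self._heap else None

q = PlanQueue()
q.plan(1500, 281474976715658)  # second scheme tx, planned at step 1500
q.plan(1000, 281474976715657)  # CREATE TABLE, planned at step 1000
assert q.next_ready() == (1000, 281474976715657)  # executes first
assert q.next_ready() == (1500, 281474976715658)
```

Running it confirms the step-1000 operation drains from the queue before the step-1500 one, mirroring the execution order in the trace.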
2025-06-03T10:28:45.138410Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:45.142289Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.142320Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:886:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.142333Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.143493Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:45.144939Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.300276Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.300896Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:45.322637Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:45.331599Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc6yn934wfb68b359n2q9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=Y2Y1ZTg5NmYtZTk4NDU5NTYtYWIzMDI2NmMtZjIyOWNlNTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:45.332110Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-06-03T10:28:45.332218Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-06-03T10:28:45.332292Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946525332264 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:45.332323Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-03T10:28:45.342702Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:45.342730Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.355567Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnc74zf4ekctv2cfchyech, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NWZiNjI3MzEtYjUxMTJkMTQtZTUzNDE4NzctNzYxNGRhZmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:45.356203Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-06-03T10:28:45.356288Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946525356259 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:45.356318Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-03T10:28:45.366715Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:45.366743Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.367391Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-06-03T10:28:45.368451Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan+SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 15928, MsgBus: 24517 2025-06-03T10:28:44.295554Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668124902644819:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:44.295587Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f43/r3tmp/tmpNioKeD/pdisk_1.dat 2025-06-03T10:28:44.368361Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:44.368485Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668124902644799:2079] 1748946524295398 != 1748946524295401 TServer::EnableGrpc on GrpcPort 15928, node 1 2025-06-03T10:28:44.384110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:44.384125Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:44.384128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:44.384168Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24517 TClient is connected to server localhost:24517 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:28:44.438811Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:44.438854Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:44.439895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:44.453672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.466966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.532416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.603705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.624395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.755288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668124902646460:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.755318Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.801325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.809567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.824557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.881350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.894197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.909583Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.967713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.980640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668124902647116:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.980678Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668124902647121:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.980686Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.981659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:44.991355Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668124902647123:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:45.091878Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668129197614470:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:45.265033Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:884: Load table metadata from cache by path, request Path: /Root/Test 2025-06-03T10:28:45.283596Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7511668129197614708:2498] 2025-06-03T10:28:45.283618Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7511668129197614694:2498] 2025-06-03T10:28:45.286730Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:172: KqpSnapshotManager: snapshot 1748946525333:281474976715672 created 2025-06-03T10:28:45.286884Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:585: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 3, stages: 2 2025-06-03T10:28:45.286910Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 1, OutputsCount: 1 2025-06-03T10:28:45.286917Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,1], InputsCount: 1, OutputsCount: 1 2025-06-03T10:28:45.287086Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715673. Resolved key sets: 1 2025-06-03T10:28:45.287161Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715673. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 9] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL, String : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-03T10:28:45.287182Z node 1 :KQP_EXECUTER DEBUG: kqp_scan_executer.cpp:146: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Start resolving tablets nodes... (1) 2025-06-03T10:28:45.287236Z node 1 :KQP_EXECUTER DEBUG: kqp_shards_resolver.cpp:76: [ShardsResolver] TxId: 281474976715673. Shard resolve complete, resolved shards: 1 2025-06-03T10:28:45.287251Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:265: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. 
Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolved, success: 1 ... ropping batch for read #0 2025-06-03T10:28:45.291064Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:459: TxId: 281474976715673, task: 1, CA Id [1:7511668129197614723:2505]. effective maxinflight 1 sorted 1 2025-06-03T10:28:45.291066Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:481: TxId: 281474976715673, task: 1, CA Id [1:7511668129197614723:2505]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, 2025-06-03T10:28:45.291071Z node 1 :KQP_COMPUTE DEBUG: kqp_read_actor.cpp:1428: TxId: 281474976715673, task: 1, CA Id [1:7511668129197614723:2505]. returned async data processed rows 3 left freeSpace 8388548 received rows 3 running reads 0 pending shards 0 finished = 1 has limit 0 limit reached 0 2025-06-03T10:28:45.291176Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614724:2506], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-03T10:28:45.291178Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668129197614723:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:28:45.291182Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614723:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:28:45.291192Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-03T10:28:45.291198Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. Finish input channelId: 1, from: [1:7511668129197614723:2505] 2025-06-03T10:28:45.291209Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614723:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646927 2025-06-03T10:28:45.291212Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614724:2506], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. 
Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:28:45.291216Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614723:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:28:45.291219Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-03T10:28:45.291223Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668129197614723:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-03T10:28:45.291257Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-03T10:28:45.291280Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668129197614724:2506], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:28:45.291304Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:28:45.291344Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7511668129197614723:2505], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 2846 Tasks { TaskId: 1 CpuTimeUs: 434 FinishTimeMs: 1748946525291 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 1 ReadBytes: 20 AffectedPartitions: 1 } IngressRows: 3 ComputeCpuTimeUs: 69 BuildCpuTimeUs: 365 HostName: "ghrun-pyvh3niaay" NodeId: 1 StartTimeMs: 1748946525291 CreateTimeMs: 1748946525287 UpdateTimeMs: 1748946525291 } MaxMemoryUsage: 1048576 } 2025-06-03T10:28:45.291363Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7511668129197614723:2505] 2025-06-03T10:28:45.291371Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-03T10:28:45.291386Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7511668129197614724:2506], 2025-06-03T10:28:45.291440Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:349: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7511668129197614694:2498], seqNo: 1, nRows: 1 2025-06-03T10:28:45.292106Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:415: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7511668129197614726:2506] 2025-06-03T10:28:45.292130Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668129197614724:2506], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:28:45.292143Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-03T10:28:45.292145Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-03T10:28:45.292147Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668129197614724:2506], TxId: 281474976715673, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=. TraceId : 01jwtnc72accmqcd9f34rmw4h8. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-03T10:28:45.292168Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-03T10:28:45.292188Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:28:45.292216Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7511668129197614724:2506], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 797 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 230 FinishTimeMs: 1748946525292 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 58 BuildCpuTimeUs: 172 HostName: "ghrun-pyvh3niaay" NodeId: 1 CreateTimeMs: 1748946525289 UpdateTimeMs: 1748946525292 } MaxMemoryUsage: 1048576 } 2025-06-03T10:28:45.292231Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7511668129197614724:2506] 2025-06-03T10:28:45.292240Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-03T10:28:45.292267Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:28:45.292284Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [1:7511668129197614719:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc72accmqcd9f34rmw4h8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODhhMzk4ZjgtMWE1M2JiNDctNjkwMDcyZmItMjU4NTQzYTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.003643s ReadRows: 1 ReadBytes: 20 ru: 2 rate limiter was not found force flag: 1 2025-06-03T10:28:45.292545Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946525333, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 1546, MsgBus: 16933 2025-06-03T10:28:44.287775Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668126700770276:2059];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:44.287800Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f4a/r3tmp/tmpqhOsUV/pdisk_1.dat 2025-06-03T10:28:44.362825Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668126700770258:2079] 1748946524287586 != 1748946524287589 2025-06-03T10:28:44.364527Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1546, node 1 2025-06-03T10:28:44.378387Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:44.378400Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:44.378404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:44.378472Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16933 TClient is connected to server localhost:16933 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:28:44.434718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:44.434763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:44.436207Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:44.449832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.463898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.530850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.552481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.565551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:44.711886Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668126700771888:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.711921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.767458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.774804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.829842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.837285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.851519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.865202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.879648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:44.938728Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668126700772545:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.938752Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.938772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668126700772550:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.939542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:44.949335Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668126700772552:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:45.005173Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668130995739899:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:45.163696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.193091Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668130995740267:2507];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:45.193090Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668130995740275:2509];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:45.193170Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668130995740275:2509];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:45.193204Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668130995740267:2507];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:45.193305Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668130995740275:2509];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:45.193310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668130995740267:2507];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:45.193338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668130995740267:2507];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:45.193342Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668130995740275:2509];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:45.193371Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668130995740267:2507];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:28:45.193378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668130995740275:2509];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 
2025-06-03T10:28:45.193416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self ... og.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:45.218868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:45.218886Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:45.218891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:45.218904Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:45.218913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:45.218920Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:45.218930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:45.218935Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:45.218995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:45.219004Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037923;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:45.219366Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:28:45.219380Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:28:45.219392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:28:45.219401Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:28:45.219417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:28:45.219426Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:28:45.219436Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:28:45.219445Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:28:45.219453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:28:45.219462Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:28:45.219467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:28:45.219472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:28:45.219491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:45.219506Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:45.219528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:45.219537Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:45.219548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:45.219556Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:45.219562Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:45.219567Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:45.219576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:45.219633Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:45.219642Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037928;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:45.230482Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.230484Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.231977Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.232076Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.233371Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.233387Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.234676Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.234699Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.235824Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.235874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:45.252388Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668130995740686:2498] TxId: 281474976715673. 
Ctx: { TraceId: 01jwtnc71p14abvm8r3qsgdf3n, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk3Njc3MGItOGU1ZTllMmQtOTBiNTI2YjMtYzFjODg1ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:28:45.262734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:45.262734Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:45.262874Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:45.305841Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668130995740767:2498] TxId: 281474976715675. Ctx: { TraceId: 01jwtnc72gb392nn77q49tk9ph, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODk3Njc3MGItOGU1ZTllMmQtOTBiNTI2YjMtYzFjODg1ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root query_phases { duration_us: 35022 table_access { name: "/Root/TestTable" reads { rows: 2 bytes: 72 } } cpu_time_us: 26062 } compilation { duration_us: 39860 cpu_time_us: 38872 } process_cpu_time_us: 118 total_duration_us: 75707 total_cpu_time_us: 65052 >> KqpCost::IndexLookupJoin+StreamLookupJoin >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/pg/unittest >> KqpPg::TableDeleteWhere-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 2593, MsgBus: 17531 2025-06-03T10:27:59.601732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667929820944785:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:59.601758Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002427/r3tmp/tmpnWZDHi/pdisk_1.dat 2025-06-03T10:27:59.701405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:59.701440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:59.702953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:59.704021Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2593, node 1 2025-06-03T10:27:59.724945Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:59.724959Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:59.724962Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to 
initialize from file: (empty maybe) 2025-06-03T10:27:59.725007Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17531 TClient is connected to server localhost:17531 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:59.805969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:00.084685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbpchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-03T10:28:00.163800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbpchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgbpchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character(2) 2025-06-03T10:28:00.182214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 abcd 2025-06-03T10:28:00.222788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-06-03T10:28:00.259134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 abcd 2025-06-03T10:28:00.290588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 {"abcd ","abcd "} 2025-06-03T10:28:00.318779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
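For context: the two "value too long" failures above match standard PostgreSQL assignment semantics for the fixed-width character(n) type, which the pg-compatible bulk upsert in this test appears to enforce; the surrounding successes (abcd, {abcd,abcd}) fit their declared widths. A minimal sketch in plain PostgreSQL SQL (table and column names hypothetical) that raises the identical error:

    -- assignment to char(2) rejects over-long values;
    -- an explicit cast would silently truncate instead
    CREATE TABLE coerce_demo (c char(2));
    INSERT INTO coerce_demo VALUES ('ab');    -- ok: fits the declared width
    INSERT INTO coerce_demo VALUES ('abcd');  -- ERROR: value too long for type character(2)
    SELECT 'abcd'::char(2);                   -- returns 'ab': casts truncate, assignments error
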
: Error: Bulk upsert to table '/Root/Coerce_pgvarchar_17472595041006102391_17823623939509273229' Unable to coerce value for pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-03T10:28:00.343396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgvarchar_17472595041006102391_5352544928909966465' Unable to coerce value for _pgvarchar: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: value too long for type character varying(2) 2025-06-03T10:28:00.368005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 abcd 2025-06-03T10:28:00.414983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-06-03T10:28:00.470811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 abcd 2025-06-03T10:28:00.512678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480 {abcd,abcd} 2025-06-03T10:28:00.546246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715686:0, at schemeshard: 72057594046644480
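The character varying(2) failures above follow the same assignment rule, with PostgreSQL's one documented exception: excess trailing spaces are trimmed silently instead of raising an error. A hypothetical sketch:

    CREATE TABLE coerce_vc (v varchar(2));
    INSERT INTO coerce_vc VALUES ('abcd');   -- ERROR: value too long for type character varying(2)
    INSERT INTO coerce_vc VALUES ('ab   ');  -- ok: the excess characters are all spaces, so the value is trimmed to 'ab'
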
: Error: Bulk upsert to table '/Root/Coerce_pgbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-03T10:28:00.565897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715687:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(2) 2025-06-03T10:28:00.603929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715688:0, at schemeshard: 72057594046644480 1111 2025-06-03T10:28:00.641121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 {1111,1111} 2025-06-03T10:28:00.669704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce_pgbit_10103374131519304989_5866627432374416336' Unable to coerce value for pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(6) 2025-06-03T10:28:00.688667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715695:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgbit_10103374131519304989_11087201080355820517' Unable to coerce value for _pgbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string length 4 does not match type bit(6) 2025-06-03T10:28:00.720325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715696:0, at schemeshard: 72057594046644480
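The bit(n) failures differ from the character cases: bit(n) requires an exact length match, so the same 4-bit string is rejected by both bit(2) and bit(6). bit varying(n) accepts any length up to n and rejects only longer strings, which is the "bit string too long" variant in the varbit failures that follow. A hypothetical PostgreSQL sketch:

    CREATE TABLE coerce_bits (b2 bit(2), b6 bit(6), vb2 bit varying(2));
    INSERT INTO coerce_bits (b2)  VALUES (B'1111');  -- ERROR: bit string length 4 does not match type bit(2)
    INSERT INTO coerce_bits (b6)  VALUES (B'1111');  -- ERROR: bit string length 4 does not match type bit(6)
    INSERT INTO coerce_bits (vb2) VALUES (B'1111');  -- ERROR: bit string too long for type bit varying(2)
    INSERT INTO coerce_bits (vb2) VALUES (B'1');     -- ok: varying only caps the maximum length
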
: Error: Bulk upsert to table '/Root/Coerce_pgvarbit_17472595041006102391_5866627432374416336' Unable to coerce value for pgvarbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string too long for type bit varying(2) 2025-06-03T10:28:00.747067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715697:0, at schemeshard: 72057594046644480
: Error: Bulk upsert to table '/Root/Coerce__pgvarbit_17472595041006102391_11087201080355820517' Unable to coerce value for _pgvarbit: Error while coercing value, reason: yql/essentials/minikql/mkql_terminator.cpp:47: ERROR: bit string too long for type bit varying(2) 2025-06-03T10:28:00.791347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715698:0, at schemeshard: 72057594046644480 1111 2025-06-03T10:28:00.827458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715701:0, at schemeshard: 72057594046644480 {1111,1111} 2025-06-03T10:28:00.872722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715704:0, at schemeshard: 72057594046644480 1111 2025-06-03T1 ... _read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:44.993433Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715841:0, at schemeshard: 72057594046644480 602 2025-06-03T10:28:45.004316Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.006724Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715843:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.017445Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.019663Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715845:0, at schemeshard: 72057594046644480 604 2025-06-03T10:28:45.030379Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.032289Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715847:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.046944Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.049807Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715849:0, at schemeshard: 72057594046644480 718 2025-06-03T10:28:45.061023Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.063577Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715851:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.075347Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.077518Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation 
part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715853:0, at schemeshard: 72057594046644480 869 2025-06-03T10:28:45.090567Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.093176Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715855:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.105015Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.107579Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715857:0, at schemeshard: 72057594046644480 650 2025-06-03T10:28:45.125243Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.127990Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715859:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.142400Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.144788Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715861:0, at schemeshard: 72057594046644480 829 2025-06-03T10:28:45.157278Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.159653Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715863:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.170618Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.173085Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715865:0, at schemeshard: 72057594046644480 774 2025-06-03T10:28:45.185074Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.187708Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715867:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.199293Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.201865Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715869:0, at schemeshard: 72057594046644480 2950 2025-06-03T10:28:45.214884Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.217467Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, 
opId: 281474976715871:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.229758Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.233172Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715873:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.249214Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 114 2025-06-03T10:28:45.252168Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715875:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.264406Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.267003Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715877:0, at schemeshard: 72057594046644480 3802 2025-06-03T10:28:45.279877Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.282637Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715879:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.296611Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.298792Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715881:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.311130Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 4072 2025-06-03T10:28:45.313433Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715883:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.325954Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.328350Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715885:0, at schemeshard: 72057594046644480 142 2025-06-03T10:28:45.340488Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.342969Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715887:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.354318Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.356446Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715889:0, at schemeshard: 72057594046644480 3615 2025-06-03T10:28:45.367667Z node 
11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.369849Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715891:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.428738Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.431384Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715893:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.444388Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 3614 2025-06-03T10:28:45.446615Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715895:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.457842Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.459975Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715897:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.471050Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 22 2025-06-03T10:28:45.473182Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715899:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.485790Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill 2025-06-03T10:28:45.488379Z node 11 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715901:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.500836Z node 11 :READ_TABLE_API WARN: rpc_read_table.cpp:116: ForgetAction occurred, send TEvPoisonPill >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] >> SystemView::ShowCreateTableColumnAlterColumn [GOOD] >> SystemView::ShowCreateTableColumnUpsertOptions >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::ImplicitlyUpdateCoveredColumn [GOOD] Test command err: 2025-06-03T10:28:41.085455Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:41.085556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:41.085593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00203b/r3tmp/tmpWW6gCt/pdisk_1.dat 2025-06-03T10:28:41.206937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.223919Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:41.225097Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946520654376 != 1748946520654380 2025-06-03T10:28:41.267018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:41.267062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:41.277712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:41.350848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.368821Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:28:41.368884Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:41.376887Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:28:41.376952Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:41.378233Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:41.378277Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:41.378446Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:41.378454Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:41.378460Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:41.378503Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:41.378520Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:41.378531Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:28:41.378594Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:28:41.378606Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:41.378683Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:28:41.378688Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:28:41.378694Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:28:41.378722Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:41.378735Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:41.378741Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:28:41.389051Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:41.393055Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:41.393165Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:41.393193Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:28:41.393197Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:41.393202Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:41.393207Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:41.393321Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:41.393332Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:28:41.393343Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:41.393352Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:28:41.393357Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:41.393361Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:28:41.393365Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:41.393492Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:41.393520Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:41.393555Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:41.393562Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:28:41.393573Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:41.393580Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:41.393587Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:28:41.393596Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:28:41.393622Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:28:41.393629Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:41.393632Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:41.393635Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:28:41.393639Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:41.393754Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:41.393802Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:41.393818Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:41.393928Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:28:41.393978Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:28:41.394000Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:41.394009Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:28:41.394359Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:41.394375Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:41.404728Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:41.404770Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:41.404939Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:28:41.404949Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:41.548494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:28:41.548600Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:28:41.549457Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:28:41.549479Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:41.549570Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:41.549579Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... 7594046644480, LocalPathId: 4] schema version# 1 2025-06-03T10:28:45.806608Z node 4 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:45.806691Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:45.806928Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037889 time 0 2025-06-03T10:28:45.806937Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:45.807272Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 1000} 2025-06-03T10:28:45.807290Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:45.807581Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:45.807594Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.807675Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:45.807683Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.807931Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:45.807944Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:45.807952Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-03T10:28:45.807974Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.807983Z node 4 :TX_DATASHARD INFO: 
datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.807995Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:45.808297Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.808313Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.808392Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.808399Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:45.808404Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:45.808417Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.808425Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.808435Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.808989Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-03T10:28:45.850716Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-03T10:28:45.851196Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.851275Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.851378Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:45.851391Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:45.855601Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:779:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.855631Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:790:2655], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.855642Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.856693Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:45.857956Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.857986Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:46.004994Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:46.005053Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:46.005712Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:793:2658], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:46.037726Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:864:2698] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:46.050478Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnc7mz810x11jdvv2t3tdr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MWYyY2Y1NjMtMTQwMjE0NjktZWI4OTNmZWUtMTdlODYyZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:46.051177Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:933:2729], serverId# [4:934:2730], sessionId# [0:0:0] 2025-06-03T10:28:46.051340Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-06-03T10:28:46.051445Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946526051401 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:46.051484Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-03T10:28:46.061952Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:46.061983Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:46.073714Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc7vf1kmq0pjz0geb4ydb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NTllNzBjZDctNDM0NTg5YTYtNGRlZTY5MjgtZjdlZjhkMzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:46.074585Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037889 2025-06-03T10:28:46.074717Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946526074671 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 28b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:46.074758Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 3 Group: 1748946526074671 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:46.074780Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037889, row count=1 2025-06-03T10:28:46.085371Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 28 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 3 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:46.085414Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:46.088975Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:974:2761], serverId# [4:975:2762], sessionId# [0:0:0] 2025-06-03T10:28:46.090056Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:976:2763], serverId# [4:977:2764], sessionId# [0:0:0] |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> AsyncIndexChangeCollector::CoverIndexedColumn [GOOD] Test command err: 2025-06-03T10:28:40.890663Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:40.890786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:40.890832Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00203f/r3tmp/tmpkLZ9Up/pdisk_1.dat 2025-06-03T10:28:41.009637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.026897Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:41.028126Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946520404410 != 1748946520404414 2025-06-03T10:28:41.070377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:41.070443Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:41.081089Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:41.155137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.179326Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:675:2576] 2025-06-03T10:28:41.179435Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:41.191252Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037889 actor [1:677:2578] 2025-06-03T10:28:41.191329Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:41.193205Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:41.193260Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:41.193462Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:41.193477Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:41.193485Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:41.193547Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:41.193579Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:41.193597Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:708:2576] in generation 1 2025-06-03T10:28:41.193701Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 
2025-06-03T10:28:41.193721Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:41.193855Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037889 2025-06-03T10:28:41.193863Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037889 2025-06-03T10:28:41.193870Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037889 2025-06-03T10:28:41.193902Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:41.193915Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:41.193925Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037889 persisting started state actor id [1:709:2578] in generation 1 2025-06-03T10:28:41.204340Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:41.209895Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:41.209998Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:41.210027Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:712:2597] 2025-06-03T10:28:41.210033Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:41.210039Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:41.210047Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:41.210147Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:41.210157Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037889 2025-06-03T10:28:41.210169Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037889 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:41.210178Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037889, actorId: [1:713:2598] 2025-06-03T10:28:41.210185Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:41.210189Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037889, state: WaitScheme 2025-06-03T10:28:41.210193Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:41.210318Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:41.210348Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:41.210385Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:41.210393Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active 
planned 0 immediate 0 planned 0 2025-06-03T10:28:41.210402Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:41.210409Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:41.210416Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037889 2025-06-03T10:28:41.210426Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037889 2025-06-03T10:28:41.210455Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:668:2572], serverId# [1:690:2585], sessionId# [0:0:0] 2025-06-03T10:28:41.210462Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:41.210466Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:41.210470Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037889 TxInFly 0 2025-06-03T10:28:41.210476Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:41.210597Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:41.210656Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:41.210675Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:41.210775Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:669:2573], serverId# [1:696:2589], sessionId# [0:0:0] 2025-06-03T10:28:41.210825Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037889 2025-06-03T10:28:41.210853Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037889 txId 281474976715657 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:41.210866Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037889 2025-06-03T10:28:41.211219Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:41.211233Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:41.291398Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:41.291460Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:41.291702Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037889 2025-06-03T10:28:41.291716Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:467: 72075186224037889 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:41.436049Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:731:2610], serverId# [1:734:2613], sessionId# [0:0:0] 2025-06-03T10:28:41.436181Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:733:2612], serverId# [1:735:2614], sessionId# [0:0:0] 2025-06-03T10:28:41.437176Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037889 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037889 } 2025-06-03T10:28:41.437197Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:41.437275Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037889 2025-06-03T10:28:41.437285Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72 ... ress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.793866Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:45.793886Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:45.794048Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:28:45.813699Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037889 2025-06-03T10:28:45.813728Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037889 2025-06-03T10:28:45.813761Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037889 at tablet 72075186224037889 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.813797Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037889 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.813860Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:45.814756Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:45.814774Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:45.814780Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:45.814798Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.814809Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.814821Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-03T10:28:45.814845Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037890 time 0 2025-06-03T10:28:45.814853Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:28:45.814936Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037890 step# 1000} 2025-06-03T10:28:45.814952Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:28:45.815442Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.815469Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.815484Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.815607Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037890 2025-06-03T10:28:45.815615Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037890 2025-06-03T10:28:45.815620Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037890 2025-06-03T10:28:45.815635Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037890 at tablet 72075186224037890 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:45.815644Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037890 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:45.815654Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037890 2025-06-03T10:28:45.816429Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037889 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.816568Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037889 state Ready 2025-06-03T10:28:45.816580Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-03T10:28:45.816650Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.816683Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037890 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:45.816740Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:45.816747Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:45.816850Z node 4 :TX_DATASHARD DEBUG: 
datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037890 state Ready 2025-06-03T10:28:45.816858Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037890 Got TEvSchemaChangedResult from SS at 72075186224037890 2025-06-03T10:28:45.821227Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:838:2692], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.821256Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:828:2687], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.821349Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.822221Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:45.823625Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.823662Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.823679Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.970235Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:45.970289Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037889 2025-06-03T10:28:45.970308Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037890 2025-06-03T10:28:45.970825Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:842:2695], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:46.002717Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:914:2736] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:46.016520Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnc7kw62ez2h3xc11ejpss, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ODAyZTNmNWItZjY4YzczNzctNmU4ZTJkMTAtMTA5YTFmNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:46.017287Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1019:2779], serverId# [4:1020:2780], sessionId# [0:0:0] 2025-06-03T10:28:46.017460Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:2] at 72075186224037889 2025-06-03T10:28:46.017574Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946526017524 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: AsyncIndex Source: Unspecified Body: 38b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:46.017609Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946526017524 Step: 1500 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] Kind: AsyncIndex Source: Unspecified Body: 42b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037889 2025-06-03T10:28:46.017629Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:2] at 72075186224037889, row count=1 2025-06-03T10:28:46.028107Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037889, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 38 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 }, { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 5] BodySize: 42 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 1 } 2025-06-03T10:28:46.028143Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:28:46.031603Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1026:2785], serverId# [4:1027:2786], sessionId# [0:0:0] 2025-06-03T10:28:46.032846Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [4:1028:2787], serverId# [4:1029:2788], sessionId# [0:0:0] >> KqpCost::QuerySeviceRangeFullScan >> Cdc::ShouldDeliverChangesOnSplitMerge [GOOD] >> Cdc::ResolvedTimestamps >> Cdc::DropIndex [GOOD] >> Cdc::DisableStream >> SystemView::ShowCreateTableSequences [GOOD] >> SystemView::ShowCreateTablePartitionPolicyIndexTable >> KqpCost::IndexLookupAndTake-useSink >> KqpCost::PointLookup >> Viewer::JsonAutocompleteEmptyColumns [GOOD] >> 
CdcStreamChangeCollector::PageFaults [GOOD] >> CdcStreamChangeCollector::OldImage >> Viewer::JsonAutocompleteColumnsPOST >> KqpCost::IndexLookupAndTake+useSink >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanScriptingRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 17296, MsgBus: 11347 2025-06-03T10:28:45.398988Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668130568988085:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:45.399024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f2e/r3tmp/tmpGQjg38/pdisk_1.dat 2025-06-03T10:28:45.452353Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668130568988067:2079] 1748946525398847 != 1748946525398850 2025-06-03T10:28:45.452930Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17296, node 1 2025-06-03T10:28:45.464189Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:45.464200Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:45.464201Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:45.464253Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11347 TClient is connected to server localhost:11347 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:45.528867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:45.528904Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:45.529968Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:45.531145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.542051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.560264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.584550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.596479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.740040Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668130568989702:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.740064Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.774046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.829829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.885147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.894344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.910041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.922984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.937017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:45.953324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668130568990357:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.953355Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.953357Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668130568990362:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:45.954216Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:45.956034Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668130568990364:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:46.046703Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668134863957711:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:46.250915Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511668134863957967:2503] TxId: 281474976715673. Ctx: { TraceId: 01jwtnc809978swkztn75mg70m, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njc4YjY0MjctMzI3YjAzNzMtYjUxMzg4MDEtMjU5MWM3NmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:28:46.253845Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946526299, txId: 281474976715672] shutting down >> Cdc::RacySplitAndDropTable [GOOD] >> Cdc::RenameTable >> Balancing::Balancing_OneTopic_TopicApi >> TopicAutoscaling::PartitionSplit_PQv1 >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK >> CommitOffset::Commit_WithoutSession_TopPast ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAtLeast8BytesInStorage-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 5997, MsgBus: 7173 2025-06-03T10:28:45.855335Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668129924538459:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:45.855363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f1b/r3tmp/tmp3iMQtF/pdisk_1.dat 2025-06-03T10:28:45.910040Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668129924538437:2079] 1748946525855185 != 1748946525855188 2025-06-03T10:28:45.913843Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5997, node 1 2025-06-03T10:28:45.925484Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:45.925501Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:45.925502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:45.925557Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7173 2025-06-03T10:28:45.957506Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:45.957532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:45.958670Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7173 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:45.986363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:45.993354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.057630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.082963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.097412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.205516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668134219507370:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.205550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.261223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.270531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.280284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.294169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.308391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.322194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.336681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.353067Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668134219508021:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.353096Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668134219508026:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.353102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.354061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:46.363145Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668134219508028:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:46.436214Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668134219508079:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:46.645608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] >> TopicAutoscaling::ControlPlane_BackCompatibility >> CommitOffset::PartitionSplit_OffsetCommit >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK >> KqpCost::QuerySeviceRangeFullScan [GOOD] >> AsyncIndexChangeExchange::ShouldNotReorderChangesOnRace [GOOD] >> Cdc::AreJsonsEqualReturnsTrueOnEqual [GOOD] >> Cdc::AreJsonsEqualReturnsFalseOnDifferent [GOOD] >> Cdc::AreJsonsEqualFailsOnWildcardInArray [GOOD] >> Cdc::AlterViaTopicService ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupJoin+StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 14677, MsgBus: 21711 2025-06-03T10:28:46.114063Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668132929483878:2149];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:46.115193Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f11/r3tmp/tmpIJoWIq/pdisk_1.dat 2025-06-03T10:28:46.184150Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668132929483768:2079] 1748946526112789 != 1748946526112792 2025-06-03T10:28:46.184278Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14677, node 1 2025-06-03T10:28:46.199503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:46.199521Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:46.199523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:46.199582Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:46.216216Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:46.216251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:46.217373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is 
connected to server localhost:21711 TClient is connected to server localhost:21711 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:46.259604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.273466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.295260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.354920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.367627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.568321Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668132929485413:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.568364Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.627125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.635684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.644081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.657485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.672211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.686278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.700039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:46.718322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668132929486065:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.718360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.718460Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668132929486070:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:46.719644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:46.727649Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668132929486072:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:46.802266Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668132929486123:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.009732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.019436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.029201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 /Root/Join1_2 1 19 /Root/Join1_1 8 136 >> KqpCost::PointLookup [GOOD] >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK >> KqpCost::IndexLookupAndTake-useSink [GOOD] >> KqpCost::IndexLookupAndTake+useSink [GOOD] >> Viewer::JsonAutocompleteColumnsPOST [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::QuerySeviceRangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 12254, MsgBus: 9999 2025-06-03T10:28:46.740430Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668135864355008:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:46.740462Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001efe/r3tmp/tmploT4mQ/pdisk_1.dat 2025-06-03T10:28:46.815432Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668135864354987:2079] 1748946526740279 != 1748946526740282 2025-06-03T10:28:46.815693Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12254, node 1 2025-06-03T10:28:46.832518Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:46.832535Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:46.832537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:46.832590Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9999 2025-06-03T10:28:46.882688Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:46.882727Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 
2025-06-03T10:28:46.883674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9999 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:46.901912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.907481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.972249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.031668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.047530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.187080Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668140159323921:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.187117Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.238022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.246284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.259824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.316625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.330059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.343883Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.359905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.376133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668140159324576:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.376166Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.376185Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668140159324581:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.377197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.384724Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668140159324583:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:47.438671Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668140159324634:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> CdcStreamChangeCollector::OldImage [GOOD] >> Cdc::AlterViaTopicService [GOOD] >> Cdc::Alter ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 21907, MsgBus: 26478 2025-06-03T10:28:46.749315Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668133692916782:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:46.749368Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eef/r3tmp/tmpZoUAFv/pdisk_1.dat 2025-06-03T10:28:46.804805Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668133692916762:2079] 1748946526749122 != 1748946526749125 2025-06-03T10:28:46.807613Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21907, node 1 2025-06-03T10:28:46.820404Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:46.820420Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:46.820421Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:46.820461Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26478 TClient is connected to server localhost:26478 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:46.878776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:46.878809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:46.880094Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:46.883673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.890813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.907947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.969523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:46.981239Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.223138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137987885690:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.223169Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.279398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.287997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.302103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.316109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.330014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.345161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.359726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.374526Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137987886342:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.374560Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.374567Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137987886347:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.375459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.385197Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668137987886349:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:47.437660Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668137987886400:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.617344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumnsPOST [GOOD] Test command err: 2025-06-03T10:28:42.716178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:319:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:42.716259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:42.716287Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 65459, node 1 TClient is connected to server localhost:11695 json result: {"Success":true,"Result":{"Total":5,"Entities":[{"Name":"/Root/test","Type":"ext_sub_domain"},{"Name":"/Root/slice","Type":"ext_sub_domain"},{"Name":"/Root/qwerty","Type":"ext_sub_domain"},{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:28:43.926802Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:43.926884Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:43.926909Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 16281, node 2 TClient is connected to server localhost:7149 json result: {"Success":true,"Result":{"Total":5,"Entities":[{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"},{"Name":"/Root/test","Type":"ext_sub_domain"},{"Name":"/Root/slice","Type":"ext_sub_domain"},{"Name":"/Root/qwerty","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:28:45.053281Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:45.053368Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:45.053415Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 19160, node 3 TClient is connected to server localhost:62805 json result: {"Success":true,"Result":{"Total":3,"Entities":[{"Name":"clients","Type":"table"},{"Name":"orders","Type":"table"},{"Name":"products","Type":"table"}]},"Version":2} 2025-06-03T10:28:46.411412Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:317:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:46.411488Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:46.411510Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 3606, node 4 TClient is connected to server localhost:26663 json result: {"Success":true,"Result":{"Total":3,"Entities":[{"Name":"id","Type":"column","Parent":"orders"},{"Name":"name","Type":"column","Parent":"orders"},{"Name":"description","Type":"column","Parent":"orders"}]},"Version":2} 2025-06-03T10:28:47.643253Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:47.643356Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:47.643377Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 4892, node 5 TClient is connected to server localhost:18601 json result: {"Success":true,"Result":{"Total":6,"Entities":[{"Name":"name","Type":"column","Parent":"orders"},{"Name":"name","Type":"column","Parent":"products"},{"Name":"id","Type":"column","Parent":"orders"},{"Name":"id","Type":"column","Parent":"products"},{"Name":"description","Type":"column","Parent":"orders"},{"Name":"description","Type":"column","Parent":"products"}]},"Version":2} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::PointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 15476, MsgBus: 6297 2025-06-03T10:28:46.899349Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668132803932107:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:46.899369Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001edf/r3tmp/tmpCwJPg5/pdisk_1.dat 2025-06-03T10:28:46.953710Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668132803932088:2079] 1748946526899227 != 1748946526899230 2025-06-03T10:28:46.955182Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15476, node 1 2025-06-03T10:28:46.967751Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:46.967776Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:46.967778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:46.967825Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6297 2025-06-03T10:28:47.002377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.002412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.003518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6297 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.036387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.043299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.110416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.138069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.151324Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.327103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137098901029:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.327133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.376289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.385106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.393022Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.407244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.422242Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.478217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.490976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.507748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137098901686:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.507796Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.507806Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668137098901691:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.508749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.518093Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668137098901693:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:47.595025Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668137098901744:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookupAndTake+useSink [GOOD] Test command err: Trying to start YDB, gRPC: 27873, MsgBus: 7496 2025-06-03T10:28:47.009815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668138674770112:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.009853Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ec3/r3tmp/tmp5LwX8Z/pdisk_1.dat 2025-06-03T10:28:47.073341Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:47.073425Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668138674770091:2079] 1748946527009659 != 1748946527009662 TServer::EnableGrpc on GrpcPort 27873, node 1 2025-06-03T10:28:47.087274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:47.087289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:47.087291Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:47.087339Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7496 TClient is connected to server localhost:7496 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:47.145890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.145930Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.146987Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.152348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.158178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.223816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.245094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.255886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.386907Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138674771721:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.386952Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.443834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.452173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.462589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.476615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.490976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.505279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.519297Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.535285Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138674772375:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.535318Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.535336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138674772380:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.536292Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.546118Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668138674772382:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:28:47.607631Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668138674772433:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.776085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 2 16 /Root/SecondaryKeys 1 8 |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> Cdc::DisableStream [GOOD] >> Cdc::InitialScan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_collector/unittest >> CdcStreamChangeCollector::OldImage [GOOD] Test command err: 2025-06-03T10:28:41.035128Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:41.035229Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:41.035262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00203e/r3tmp/tmpXa8yaQ/pdisk_1.dat 2025-06-03T10:28:41.163338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.182793Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:41.184271Z node 1 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1191: Update config MemoryLimit: 33554432 2025-06-03T10:28:41.184498Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946520553105 != 1748946520553109 2025-06-03T10:28:41.226715Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:41.226768Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:41.239933Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:41.318426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:41.336957Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:28:41.337041Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:41.345767Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:41.345820Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:41.346025Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:41.346035Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:41.346042Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:41.346108Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:41.346128Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:41.346142Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:28:41.356453Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:41.361617Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 
72075186224037888 2025-06-03T10:28:41.361725Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:41.361757Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:28:41.361763Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:41.361769Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:41.361776Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:41.361959Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:41.361992Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:41.362120Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:41.362131Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:41.362142Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:41.362148Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:41.362162Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:28:41.362205Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:41.362268Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:41.362290Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:41.362665Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:41.373001Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:41.373062Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:41.517013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:28:41.518147Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:41.518182Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 
72075186224037888 2025-06-03T10:28:41.518286Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:41.518299Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:41.518315Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:41.518416Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:28:41.518466Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:41.518677Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:41.518698Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:41.519222Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:41.519355Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:41.519826Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:41.519841Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:41.520116Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:41.520133Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:41.520368Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:41.520381Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:41.520389Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:41.520412Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:41.520425Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:41.520441Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:41.521410Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:41.521780Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 
72057594046316545 last step 0 next step 1000 2025-06-03T10:28:41.521926Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:41.521939Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:41.525812Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:41.525858Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:41.525873Z node 1 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:41.525877Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:41.526053Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Dis ... 88 step# 1000} 2025-06-03T10:28:47.761281Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:47.761485Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:47.761496Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:47.761503Z node 4 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:47.761521Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:47.761532Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:47.761540Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:47.761717Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:47.762039Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:47.762068Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:47.762219Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:47.767358Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:47.767416Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715658 ssId 72057594046644480 seqNo 2:2 2025-06-03T10:28:47.767435Z node 4 :TX_DATASHARD INFO: check_scheme_tx_unit.cpp:234: Check scheme tx, proposed scheme version# 2 
current version# 1 expected version# 2 at tablet# 72075186224037888 txId# 281474976715658 2025-06-03T10:28:47.767441Z node 4 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715658 at tablet 72075186224037888 2025-06-03T10:28:47.767698Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:47.789628Z node 4 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:47.957803Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715658 at step 1500 at tablet 72075186224037888 { Transactions { TxId: 281474976715658 AckTo { RawX1: 0 RawX2: 0 } } Step: 1500 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:47.957838Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:47.957887Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:47.957895Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:47.957904Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1500:281474976715658] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:47.957959Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1500:281474976715658 keys extracted: 0 2025-06-03T10:28:47.957993Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:47.958060Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:47.958237Z node 4 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:47.993316Z node 4 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1500} 2025-06-03T10:28:47.993372Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:47.993382Z node 4 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:47.993398Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:47.993427Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1500 : 281474976715658] from 72075186224037888 at tablet 72075186224037888 send result to client [4:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:47.993446Z node 4 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715658 state Ready TxInFly 0 2025-06-03T10:28:47.993465Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:47.994211Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037888 state Ready 
2025-06-03T10:28:47.994236Z node 4 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:47.998453Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:876:2714], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.998490Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:886:2719], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.998504Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.999694Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.001228Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:48.157767Z node 4 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:48.158468Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:890:2722], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:28:48.180621Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:946:2759] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:48.191947Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnc9qy8546rkvzhckhad3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=MWY3NTgzNDAtYzRlZjA1OGQtY2E4OThjY2MtYjQ4ZWYyZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:48.192730Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:977:2776], serverId# [4:978:2777], sessionId# [0:0:0] 2025-06-03T10:28:48.192896Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:3] at 72075186224037888 2025-06-03T10:28:48.193002Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 1 Group: 1748946528192957 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:48.193039Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:3] at 72075186224037888, row count=1 2025-06-03T10:28:48.203578Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 1 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 18 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:48.203614Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:48.217751Z node 4 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnc9yc6j80qvfb93afzeh8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=ZjMwNmFiYmQtMWFmYzI4Y2EtNWZiOTkxZS1mOTQ4ZDgzMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:48.218553Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:4] at 72075186224037888 2025-06-03T10:28:48.218674Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 2 Group: 1748946528218634 Step: 2000 TxId: 18446744073709551615 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcDataChange Source: Unspecified Body: 40b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:28:48.218713Z node 4 :TX_DATASHARD DEBUG: execute_write_unit.cpp:410: Executed write operation for [0:4] at 72075186224037888, row count=1 2025-06-03T10:28:48.229250Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 40 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 2 } 2025-06-03T10:28:48.229288Z node 4 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:48.230094Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1005:2795], serverId# [4:1006:2796], sessionId# [0:0:0] 2025-06-03T10:28:48.231361Z node 4 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [4:1007:2797], serverId# [4:1008:2798], sessionId# [0:0:0] >> Cdc::RenameTable [GOOD] >> Cdc::InitialScan_WithTopicSchemeTx >> Cdc::Alter [GOOD] >> Cdc::AddColumn >> SystemView::TopPartitionsByTliFields [GOOD] >> ViewQuerySplit::Basic [GOOD] >> ViewQuerySplit::WithPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithPairedPragmaTablePathPrefix [GOOD] >> ViewQuerySplit::WithComments [GOOD] >> ViewQuerySplit::Joins [GOOD] |64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead |64.3%| [TA] $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... 
results_accumulator.log} >> BasicStatistics::TwoTables [GOOD] >> KqpCost::AAARangeFullScan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> ViewQuerySplit::Joins [GOOD] Test command err: 2025-06-03T10:28:08.655522Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667971977380764:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:08.655535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b32/r3tmp/tmprmDBBR/pdisk_1.dat 2025-06-03T10:28:08.733431Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:08.733672Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511667971977380741:2079] 1748946488655220 != 1748946488655223 TServer::EnableGrpc on GrpcPort 27525, node 1 2025-06-03T10:28:08.761700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:08.761745Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:08.765725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:08.767442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:08.767446Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:08.767449Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:08.767487Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:28:08.818949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:09.099515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667976272348666:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.099553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.099720Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667976272348701:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.100746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:09.103196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 2025-06-03T10:28:09.103440Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667976272348703:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710658 completed, doublechecking }
2025-06-03T10:28:09.161730Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667976272348754:2324] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:28:09.258064Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511667976272348782:2323] TxId: 281474976710661. Ctx: { TraceId: 01jwtnb3fwda9xtt1xyfgxpjkm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGYxZDY5ZWQtYjBhNjU5MGYtYWM5MDA3NjUtM2M2YWY3N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database
2025-06-03T10:28:09.258168Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710661. Ctx: { TraceId: 01jwtnb3fwda9xtt1xyfgxpjkm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGYxZDY5ZWQtYjBhNjU5MGYtYWM5MDA3NjUtM2M2YWY3N2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:09.264723Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667976272348789:2337], owner: [1:7511667976272348785:2335], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:09.264970Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667976272348789:2337], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1
2025-06-03T10:28:09.266867Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667976272348789:2337], row count: 1, finished: 1
2025-06-03T10:28:09.266896Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667976272348789:2337], owner: [1:7511667976272348785:2335], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:09.269039Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946489257, txId: 281474976710660] shutting down
2025-06-03T10:28:10.299100Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710663. Ctx: { TraceId: 01jwtnb4x23j8az5vbhgqnb5wx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2FjOWUxZTYtNjM5MDc3NTMtZGRhZjQ1N2EtZjU5NmNhN2I=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:10.300037Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667980567316136:2351], owner: [1:7511667980567316133:2349], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:10.302765Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667980567316136:2351], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1
2025-06-03T10:28:10.304048Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667980567316136:2351], row count: 1, finished: 1
2025-06-03T10:28:10.304078Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667980567316136:2351], owner: [1:7511667980567316133:2349], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:10.304824Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946490297, txId: 281474976710662] shutting down
2025-06-03T10:28:11.329684Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710665. Ctx: { TraceId: 01jwtnb5xadhcsnwe0932sfcrq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmQ1YWRjYzctNTVhNDI3ODctMjk2MjMxNi01MzA4NTk2Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:11.330348Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667984862283470:2362], owner: [1:7511667984862283466:2360], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:11.333595Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667984862283470:2362], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1
2025-06-03T10:28:11.341886Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667984862283470:2362], row count: 1, finished: 1
2025-06-03T10:28:11.341918Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667984862283470:2362], owner: [1:7511667984862283466:2360], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:11.342858Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946491328, txId: 281474976710664] shutting down
2025-06-03T10:28:12.372669Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710667. Ctx: { TraceId: 01jwtnb6xr53ff1rxg83hesye6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDVlYzc2NmItZGMwMTM5NmEtMzg1ZGE4MjEtMmYyMjFiNDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:12.373662Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667989157250802:2373], owner: [1:7511667989157250799:2371], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:12.380014Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667989157250802:2373], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1
2025-06-03T10:28:12.380745Z node 1 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [1:7511667989157250802:2373], row count: 1, finished: 1
2025-06-03T10:28:12.380778Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [1:7511667989157250802:2373], owner: [1:7511667989157250799:2371], scan id: 0, table id: [72057594046644480:1:0:ds_vslots]
2025-06-03T10:28:12.381723Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946492371, txId: 281474976710666] shutting down
2025-06-03T10:28:13.410037Z node 1 :KQP_EXECUTER ... , processor ids count# 1, processor id to database count# 1
2025-06-03T10:28:48.000771Z node 13 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [13:7511668095275482233:2076], query logs count# 0, processor ids count# 1, processor id to database count# 1
2025-06-03T10:28:48.000693Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [16:7511668091638161896:2063], interval end# 2025-06-03T10:28:48.000000Z, event interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.000701Z node 16 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [16:7511668091638161896:2063], query logs count# 0, processor ids count# 0, processor id to database count# 0
2025-06-03T10:28:48.000784Z node 14 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [14:7511668089428349381:2063], interval end# 2025-06-03T10:28:48.000000Z, event interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.000791Z node 14 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [14:7511668093723316757:2074], interval end# 2025-06-03T10:28:48.000000Z, event interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.000800Z node 14 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [14:7511668093723316757:2074], query logs count# 0, processor ids count# 1, processor id to database count# 1
2025-06-03T10:28:48.000802Z node 14 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [14:7511668089428349381:2063], query logs count# 0, processor ids count# 0, processor id to database count# 0
2025-06-03T10:28:48.009579Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [12:7511668142602928745:2458], owner: [12:7511668138307961445:2456], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute]
2025-06-03T10:28:48.010074Z node 12 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:593: Handle TEvPrivate::TEvProcessInterval: service id# [12:7511668091063319148:2076], interval end# 2025-06-03T10:28:48.000000Z, event interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.010092Z node 12 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:408: Rotate logs: service id# [12:7511668091063319148:2076], query logs count# 0, processor ids count# 1, processor id to database count# 0
2025-06-03T10:28:48.015083Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [12:7511668142602928745:2458], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2
2025-06-03T10:28:48.015420Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 0 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 18446744073709551615 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 4, bytes# 252, next#
2025-06-03T10:28:48.015691Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [12:7511668142602928745:2458], row count: 4, finished: 1
2025-06-03T10:28:48.015703Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [12:7511668142602928745:2458], owner: [12:7511668138307961445:2456], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute]
2025-06-03T10:28:48.015809Z node 14 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:14: [72075186224037899] TTxCollect::Execute
2025-06-03T10:28:48.015880Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 17, partition interval end# 2025-06-03T10:28:48.000000Z, partition count# 1
2025-06-03T10:28:48.015893Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 20, partition interval end# 2025-06-03T10:28:48.000000Z, partition count# 1
2025-06-03T10:28:48.015903Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 18, partition interval end# 2025-06-03T11:00:00.000000Z, partition count# 1
2025-06-03T10:28:48.015910Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037899] PersistPartitionTopResults: table id# 21, partition interval end# 2025-06-03T11:00:00.000000Z, partition count# 1
2025-06-03T10:28:48.015951Z node 14 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037899] Reset: interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.017453Z node 14 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:29: [72075186224037899] TTxCollect::Complete
2025-06-03T10:28:48.053687Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715684. Ctx: { TraceId: 01jwtnc9n7fz6czny8gwnbs597, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=MjgwYzdjOGQtOTI3YzY2Mi0zNjBkMjlmOC03YWRkYzVhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:48.054807Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946527963, txId: 281474976715682] shutting down
2025-06-03T10:28:48.054814Z node 16 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:14: [72075186224037893] TTxCollect::Execute
2025-06-03T10:28:48.054871Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 17, partition interval end# 2025-06-03T10:28:48.000000Z, partition count# 1
2025-06-03T10:28:48.054889Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 20, partition interval end# 2025-06-03T10:28:48.000000Z, partition count# 1
2025-06-03T10:28:48.054904Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 18, partition interval end# 2025-06-03T11:00:00.000000Z, partition count# 1
2025-06-03T10:28:48.054926Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:182: [72075186224037893] PersistPartitionTopResults: table id# 21, partition interval end# 2025-06-03T11:00:00.000000Z, partition count# 1
2025-06-03T10:28:48.054952Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-03T10:28:48.000000Z
2025-06-03T10:28:48.056748Z node 16 :SYSTEM_VIEWS DEBUG: tx_collect.cpp:29: [72075186224037893] TTxCollect::Complete
2025-06-03T10:28:48.087186Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715686. Ctx: { TraceId: 01jwtnc9sqd7aj5y0qavfrn97j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=12&id=Y2U5YTcwYS1hZTE2Y2ZkMC05MmVjMTRkNi04OGViMDNlMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root
2025-06-03T10:28:48.087787Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [12:7511668142602928785:2468], owner: [12:7511668142602928781:2466], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute]
2025-06-03T10:28:48.088016Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [12:7511668142602928785:2468], schemeshard id: 72075186224037888, hive id: 72057594037968897, database: /Root/Tenant1, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 2], database node count: 2
2025-06-03T10:28:48.088213Z node 16 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:641: [72075186224037893] Reply batch: range# From { IntervalEndUs: 1748946527000000 Rank: 0 } InclusiveFrom: true To { IntervalEndUs: 1748946527000000 Rank: 4294967295 } InclusiveTo: true Type: TOP_PARTITIONS_BY_TLI_ONE_MINUTE , rows# 1, bytes# 63, next#
2025-06-03T10:28:48.088291Z node 12 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [12:7511668142602928785:2468], row count: 1, finished: 1
2025-06-03T10:28:48.088301Z node 12 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [12:7511668142602928785:2468], owner: [12:7511668142602928781:2466], scan id: 0, table id: [72075186224037888:1:0:top_partitions_by_tli_one_minute]
2025-06-03T10:28:48.089069Z node 12 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946528086, txId: 281474976715685] shutting down
2025-06-03T10:28:48.090103Z node 12 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715687. Ctx: { TraceId: , Database: , DatabaseId: , SessionId: ydb://session/3?node_id=12&id=YmI3OWE1ZWYtMzQ0YTI1M2EtYjExZTYxZmItMWQzM2M5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. Database not set, use /Root
2025-06-03T10:28:48.094271Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 13
2025-06-03T10:28:48.094439Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(13, (0,0,0,0)) VolatileState: Connected -> Disconnected
2025-06-03T10:28:48.094490Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 15
2025-06-03T10:28:48.094544Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(15, (0,0,0,0)) VolatileState: Connected -> Disconnected
2025-06-03T10:28:48.094862Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 16
2025-06-03T10:28:48.094980Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(16, (0,0,0,0)) VolatileState: Connected -> Disconnected
2025-06-03T10:28:48.095001Z node 12 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 14
2025-06-03T10:28:48.095095Z node 12 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connected -> Disconnected
2025-06-03T10:28:48.095416Z node 15 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [15:7511668095973404632:2076], processor id# 72075186224037893, database# /Root/Tenant1
2025-06-03T10:28:48.095561Z node 15 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [15:7511668095973404632:2076], database# /Root/Tenant1, processor id# 72075186224037893
2025-06-03T10:28:48.095694Z node 16 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [16:7511668095933129316:2118], processor id# 72075186224037893, database# /Root/Tenant1
2025-06-03T10:28:48.095740Z node 16 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [16:7511668095933129316:2118], database# /Root/Tenant1, processor id# 72075186224037893
2025-06-03T10:28:48.095786Z node 13 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [13:7511668095275482233:2076], processor id# 72075186224037899, database# /Root/Tenant2
2025-06-03T10:28:48.095818Z node 13 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [13:7511668095275482233:2076], database# /Root/Tenant2, processor id# 72075186224037899
2025-06-03T10:28:48.096603Z node 14 :SYSTEM_VIEWS WARN: sysview_service.cpp:811: Summary delivery problem: service id# [14:7511668093723316757:2074], processor id# 72075186224037899, database# /Root/Tenant2
2025-06-03T10:28:48.096698Z node 14 :SYSTEM_VIEWS INFO: sysview_service.cpp:880: Navigate by database succeeded: service id# [14:7511668093723316757:2074], database# /Root/Tenant2, processor id# 72075186224037899
2025-06-03T10:28:48.098472Z node 12 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[14:7511668093723316994:2112], Type=268959746
2025-06-03T10:28:48.098490Z node 12 :HIVE WARN: hive_impl.cpp:934: HIVE#72057594037968897 THive::Handle::TEvUndelivered Sender=[16:7511668095933129498:2108], Type=268959746
>> KqpCost::OltpWriteRow+isSink
|64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
|64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
|64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
>> TExportToS3Tests::ShouldExcludeBackupTableFromStats [GOOD]
>> TExportToS3Tests::ShouldRestartOnScanErrors
|64.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoTables [GOOD] Test command err:
2025-06-03T10:26:28.758292Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:28.758321Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:28.758328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bcd/r3tmp/tmpuRRv3f/pdisk_1.dat 2025-06-03T10:26:28.858160Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29693, node 1 2025-06-03T10:26:28.961064Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:28.961081Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:28.961084Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:28.961124Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:28.961582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.037572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.037612Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.049682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27508 2025-06-03T10:26:29.399171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:30.129071Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:30.139819Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.139865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.194042Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:30.194654Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.352574Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352700Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352822Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:30.352852Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352887Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352903Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352914Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352927Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.352942Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.506956Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.507000Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.518584Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.550110Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:30.562636Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:30.562666Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:30.572918Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:30.572988Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:30.573009Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:30.573013Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:30.573017Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:30.573022Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:30.573026Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:30.573035Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:30.573185Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:30.587396Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.587429Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.588690Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:30.589726Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:30.589848Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:30.591869Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:30.595442Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:30.595468Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:30.595482Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:30.599477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:30.601320Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:30.601371Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:30.706274Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.783161Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.825738Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:31.350732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.350783Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.353999Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:31.461444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2355:3096], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.461476Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:31.466503Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2360:3100]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:31.466542Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:31.466552Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2362:3102] 2025-06-03T10:26:31.466560Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2362:3102] 2025-06-03T10:26:31.466704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2363:2866] 2025-06-03T10:26:31.466775Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2362:3102], server id = [2:2363:2866], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:31.466810Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2363:2866], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:31.466826Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:26:31.466869Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:26:31.466877Z node 1 :STATISTICS DEBU ... [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:43.345638Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:43.345653Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 5] is data table. 2025-06-03T10:28:43.345661Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:28:43.345797Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:43.352032Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:43.353942Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6760:4788], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.353994Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6771:4793], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.354023Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:43.357635Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:28:43.373269Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6774:4796], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:28:43.534080Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6872:4843] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:43.545027Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6901:4858]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:43.545098Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:28:43.545104Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6901:4858], StatRequests.size() = 1 2025-06-03T10:28:43.566813Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YzNjNGYyOTgtMzkwNjY3ZTEtYzMyM2M4NDAtODFjZDQyOGM=, TxId: 2025-06-03T10:28:43.566849Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YzNjNGYyOTgtMzkwNjY3ZTEtYzMyM2M4NDAtODFjZDQyOGM=, TxId: 2025-06-03T10:28:43.567032Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:43.579026Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 5] 2025-06-03T10:28:43.579059Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:44.059253Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6929:4874]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:44.059377Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:28:44.059384Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6929:4874], StatRequests.size() = 1 2025-06-03T10:28:45.313992Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6966:4892]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:45.314130Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:28:45.314141Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6966:4892], StatRequests.size() = 1 2025-06-03T10:28:45.931420Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:28:45.941909Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:45.941941Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:45.941952Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 
2025-06-03T10:28:45.941959Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:28:45.942080Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database
2025-06-03T10:28:45.942921Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:28:45.946974Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjM4ODA5MDMtNWI0YjliZmItNzdhZGNhZjItZDBmOTJmZTM=, TxId:
2025-06-03T10:28:45.947005Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjM4ODA5MDMtNWI0YjliZmItNzdhZGNhZjItZDBmOTJmZTM=, TxId:
2025-06-03T10:28:45.947179Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute
2025-06-03T10:28:45.958925Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:28:45.958952Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply.
2025-06-03T10:28:46.502593Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7028:4928]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:46.502745Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ]
2025-06-03T10:28:46.502754Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7028:4928], StatRequests.size() = 1
2025-06-03T10:28:47.744450Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7067:4948]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:47.744563Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ]
2025-06-03T10:28:47.744571Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7067:4948], StatRequests.size() = 1
2025-06-03T10:28:48.401484Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1
2025-06-03T10:28:48.401663Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:28:48.401789Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2
2025-06-03T10:28:48.412293Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal
2025-06-03T10:28:48.412323Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals.
2025-06-03T10:28:48.412332Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table.
2025-06-03T10:28:48.412346Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4]
2025-06-03T10:28:48.412429Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database
2025-06-03T10:28:48.413277Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:28:48.416358Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MWQ3ZjhjZDQtMmI1NzViYWYtMTFmYjJlNTktOWZiZmEyNWI=, TxId:
2025-06-03T10:28:48.416385Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MWQ3ZjhjZDQtMmI1NzViYWYtMTFmYjJlNTktOWZiZmEyNWI=, TxId:
2025-06-03T10:28:48.416530Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute
2025-06-03T10:28:48.428531Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4]
2025-06-03T10:28:48.428559Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply.
2025-06-03T10:28:48.983452Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7126:4979]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:48.983683Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ]
2025-06-03T10:28:48.983700Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7126:4979], StatRequests.size() = 1
2025-06-03T10:28:48.983903Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 127 ], ReplyToActorId[ [2:7128:4981]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:48.985426Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 127 ]
2025-06-03T10:28:48.985453Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 127, ReplyToActorId = [2:7128:4981], StatRequests.size() = 1
|64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
>> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD]
>> BasicStatistics::Simple [GOOD]
>> SystemView::ShowCreateTableColumnUpsertOptions [GOOD]
>> SystemView::ShowCreateTableColumnUpsertIndex
|64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest
>> Cdc::InitialScan [GOOD]
>> Cdc::InitialScanDebezium
>> KqpCost::OlapWriteRow
>> Cdc::AddColumn [GOOD]
>> Cdc::AddColumn_TopicAutoPartitioning
>> KqpCost::AAARangeFullScan [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Simple [GOOD] Test command err:
2025-06-03T10:26:28.075875Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:28.075909Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:28.075916Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bde/r3tmp/tmpC1czuR/pdisk_1.dat 2025-06-03T10:26:28.176581Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23192, node 1 2025-06-03T10:26:28.281492Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:28.281516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:28.281522Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:28.281581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:28.282234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:28.360139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.360169Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.371830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:62227 2025-06-03T10:26:28.719681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.507770Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:29.517045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.517086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.570805Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:29.571320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.726038Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726173Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726308Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:29.726339Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726371Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726386Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726400Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726421Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.726435Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.878925Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.878979Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.890563Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.924910Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:29.938267Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:29.938310Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:29.947474Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:29.947543Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:29.947573Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:29.947579Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:29.947586Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:29.947592Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:29.947598Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:29.947605Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:29.947749Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:29.960942Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:29.960965Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:29.962075Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:29.962703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:29.962792Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:29.964109Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:29.966830Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:29.966841Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:29.966851Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:29.970284Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:29.972360Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:29.972408Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:30.076638Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.147295Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.199694Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:30.713983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.714029Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.717125Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:30.824051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2355:3096], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.824086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.829090Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2360:3100]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:26:30.829132Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ]
2025-06-03T10:26:30.829142Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2362:3102]
2025-06-03T10:26:30.829153Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2362:3102]
2025-06-03T10:26:30.829326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2363:2866]
2025-06-03T10:26:30.829388Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2362:3102], server id = [2:2363:2866], tablet id = 72075186224037894, status = OK
2025-06-03T10:26:30.829422Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2363:2866], node id = 1, have schemeshards count = 0, need schemeshards count = 1
2025-06-03T10:26:30.829433Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1
2025-06-03T10:26:30.829468Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:26:30.829475Z node 1 :STATISTICS DEBU ... chemeshard: 72057594046644480
2025-06-03T10:28:41.447190Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480
2025-06-03T10:28:42.186104Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:6512:4651]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:42.186230Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ]
2025-06-03T10:28:42.186240Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:6512:4651], StatRequests.size() = 1
2025-06-03T10:28:42.909426Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1
2025-06-03T10:28:42.909508Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2
2025-06-03T10:28:42.909705Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:28:42.936102Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897
2025-06-03T10:28:42.936155Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 223.000000s, at schemeshard: 72075186224037897
2025-06-03T10:28:42.936311Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49
2025-06-03T10:28:42.951678Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete
2025-06-03T10:28:43.536986Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:6545:4667]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:28:43.537103Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ]
2025-06-03T10:28:43.537111Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:6545:4667], StatRequests.size() = 1
2025-06-03T10:28:44.210845Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal
2025-06-03T10:28:44.210876Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals.
2025-06-03T10:28:44.210888Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table.
2025-06-03T10:28:44.210892Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:28:44.211013Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database
2025-06-03T10:28:44.215112Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:28:44.216263Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6568:4686], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.216298Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:6578:4691], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.216329Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:44.219354Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:28:44.235637Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:6582:4694], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:28:44.397816Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6680:4742] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:44.410543Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:6709:4757]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:44.410634Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:28:44.410643Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:6709:4757], StatRequests.size() = 1 2025-06-03T10:28:44.436080Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDQ4Mzg2MTctMzM0MTY1YzQtYjJkNDFjYzMtNzY5NWI4ZjQ=, TxId: 2025-06-03T10:28:44.436115Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDQ4Mzg2MTctMzM0MTY1YzQtYjJkNDFjYzMtNzY5NWI4ZjQ=, TxId: 2025-06-03T10:28:44.436318Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:44.448102Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:28:44.448130Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:44.885730Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:6737:4773]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:44.885826Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:28:44.885832Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:6737:4773], StatRequests.size() = 1 2025-06-03T10:28:46.075826Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:6772:4791]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:46.075947Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:28:46.075956Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:6772:4791], StatRequests.size() = 1 2025-06-03T10:28:46.727047Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:28:46.727125Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:46.727142Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:46.727150Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 
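The KQP_WORKLOAD_SERVICE warnings and the TX_PROXY "path exist, request accepts it" error above are the expected first-use bootstrap of the default resource pool rather than real failures: the fetcher reports NOT_FOUND, the service proposes ESchemeOpCreateResourcePool, and the "completed, doublechecking" retry shows a second creator losing the race and then accepting the already-created path. For reference, a minimal sketch of creating such a pool explicitly, assuming YDB's workload-manager DDL; the pool name and limits below are illustrative and not taken from this log:

-- Sketch only: in these tests the default pool is created implicitly on
-- first use; the pool name and limits here are illustrative assumptions.
CREATE RESOURCE POOL demo_pool WITH (
    CONCURRENT_QUERY_LIMIT = 10,
    QUEUE_SIZE = 100
);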
2025-06-03T10:28:46.727154Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:28:46.727236Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:46.727886Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:46.731539Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YTIzNGYyODctZDkxOTFmZjMtNDcwMzA4Ny05MzVlMGYzZQ==, TxId: 2025-06-03T10:28:46.731569Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YTIzNGYyODctZDkxOTFmZjMtNDcwMzA4Ny05MzVlMGYzZQ==, TxId: 2025-06-03T10:28:46.731717Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:46.743296Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:28:46.743321Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:47.287880Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:6836:4827]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:47.288013Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-03T10:28:47.288024Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:6836:4827], StatRequests.size() = 1 2025-06-03T10:28:48.572207Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:6875:4847]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:48.572325Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-03T10:28:48.572334Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:6875:4847], StatRequests.size() = 1 2025-06-03T10:28:49.253573Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:28:49.253674Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:49.253679Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
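The RunDataQuery entries above capture the exact YQL the statistics traversal actor executes before TTxFinishTraversal: a single parameterized DELETE that clears previously collected rows for the path being traversed (here OwnerId 72075186224037897, LocalPathId 3 and 4). Pulled out of the log as a standalone statement:

-- Verbatim from the RunDataQuery log entries above; the two parameter
-- values are bound by the TQueryBase actor from the traversed path id.
DECLARE $owner_id AS Uint64;
DECLARE $local_path_id AS Uint64;

DELETE FROM `.metadata/_statistics`
WHERE owner_id = $owner_id AND local_path_id = $local_path_id;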
2025-06-03T10:28:49.253765Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:28:49.253880Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:28:49.793764Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:6910:4863]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:49.793883Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-03T10:28:49.793892Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:6910:4863], StatRequests.size() = 1 |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::ScanQueryRangeFullScan-SourceRead [GOOD] Test command err: Trying to start YDB, gRPC: 2853, MsgBus: 26255 2025-06-03T10:28:49.292115Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668148712019710:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:49.292164Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eb9/r3tmp/tmpQlZepk/pdisk_1.dat 2025-06-03T10:28:49.363307Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:49.363787Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668148712019690:2079] 1748946529291932 != 1748946529291935 TServer::EnableGrpc on GrpcPort 2853, node 1 2025-06-03T10:28:49.378887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:49.378903Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:49.378906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:49.378960Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26255 TClient is connected to server localhost:26255 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:28:49.434012Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:49.434042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:49.435143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:49.447507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.452972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.470604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:49.493429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.506612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.686141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668148712021341:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:49.686167Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:49.733153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.741541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.751185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.765781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.779705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.793956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.808155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:49.824054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668148712021995:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:49.824088Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:49.824092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668148712022000:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:49.824974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:49.827736Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668148712022002:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:49.904031Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668148712022054:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:50.099433Z node 1 :KQP_GATEWAY DEBUG: kqp_metadata_loader.cpp:884: Load table metadata from cache by path, request Path: /Root/Test 2025-06-03T10:28:50.115804Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:37: Start KqpSnapshotManager at [1:7511668153006989589:2498] 2025-06-03T10:28:50.115819Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:58: KqpSnapshotManager: got snapshot request from [1:7511668153006989575:2498] 2025-06-03T10:28:50.119042Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_snapshot_manager.cpp:172: KqpSnapshotManager: snapshot 1748946530163:281474976715672 created 2025-06-03T10:28:50.119154Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:585: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Executing physical tx, type: 3, stages: 2 2025-06-03T10:28:50.119170Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,0], InputsCount: 0, OutputsCount: 1 2025-06-03T10:28:50.119181Z node 1 :KQP_EXECUTER DEBUG: kqp_tasks_graph.cpp:25: StageInfo: StageId #[0,1], InputsCount: 1, OutputsCount: 1 2025-06-03T10:28:50.119278Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715673. Resolved key sets: 1 2025-06-03T10:28:50.119330Z node 1 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:295: TxId: 281474976715673. Resolved key: { TableId: [OwnerId: 72057594046644480, LocalPathId: 9] Access: 1 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL, String : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-03T10:28:50.119345Z node 1 :KQP_EXECUTER DEBUG: kqp_scan_executer.cpp:146: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Start resolving tablets nodes... (1) 2025-06-03T10:28:50.119375Z node 1 :KQP_EXECUTER DEBUG: kqp_shards_resolver.cpp:76: [ShardsResolver] TxId: 281474976715673. Shard resolve complete, resolved shards: 1 2025-06-03T10:28:50.119385Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:265: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. 
Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolved, success: 1, ... anager.h:200;event=send_data_to_compute;space=8388608;queue=1;compute_actor_id=[1:7511668153006989604:2505];rows=3; 2025-06-03T10:28:50.121760Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:184;event=stop_scanner; 2025-06-03T10:28:50.121763Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:42;event=scan_ack_on_finished;actor_id=[1:7511668153006989611:2050]; 2025-06-03T10:28:50.121765Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:175 :TEvSendData: [1:7511668153006989607:2507]/[1:7511668153006989604:2505] 2025-06-03T10:28:50.121768Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:550: SelfId: [1:7511668153006989607:2507]. EVLOGKQP:0/0/3/3 2025-06-03T10:28:50.121772Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:383;event=scanner_finished;tablet_id=72075186224037914;stop_shard=1; 2025-06-03T10:28:50.121776Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:96;event=stop_scanner;actor_id=[1:7511668153006989611:2050];message=;final_flag=1; 2025-06-03T10:28:50.121800Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:594: SelfId: [1:7511668153006989607:2507]. Scheduled table scans, in flight: 0 shards. pending shards to read: 0, pending resolve shards: 0, average read rows: 3, average read bytes: 0, 2025-06-03T10:28:50.121807Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_scan_compute_manager.h:430;event=wait_all_scanner_finished;scans=0; 2025-06-03T10:28:50.121814Z node 1 :KQP_COMPUTE DEBUG: kqp_scan_fetcher_actor.cpp:694: SelfId: [1:7511668153006989607:2507]. EVLOGKQP(max_in_flight:1) InFlightScans:InFlightShards:;wScans=0;wShards=0; {SHARD(72075186224037914):CHUNKS=1;D=0.000000s;PacksCount=1;RowsCount=3;BytesCount=0;MinPackSize=3;MaxPackSize=3;CAVG=0.000000s;CMIN=0.000000s;CMAX=0.000000s;}; 2025-06-03T10:28:50.121832Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668153006989604:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jwtncbsbfedb10kfa5t76d8a. SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:28:50.121837Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668153006989605:2506], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. TraceId : 01jwtncbsbfedb10kfa5t76d8a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646923 2025-06-03T10:28:50.121843Z node 1 :KQP_COMPUTE DEBUG: log.h:466: kqp_scan_compute_actor.cpp:205 :TEvFetcherFinished: [1:7511668153006989607:2507] 2025-06-03T10:28:50.121849Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:670: TxId: 281474976715673, task: 1. Tasks execution finished, waiting for chunk delivery in output channelId: 1, seqNo: [1] 2025-06-03T10:28:50.121849Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715673, task: 2. 
Finish input channelId: 1, from: [1:7511668153006989604:2505] 2025-06-03T10:28:50.121858Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668153006989605:2506], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. TraceId : 01jwtncbsbfedb10kfa5t76d8a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-03T10:28:50.121862Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 1. Tasks execution finished 2025-06-03T10:28:50.121864Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668153006989604:2505], TxId: 281474976715673, task: 1. Ctx: { CustomerSuppliedId : . TraceId : 01jwtncbsbfedb10kfa5t76d8a. SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-03T10:28:50.121889Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 1. pass away 2025-06-03T10:28:50.121908Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715673;task_id=1;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:28:50.121932Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668153006989605:2506], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. TraceId : 01jwtncbsbfedb10kfa5t76d8a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:28:50.121974Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7511668153006989604:2505], task: 1, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 882 Tasks { TaskId: 1 CpuTimeUs: 354 FinishTimeMs: 1748946530121 OutputRows: 1 OutputBytes: 19 Tables { TablePath: "/Root/Test" ReadRows: 3 ReadBytes: 96 } ComputeCpuTimeUs: 38 BuildCpuTimeUs: 316 Sources { IngressName: "CS" Ingress { } } HostName: "ghrun-pyvh3niaay" NodeId: 1 StartTimeMs: 1748946530121 CreateTimeMs: 1748946530119 UpdateTimeMs: 1748946530121 } MaxMemoryUsage: 1048576 } 2025-06-03T10:28:50.121987Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7511668153006989604:2505] 2025-06-03T10:28:50.122002Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 1. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-03T10:28:50.122006Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:645: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Waiting for: CA [1:7511668153006989605:2506], 2025-06-03T10:28:50.122052Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:349: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Send TEvStreamData to [1:7511668153006989575:2498], seqNo: 1, nRows: 1 2025-06-03T10:28:50.122659Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:415: TxId: 281474976715673, send ack to channelId: 2, seqNo: 1, enough: 0, freeSpace: 8388470, to: [1:7511668153006989608:2506] 2025-06-03T10:28:50.122685Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668153006989605:2506], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. TraceId : 01jwtncbsbfedb10kfa5t76d8a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. 
CA StateFunc 271646922 2025-06-03T10:28:50.122699Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715673, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [1] 2025-06-03T10:28:50.122707Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715673, task: 2. Tasks execution finished 2025-06-03T10:28:50.122711Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668153006989605:2506], TxId: 281474976715673, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=. TraceId : 01jwtncbsbfedb10kfa5t76d8a. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-03T10:28:50.122735Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715673, task: 2. pass away 2025-06-03T10:28:50.122752Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715673;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:28:50.122772Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:434: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: ExecuteState, got execution state from compute actor: [1:7511668153006989605:2506], task: 2, state: COMPUTE_STATE_FINISHED, stats: { CpuTimeUs: 1283 Tasks { TaskId: 2 StageId: 1 CpuTimeUs: 155 FinishTimeMs: 1748946530122 InputRows: 1 InputBytes: 19 OutputRows: 1 OutputBytes: 19 ResultRows: 1 ResultBytes: 19 ComputeCpuTimeUs: 62 BuildCpuTimeUs: 93 HostName: "ghrun-pyvh3niaay" NodeId: 1 CreateTimeMs: 1748946530120 UpdateTimeMs: 1748946530122 } MaxMemoryUsage: 1048576 } 2025-06-03T10:28:50.122783Z node 1 :KQP_EXECUTER INFO: kqp_planner.cpp:688: TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Compute actor has finished execution: [1:7511668153006989605:2506] 2025-06-03T10:28:50.122800Z node 1 :KQP_RESOURCE_MANAGER DEBUG: kqp_rm_service.cpp:404: TxId: 281474976715673, taskId: 2. Released resources, Memory: 0, Free Tier: 1048576, ExecutionUnits: 1. 2025-06-03T10:28:50.122828Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:28:50.122841Z node 1 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [1:7511668153006989599:2498] TxId: 281474976715673. Ctx: { TraceId: 01jwtncbsbfedb10kfa5t76d8a, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjNkZjlkZTQtZWE1OTE2ZjQtNzdlY2RjNTYtNjFiMzViNDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.002165s ReadRows: 3 ReadBytes: 96 ru: 3 rate limiter was not found force flag: 1 2025-06-03T10:28:50.123091Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946530163, txId: 281474976715672] shutting down >> Cdc::InitialScan_WithTopicSchemeTx [GOOD] >> Cdc::InitialScan_TopicAutoPartitioning >> KqpCost::OltpWriteRow+isSink [GOOD] >> KqpCost::OlapPointLookup |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |64.4%| [TA] $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpCost::OlapRange ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::AAARangeFullScan [GOOD] Test command err: Trying to start YDB, gRPC: 13414, MsgBus: 30077 2025-06-03T10:28:49.651758Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668146977924931:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:49.651785Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eaa/r3tmp/tmpEGqdRr/pdisk_1.dat 2025-06-03T10:28:49.721158Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:49.721247Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668146977924910:2079] 1748946529651648 != 1748946529651651 TServer::EnableGrpc on GrpcPort 13414, node 1 2025-06-03T10:28:49.737534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:49.737548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:49.737549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:49.737585Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:49.754938Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:49.754990Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:49.756037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30077 TClient is connected to server localhost:30077 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:49.800175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
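The chain of ESchemeOpCreateTable operations below sets up the KqpCost fixture tables. For the /Root/Test table this block later scans, the table_metadata dump near the end of the test output fixes the schema (columns Group:Uint32, Name:String, Amount:Uint64, Comment:String, key (Group, Name)), and the query plan printed after the PONOS marker fixes the query shape. Both are reconstructed here as hypothetical YQL; the test drives this through C++ helpers, so the literal statements may differ:

-- Schema reconstructed from the Columns/KeyColunmNames metadata printed
-- later in this test's output; an equivalent sketch, not the test's DDL.
CREATE TABLE `/Root/Test` (
    `Group` Uint32,
    `Name` String,
    Amount Uint64,
    Comment String,
    PRIMARY KEY (`Group`, `Name`)
);

-- Query reconstructed from the plan below: a TableFullScan reading four
-- columns, Filter item.Amount < 5000, Merge sort on Group (Asc), Limit 1.
SELECT *
FROM `/Root/Test`
WHERE Amount < 5000
ORDER BY `Group`
LIMIT 1;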
2025-06-03T10:28:49.805525Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.825833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.848049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.858246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:50.063147Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151272893847:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.063180Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.101764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.110100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.123040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.179409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.235262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.249054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.262972Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.279456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151272894502:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.279493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.279516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151272894507:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.280518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:50.289891Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668151272894509:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:50.346910Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668151272894560:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } PONOS {"Plan":{"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["Test"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Group (-∞, +∞)","Name (-∞, +∞)"],"Reverse":false,"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/Test","ReadRangesPointPrefixLen":"0","E-Rows":"No estimate","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":1}],"E-Rows":"No estimate","Predicate":"item.Amount \u003C 5000","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Limit-Filter","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":1,"Max":1,"Min":1},"Bytes":{"Count":1,"Sum":19,"Max":19,"Min":19,"History":[0,19]}},"Name":"4","Push":{"WaitTimeUs":{"Count":1,"Sum":371,"Max":371,"Min":371,"History":[0,371]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":1,"Max":1,"Min":1}}}],"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[0,1048576]},"Tasks":1,"OutputRows":{"Count":1,"Sum":1,"Max":1,"Min":1},"FinishedTasks":1,"IngressRows":{"Count":1,"Sum":3,"Max":3,"Min":3},"PhysicalStageId":0,"StageDurationUs":0,"Table":[{"Path":"\/Root\/Test","ReadRows":{"Count":1,"Sum":1,"Max":1,"Min":1},"ReadBytes":{"Count":1,"Sum":20,"Max":20,"Min":20}}],"BaseTimeMs":1748946530556,"OutputBytes":{"Count":1,"Sum":19,"Max":19,"Min":19},"CpuTimeUs":{"Count":1,"Sum":388,"Max":388,"Min":388,"History":[0,388]},"Ingress":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":3,"Max":3,"Min":3},"Bytes":{"Count":1,"Sum":192,"Max":192,"Min":192,"History":[0,192]}},"External":{},"Name":"KqpReadRangesSource","Ingress":{},"Push":{"WaitTimeUs":{"Count":1,"Sum":375,"Max":375,"Min":375,"History":[0,375]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":3,"Max":3,"Min":3},"Bytes":{"Count":1,"Sum":192,"Max":192,"Min":192,"History":[0,192]}}}]}}],"Node Type":"Merge","SortColumns":["Group (Asc)"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"ExternalPlanNodeId":3}],"Name":"Limit","Limit":"1"}],"Node 
Type":"Limit","Stats":{"UseLlvm":"undefined","Output":[{"Pop":{"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":1,"Max":1,"Min":1},"Bytes":{"Count":1,"Sum":19,"Max":19,"Min":19,"History":[1,19]}},"Name":"RESULT","Push":{"WaitTimeUs":{"Count":1,"Sum":247,"Max":247,"Min":247,"History":[1,247]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":1,"Max":1,"Min":1}}}],"MaxMemoryUsage":{"Count":1,"Sum":1048576,"Max":1048576,"Min":1048576,"History":[1,1048576]},"InputBytes":{"Count":1,"Sum":19,"Max":19,"Min":19},"ResultRows":{"Count":1,"Sum":1,"Max":1,"Min":1},"Tasks": ... e":"2","Push":{"WaitTimeUs":{"Count":1,"Sum":244,"Max":244,"Min":244,"History":[1,244]},"WaitPeriods":{"Count":1,"Sum":1,"Max":1,"Min":1},"Chunks":{"Count":1,"Sum":1,"Max":1,"Min":1},"Rows":{"Count":1,"Sum":1,"Max":1,"Min":1},"Bytes":{"Count":1,"Sum":19,"Max":19,"Min":19,"History":[1,19]}}}]}}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"Compilation":{"FromCache":false,"DurationUs":34106,"CpuTimeUs":33083},"ProcessCpuTimeUs":128,"TotalDurationUs":37216,"ResourcePoolId":"default","QueuedTimeUs":245},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["Group (-∞, +∞)","Name (-∞, +∞)"],"Reverse":false,"Name":"TableFullScan","Path":"\/Root\/Test","ReadRangesPointPrefixLen":"0","E-Rows":"No estimate","Table":"Test","ReadColumns":["Amount","Comment","Group","Name"],"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"No estimate","Predicate":"item.Amount \u003C 5000","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Filter"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.388,"A-Cpu":0.388,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Operators":[{"A-Rows":1,"A-SelfCpu":0.146,"A-Cpu":0.534,"A-Size":19,"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","PlanNodeType":"Query"}} query_phases { duration_us: 1712 table_access { name: "/Root/Test" reads { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 1484 affected_shards: 1 } compilation { duration_us: 34106 cpu_time_us: 33083 } process_cpu_time_us: 128 query_plan: "{\"Plan\":{\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":3,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"Tables\":[\"Test\"],\"PlanNodeId\":1,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"No estimate\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Inputs\":[],\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"No estimate\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"No estimate\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"Inputs\":[{\"InternalOperatorId\":1}],\"Name\":\"Limit\",\"Limit\":\"1\"},{\"Inputs\":[{\"ExternalPlanNodeId\":1}],\"E-Rows\":\"No estimate\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"No estimate\",\"E-Cost\":\"No estimate\"}],\"Node 
Type\":\"Limit-Filter\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[0,19]}},\"Name\":\"4\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":371,\"Max\":371,\"Min\":371,\"History\":[0,371]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[0,1048576]},\"Tasks\":1,\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"IngressRows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"PhysicalStageId\":0,\"StageDurationUs\":0,\"Table\":[{\"Path\":\"\\/Root\\/Test\",\"ReadRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"ReadBytes\":{\"Count\":1,\"Sum\":20,\"Max\":20,\"Min\":20}}],\"BaseTimeMs\":1748946530556,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":388,\"Max\":388,\"Min\":388,\"History\":[0,388]},\"Ingress\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[0,192]}},\"External\":{},\"Name\":\"KqpReadRangesSource\",\"Ingress\":{},\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":375,\"Max\":375,\"Min\":375,\"History\":[0,375]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":3,\"Max\":3,\"Min\":3},\"Bytes\":{\"Count\":1,\"Sum\":192,\"Max\":192,\"Min\":192,\"History\":[0,192]}}}]}}],\"Node Type\":\"Merge\",\"SortColumns\":[\"Group (Asc)\"],\"PlanNodeType\":\"Connection\"}],\"Operators\":[{\"Inputs\":[{\"ExternalPlanNodeId\":3}],\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node 
Type\":\"Limit\",\"Stats\":{\"UseLlvm\":\"undefined\",\"Output\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[1,19]}},\"Name\":\"RESULT\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":247,\"Max\":247,\"Min\":247,\"History\":[1,247]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1}}}],\"MaxMemoryUsage\":{\"Count\":1,\"Sum\":1048576,\"Max\":1048576,\"Min\":1048576,\"History\":[1,1048576]},\"InputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"ResultRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Tasks\":1,\"ResultBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"OutputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"FinishedTasks\":1,\"InputRows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"PhysicalStageId\":1,\"StageDurationUs\":0,\"BaseTimeMs\":1748946530556,\"OutputBytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19},\"CpuTimeUs\":{\"Count\":1,\"Sum\":146,\"Max\":146,\"Min\":146,\"History\":[1,146]},\"Input\":[{\"Pop\":{\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[1,19]}},\"Name\":\"2\",\"Push\":{\"WaitTimeUs\":{\"Count\":1,\"Sum\":244,\"Max\":244,\"Min\":244,\"History\":[1,244]},\"WaitPeriods\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Chunks\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Rows\":{\"Count\":1,\"Sum\":1,\"Max\":1,\"Min\":1},\"Bytes\":{\"Count\":1,\"Sum\":19,\"Max\":19,\"Min\":19,\"History\":[1,19]}}}]}}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"Stats\":{\"Compilation\":{\"FromCache\":false,\"DurationUs\":34106,\"CpuTimeUs\":33083},\"ProcessCpuTimeUs\":128,\"TotalDurationUs\":37216,\"ResourcePoolId\":\"default\",\"QueuedTimeUs\":245},\"PlanNodeType\":\"Query\"},\"meta\":{\"version\":\"0.2\",\"type\":\"query\"},\"SimplifiedPlan\":{\"PlanNodeId\":0,\"Plans\":[{\"PlanNodeId\":1,\"Plans\":[{\"PlanNodeId\":2,\"Plans\":[{\"PlanNodeId\":4,\"Plans\":[{\"PlanNodeId\":5,\"Plans\":[{\"PlanNodeId\":6,\"Operators\":[{\"Scan\":\"Parallel\",\"E-Size\":\"No estimate\",\"ReadRanges\":[\"Group (-\342\210\236, +\342\210\236)\",\"Name (-\342\210\236, +\342\210\236)\"],\"Reverse\":false,\"Name\":\"TableFullScan\",\"Path\":\"\\/Root\\/Test\",\"ReadRangesPointPrefixLen\":\"0\",\"E-Rows\":\"No estimate\",\"Table\":\"Test\",\"ReadColumns\":[\"Amount\",\"Comment\",\"Group\",\"Name\"],\"E-Cost\":\"No estimate\"}],\"Node Type\":\"TableFullScan\"}],\"Operators\":[{\"E-Rows\":\"No estimate\",\"Predicate\":\"item.Amount \\u003C 5000\",\"Name\":\"Filter\",\"E-Size\":\"No estimate\",\"E-Cost\":\"No estimate\"}],\"Node Type\":\"Filter\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.388,\"A-Cpu\":0.388,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Operators\":[{\"A-Rows\":1,\"A-SelfCpu\":0.146,\"A-Cpu\":0.534,\"A-Size\":19,\"Name\":\"Limit\",\"Limit\":\"1\"}],\"Node Type\":\"Limit\"}],\"Node Type\":\"ResultSet\",\"PlanNodeType\":\"ResultSet\"}],\"Node Type\":\"Query\",\"PlanNodeType\":\"Query\"}}" query_ast: "(\n(let $1 (KqpTable \'\"/Root/Test\" \'\"72057594046644480:9\" \'\"\" \'1))\n(let $2 \'(\'\"Amount\" \'\"Comment\" \'\"Group\" \'\"Name\"))\n(let $3 (KqpRowsSourceSettings $1 $2 \'(\'(\'\"Sorted\")) 
(Void) \'()))\n(let $4 (Uint64 \'1))\n(let $5 (OptionalType (DataType \'String)))\n(let $6 (StructType \'(\'\"Amount\" (OptionalType (DataType \'Uint64))) \'(\'\"Comment\" $5) \'(\'\"Group\" (OptionalType (DataType \'Uint32))) \'(\'\"Name\" $5)))\n(let $7 \'(\'(\'\"_logical_id\" \'559) \'(\'\"_id\" \'\"b58b2bad-e2a02c0c-22ae4c35-8e3d6bea\") \'(\'\"_wide_channels\" $6)))\n(let $8 (DqPhyStage \'((DqSource (DataSource \'\"KqpReadRangesSource\") $3)) (lambda \'($12) (block \'(\n (let $13 (lambda \'($16) (block \'(\n (let $17 (Member $16 \'\"Amount\"))\n (return $17 (Member $16 \'\"Comment\") (Member $16 \'\"Group\") (Member $16 \'\"Name\") (Coalesce (< $17 (Uint64 \'\"5000\")) (Bool \'false)))\n ))))\n (let $14 (WideFilter (ExpandMap (ToFlow $12) $13) (lambda \'($18 $19 $20 $21 $22) $22) $4))\n (let $15 (lambda \'($23 $24 $25 $26 $27) $23 $24 $25 $26))\n (return (FromFlow (WideMap $14 $15)))\n))) $7))\n(let $9 (DqCnMerge (TDqOutput $8 \'0) \'(\'(\'\"2\" \'\"Asc\"))))\n(let $10 (DqPhyStage \'($9) (lambda \'($28) (FromFlow (NarrowMap (Take (ToFlow $28) $4) (lambda \'($29 $30 $31 $32) (AsStruct \'(\'\"Amount\" $29) \'(\'\"Comment\" $30) \'(\'\"Group\" $31) \'(\'\"Name\" $32)))))) \'(\'(\'\"_logical_id\" \'572) \'(\'\"_id\" \'\"987fea9e-49c86692-2efe21fe-769c06a\"))))\n(let $11 (DqCnResult (TDqOutput $10 \'0) \'()))\n(return (KqpPhysicalQuery \'((KqpPhysicalTx \'($8 $10) \'($11) \'() \'(\'(\'\"type\" \'\"data\")))) \'((KqpTxResultBinding (ListType $6) \'0 \'0)) \'(\'(\'\"type\" \'\"data_query\"))))\n)\n" total_duration_us: 37216 total_cpu_time_us: 34695 query_meta: "{\"query_database\":\"/Root\",\"query_parameter_types\":{},\"table_metadata\":[\"{\\\"DoesExist\\\":true,\\\"Cluster\\\":\\\"db\\\",\\\"Name\\\":\\\"/Root/Test\\\",\\\"SysView\\\":\\\"\\\",\\\"PathId\\\":{\\\"OwnerId\\\":72057594046644480,\\\"TableId\\\":9},\\\"SchemaVersion\\\":1,\\\"Kind\\\":1,\\\"Columns\\\":[{\\\"Name\\\":\\\"Amount\\\",\\\"Id\\\":3,\\\"Type\\\":\\\"Uint64\\\",\\\"TypeId\\\":4,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Comment\\\",\\\"Id\\\":4,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Group\\\",\\\"Id\\\":1,\\\"Type\\\":\\\"Uint32\\\",\\\"TypeId\\\":2,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}},{\\\"Name\\\":\\\"Name\\\",\\\"Id\\\":2,\\\"Type\\\":\\\"String\\\",\\\"TypeId\\\":4097,\\\"NotNull\\\":false,\\\"DefaultFromSequence\\\":\\\"\\\",\\\"DefaultKind\\\":0,\\\"DefaultFromLiteral\\\":{},\\\"IsBuildInProgress\\\":false,\\\"DefaultFromSequencePathId\\\":{\\\"OwnerId\\\":18446744073709551615,\\\"TableId\\\":18446744073709551615}}],\\\"KeyColunmNames\\\":[\\\"Group\\\",\\\"Name\\\"],\\\"RecordsCount\\\":0,\\\"DataSize\\\":0,\\\"StatsLoaded\\\":false}\"],\"table_meta_serialization_type\":2,\"created_at\":\"1748946530\",\"query_type\":\"QUERY_TYPE_SQL_DML\",\"query_syntax\":\"1\",\"query_cluster\":\"db\",\"query_id\
":\"be047096-8f5bdef-6d692d9d-3030af57\",\"version\":\"1.0\"}" >> KqpCost::IndexLookup-useSink >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedBeforeSplit [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OltpWriteRow+isSink [GOOD] Test command err: Trying to start YDB, gRPC: 12403, MsgBus: 9083 2025-06-03T10:28:49.722162Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668147330786083:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:49.722185Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ea8/r3tmp/tmpX3FyUK/pdisk_1.dat 2025-06-03T10:28:49.774775Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:49.774893Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668147330786064:2079] 1748946529722046 != 1748946529722049 TServer::EnableGrpc on GrpcPort 12403, node 1 2025-06-03T10:28:49.787833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:49.787848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:49.787851Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:49.787892Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9083 TClient is connected to server localhost:9083 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:49.850522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:49.850561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:49.851683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:49.853143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.860712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.881623Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.904941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:49.922126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:50.124772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151625754992:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.124816Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.172922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.181186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.193039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.207393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.221092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.235281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.292505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:50.308469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151625755651:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.308469Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668151625755646:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.308483Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:50.309521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:50.318070Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668151625755653:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:50.380424Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668151625755704:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:50.560820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 query_phases { duration_us: 2019 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 585 affected_shards: 1 } compilation { duration_us: 10756 cpu_time_us: 9900 } process_cpu_time_us: 216 total_duration_us: 13434 total_cpu_time_us: 10701 query_phases { duration_us: 1949 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 401 affected_shards: 1 } compilation { duration_us: 10609 cpu_time_us: 9734 } process_cpu_time_us: 203 total_duration_us: 13195 total_cpu_time_us: 10338 2025-06-03T10:28:50.633259Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=5; 2025-06-03T10:28:50.634568Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037922 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:28:50.634622Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037922 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:28:50.634685Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [1:7511668151625756123:2507], Table: `/Root/TestTable` ([72057594046644480:17:1]), SessionActorId: [1:7511668151625755968:2507]Got CONSTRAINT VIOLATION for table `/Root/TestTable`. ShardID=72075186224037922, Sink=[1:7511668151625756123:2507].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:28:50.634820Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:7511668151625756116:2507], SessionActorId: [1:7511668151625755968:2507], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:7511668151625755968:2507]. isRollback=0 2025-06-03T10:28:50.634890Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=ZGUwYTk1ZGMtNDg2YjU5MTgtZTU3YzIzODktNjQ5NzYxMjM=, ActorId: [1:7511668151625755968:2507], ActorState: ExecuteState, TraceId: 01jwtncc9x9dxg3dh7vs3ws628, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:7511668151625756117:2507] from: [1:7511668151625756116:2507] 2025-06-03T10:28:50.634917Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:7511668151625756117:2507] TxId: 281474976715676. Ctx: { TraceId: 01jwtncc9x9dxg3dh7vs3ws628, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGUwYTk1ZGMtNDg2YjU5MTgtZTU3YzIzODktNjQ5NzYxMjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TestTable`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:28:50.634974Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZGUwYTk1ZGMtNDg2YjU5MTgtZTU3YzIzODktNjQ5NzYxMjM=, ActorId: [1:7511668151625755968:2507], ActorState: ExecuteState, TraceId: 01jwtncc9x9dxg3dh7vs3ws628, Create QueryResponse for error on request, msg: query_phases { duration_us: 2576 cpu_time_us: 448 affected_shards: 1 } compilation { duration_us: 10003 cpu_time_us: 9303 } process_cpu_time_us: 158 total_duration_us: 13106 total_cpu_time_us: 9909 query_phases { duration_us: 2128 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 508 affected_shards: 1 } compilation { duration_us: 10670 cpu_time_us: 9924 } process_cpu_time_us: 169 total_duration_us: 13420 total_cpu_time_us: 10601 query_phases { duration_us: 915 cpu_time_us: 477 affected_shards: 1 } compilation { duration_us: 14346 cpu_time_us: 13581 } process_cpu_time_us: 202 total_duration_us: 15869 total_cpu_time_us: 14260 query_phases { duration_us: 2324 table_access { name: "/Root/TestTable" updates { rows: 1 bytes: 20 } partitions_count: 1 } cpu_time_us: 513 affected_shards: 1 } compilation { duration_us: 11248 cpu_time_us: 10434 } process_cpu_time_us: 176 total_duration_us: 14177 total_cpu_time_us: 11123 query_phases { duration_us: 2323 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 567 affected_shards: 1 } compilation { duration_us: 9852 cpu_time_us: 8999 } process_cpu_time_us: 182 total_duration_us: 12855 total_cpu_time_us: 9748 query_phases { duration_us: 2854 table_access { name: "/Root/TestTable" deletes { rows: 1 } partitions_count: 1 } cpu_time_us: 648 affected_shards: 1 } compilation { duration_us: 9575 cpu_time_us: 8652 } process_cpu_time_us: 181 total_duration_us: 13013 total_cpu_time_us: 9481 >> Cdc::ResolvedTimestamps [GOOD] >> Cdc::ResolvedTimestampsMultiplePartitions >> SystemView::ShowCreateTablePartitionPolicyIndexTable [GOOD] >> SystemView::StoragePoolsFields >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] |64.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb |64.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> KqpCost::OlapWriteRow [FAIL] >> KqpCost::Range |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] >> KqpCost::OlapPointLookup [GOOD] >> KqpCost::OlapRange [GOOD] >> Cdc::InitialScanDebezium [GOOD] >> Cdc::AddColumn_TopicAutoPartitioning [GOOD] >> Cdc::AddIndex >> Cdc::InitialScanRacyCompleteAndRequest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::IndexLookup-useSink [GOOD] Test command err: Trying to start YDB, gRPC: 19272, MsgBus: 27683 2025-06-03T10:28:51.354184Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668153644239693:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:51.354543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001e77/r3tmp/tmpRl5IGn/pdisk_1.dat 
2025-06-03T10:28:51.405648Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668153644239672:2079] 1748946531353991 != 1748946531353994 2025-06-03T10:28:51.407766Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19272, node 1 2025-06-03T10:28:51.420318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:51.420337Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:51.420339Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:51.420394Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27683 TClient is connected to server localhost:27683 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:51.482598Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:51.482634Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:51.483647Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:51.486295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.492471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.513579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.540575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:51.552557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.784330Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668153644241326:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.784376Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.821978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.830181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.844441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.900033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.908383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.922795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.935704Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.951513Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668153644241982:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.951550Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668153644241987:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.951555Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.952353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:51.955220Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668153644241989:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:52.027940Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668157939209336:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:52.179285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 /Root/SecondaryKeys/Index/indexImplTable 1 8 /Root/SecondaryKeys 1 8 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapPointLookup [GOOD] Test command err: Trying to start YDB, gRPC: 31664, MsgBus: 6120 2025-06-03T10:28:51.077269Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668157255527863:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:51.077289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001e7f/r3tmp/tmpGfiFEW/pdisk_1.dat 2025-06-03T10:28:51.145811Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668157255527843:2079] 1748946531077135 != 1748946531077138 2025-06-03T10:28:51.147224Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31664, node 1 2025-06-03T10:28:51.161397Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:51.161414Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:51.161416Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:51.161471Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6120 TClient is connected to server localhost:6120 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:51.220656Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:51.220690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:51.221726Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:51.234724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.248855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.269037Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.290886Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.304997Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.441918Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668157255529472:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.441940Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.503633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.514058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.525799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.536647Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.551342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.565045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.579337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.598836Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668157255530123:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.598880Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668157255530128:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.598886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.599858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:51.606113Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668157255530130:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:51.667988Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668157255530181:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:51.855559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.894809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668157255530555:2507];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.894864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668157255530556:2508];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.894883Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668157255530556:2508];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.894884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668157255530555:2507];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.894993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668157255530555:2507];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.894993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668157255530556:2508];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.895045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668157255530556:2508];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.895051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668157255530555:2507];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.895120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668157255530555:2507];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:28:51.895127Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037925;self_id=[1:7511668157255530556:2508];tablet_id=72075186224037925;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 
2025-06-03T10:28:51.895150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_ ... description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.919734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:51.920087Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.920117Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.920173Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.920202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.920230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:28:51.920257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:28:51.920281Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:28:51.920307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:28:51.920332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:28:51.920356Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:28:51.920390Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.920416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037927;self_id=[1:7511668157255530562:2514];tablet_id=72075186224037927;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:28:51.921109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:28:51.921125Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:28:51.921139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:28:51.921144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:28:51.921164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:28:51.921174Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:28:51.921187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:28:51.921198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:28:51.921208Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:28:51.921214Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:28:51.921226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:28:51.921232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:28:51.921260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:51.921272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:51.921321Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:51.921338Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:51.921352Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:51.921357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.921365Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:51.921377Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:51.921384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:51.921482Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.921492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:51.928361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.928432Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.929428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.929730Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.930865Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.930901Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.932036Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.932070Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.932695Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.933171Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.959439Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:51.959440Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:51.959538Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapRange [GOOD] Test command err: Trying to start YDB, gRPC: 28747, MsgBus: 61301 2025-06-03T10:28:51.169532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668154070191376:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:51.169550Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001e86/r3tmp/tmpDl2qcx/pdisk_1.dat 2025-06-03T10:28:51.231265Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668154070191352:2079] 1748946531169296 != 1748946531169299 2025-06-03T10:28:51.231548Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28747, node 1 2025-06-03T10:28:51.245968Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:51.245981Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:51.245982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:51.246036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61301 TClient is connected to server localhost:61301 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:51.304664Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:51.304694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:51.305815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:51.308199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.310411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:28:51.315413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.378868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.399389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.410437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.539991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154070192984:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.540025Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.585024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.594139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.607396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.663281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.677174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.691533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.705408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.720757Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154070193638:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.720777Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.720810Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154070193643:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.721625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:51.724354Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668154070193645:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:28:51.803430Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668154070193696:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:51.947329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.970925Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.970972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.971019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.971054Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.971075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:28:51.971096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:28:51.971113Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:28:51.971129Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:28:51.971150Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510];tablet_id=72075186224037929;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:28:51.971166Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037929;self_id=[1:7511668154070194074:2510 ... 
chema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:51.993073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:51.993085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:51.993105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:51.993120Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:51.993126Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.993130Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:51.993135Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:51.993141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:51.993189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.993197Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037926;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:51.993287Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:28:51.993324Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:28:51.993335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:28:51.993340Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:28:51.993357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:28:51.993362Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:28:51.993370Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:28:51.993378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:28:51.993385Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:28:51.993392Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:28:51.993396Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:28:51.993402Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:28:51.993416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:51.993423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:51.993435Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:51.993442Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:51.993450Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:51.993457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.993461Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:51.993465Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:51.993472Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:51.993507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.993515Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037924;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:52.010032Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.010554Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.011301Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.011826Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.012296Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.013075Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.013262Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.014090Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.014610Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.015031Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976710672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710672; 2025-06-03T10:28:52.028873Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668158365161778:2498] TxId: 281474976710673. Ctx: { TraceId: 01jwtncdnhedwt6dq9zf8zcbbj, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTg1MTQ3YzQtNmExZDBmMy05YmRkYTRhZi1mNTcwMjNhNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Can not find default state storage group for database /Root 2025-06-03T10:28:52.040933Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;self_id=[1:7511668154070194089:2514];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037930;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037931;receive=72075186224037926; 2025-06-03T10:28:52.041041Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710674; 2025-06-03T10:28:52.041124Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710674; 2025-06-03T10:28:52.041186Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976710674;tx_id=281474976710674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976710674; 2025-06-03T10:28:52.082968Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668158365161851:2498] TxId: 281474976710675. Ctx: { TraceId: 01jwtncdpbemv0fsy4833ste15, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTg1MTQ3YzQtNmExZDBmMy05YmRkYTRhZi1mNTcwMjNhNg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRestartOnScanErrors [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:19.402659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:19.402684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.402690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:19.402695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:19.402707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:19.402711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:19.402721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:19.402735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:19.402843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:19.402916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:19.417969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:19.417992Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:19.424608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:19.424722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:19.424760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:19.427007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:19.427061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:19.427197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.427254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:19.428312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.428352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:19.428608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.428616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:19.428625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:19.428631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.428635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:19.428650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.429812Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:19.445025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:19.445100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.445154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:19.445191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:19.445217Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.446157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.446208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:19.446274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.446286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:19.446293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:19.446299Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:19.446902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.446922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:19.446929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:19.447427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.455504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:19.455540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.455550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:28:19.456202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:19.456882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:19.456925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:19.457165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:19.457189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:19.457195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.457332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:19.457343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:19.457377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:19.457387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:19.457857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:19.457864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:19.457900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
or txid 281474976710759:0 128 -> 129 2025-06-03T10:28:51.261008Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:7740 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 343F9740-8337-4A0A-8807-3D77031568F9 amz-sdk-request: attempt=1 content-length: 73 content-md5: q/ySd5GvS6I/qOVxS/4Thg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 73 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000005 2025-06-03T10:28:51.267751Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:51.267766Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710759, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:28:51.267841Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:51.267846Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [4:206:2207], at schemeshard: 72057594046678944, txId: 281474976710759, path id: 4 2025-06-03T10:28:51.267960Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:51.267969Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 281474976710759:0 ProgressState, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 281474976710759 2025-06-03T10:28:51.268140Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-03T10:28:51.268150Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 281474976710759 2025-06-03T10:28:51.268154Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 281474976710759 2025-06-03T10:28:51.268158Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710759, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-03T10:28:51.268163Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:28:51.268179Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 0/1, is published: true REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:7740 Accept: */* Connection: Upgrade, 
HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: BA76684E-C16E-4E4E-9BF6-BF9D0B675F92 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-03T10:28:51.268836Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710759 TestWaitNotification wait txId: 102 2025-06-03T10:28:51.268876Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:28:51.268882Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:28:51.270130Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:62: NotifyTxCompletion export in-flight, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:28:51.270146Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 102, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:7740 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3DF8E8B2-F031-4C35-B7D5-D5CDCDA3B707 amz-sdk-request: attempt=1 content-length: 73 content-md5: q/ySd5GvS6I/qOVxS/4Thg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /metadata.json / / 73 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:7740 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7F7E2B9D-63F1-445E-B37C-7F609C4B001F amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /scheme.pb / / 357 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:7740 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A67921F4-0EF5-4C1E-8E7B-3DCCD4DE5FDE amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-03T10:28:51.768208Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 445 RawX2: 17179871597 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:51.768227Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-03T10:28:51.768246Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 445 RawX2: 17179871597 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 
Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:51.768257Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 445 RawX2: 17179871597 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:51.768268Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:51.768272Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:51.768275Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:28:51.768280Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710759:0 129 -> 240 2025-06-03T10:28:51.768312Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:51.768675Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:51.768707Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:51.768713Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-03T10:28:51.768723Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-03T10:28:51.768727Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:51.768731Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-03T10:28:51.768733Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:51.768736Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-03T10:28:51.768746Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2151] message: TxId: 281474976710759 2025-06-03T10:28:51.768750Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:51.768754Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-03T10:28:51.768757Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710759:0 2025-06-03T10:28:51.768781Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:51.769078Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-03T10:28:51.769113Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710759 2025-06-03T10:28:51.769511Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:28:51.769528Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:588:2543] TestWaitNotification: OK eventTxId 102 >> Cdc::InitialScan_TopicAutoPartitioning [GOOD] >> KqpCost::Range [GOOD] >> Cdc::InitialScanUpdatedRows >> TInterconnectTest::TestCrossConnect [GOOD] >> TInterconnectTest::TestManyEventsWithReconnect >> Cdc::ResolvedTimestampsMultiplePartitions [GOOD] >> Cdc::ResolvedTimestampsVolatileOutOfOrder >> TInterconnectTest::TestManyEventsWithReconnect [GOOD] >> TInterconnectTest::TestEventWithPayloadSerialization >> TopicAutoscaling::ControlPlane_BackCompatibility [GOOD] >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] >> JsonProtoConversion::ProtoMapToJson [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::Range [GOOD] Test command err: Trying to start YDB, gRPC: 23609, MsgBus: 26078 2025-06-03T10:28:51.967932Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668154983416082:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:51.967954Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001e64/r3tmp/tmpoXJxtU/pdisk_1.dat 2025-06-03T10:28:52.038192Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:52.038613Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668154983416058:2079] 1748946531967740 != 1748946531967743 TServer::EnableGrpc on GrpcPort 23609, node 1 2025-06-03T10:28:52.054887Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:52.054898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:52.054900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:52.054945Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-03T10:28:52.070289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:52.070317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:52.071393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26078 TClient is connected to server localhost:26078 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:52.113366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:52.119549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:52.183337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:52.204718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:52.216848Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:52.381525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668159278384990:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.381592Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.439230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.447850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.504258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.516781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.530893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.544602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.559100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.575673Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668159278385645:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.575700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668159278385650:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.575703Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.576549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:52.586210Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668159278385652:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:52.674636Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668159278385703:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |64.4%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/pg/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/cost/unittest >> KqpCost::OlapWriteRow [FAIL] Test command err: Trying to start YDB, gRPC: 1812, MsgBus: 19739 2025-06-03T10:28:50.805550Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668150364557223:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:50.805583Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001e95/r3tmp/tmpJvMuiY/pdisk_1.dat 2025-06-03T10:28:50.876814Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668150364557203:2079] 1748946530805416 != 1748946530805419 2025-06-03T10:28:50.879421Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1812, node 1 2025-06-03T10:28:50.893699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:50.893731Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:50.893734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:50.893812Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19739 TClient is connected to server localhost:19739 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:28:50.949317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:50.949348Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:28:50.950740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:50.964880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:50.978591Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.002425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.062777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.076286Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:51.204110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154659526148:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.204147Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.260954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.316519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.371998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.382666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.396967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.410719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.424897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.441232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154659526805:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.441267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.441310Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668154659526810:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:51.442074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:28:51.444098Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668154659526812:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:28:51.516209Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668154659526864:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:51.651934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:28:51.670110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668154659527281:2519];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.670152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7511668154659527279:2518];tablet_id=72075186224037931;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:28:51.670154Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668154659527281:2519];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.670163Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7511668154659527279:2518];tablet_id=72075186224037931;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:28:51.670213Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668154659527281:2519];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.670219Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7511668154659527279:2518];tablet_id=72075186224037931;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:28:51.670234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668154659527281:2519];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.670234Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7511668154659527279:2518];tablet_id=72075186224037931;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:28:51.670292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self_id=[1:7511668154659527279:2518];tablet_id=72075186224037931;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:28:51.670298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;self_id=[1:7511668154659527281:2519];tablet_id=72075186224037922;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 
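
The TX_COLUMNSHARD entries above register a fixed, ordered set of schema normalizers for each tablet during TTxInitSchema::Execute; the entries that follow trace TTxUpdateSchema::Execute switching each stage in (normalizer_switched) and completing it (normalizer_finished, with the stage's stable numeric id), ending in normalization_finished. A minimal sketch of that logging pattern, assuming nothing about YDB internals beyond what the trace itself shows (stage names and ids copied from the log; the Run bodies are placeholders):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// One stage of the chain. Id mirrors the stable numeric id printed by
// normalizer_finished in the trace (Granules=1, Chunks=2, CleanGranuleId=6, ...).
struct TNormalizer {
    std::string Name;
    int Id;
    std::function<void()> Run;
};

int main() {
    const std::vector<TNormalizer> chain = {
        {"Granules",       1, [] { /* rebuild granule index */ }},
        {"Chunks",         2, [] { /* verify chunk metadata */ }},
        {"CleanGranuleId", 6, [] { /* drop stale granule ids */ }},
    };
    // Registration pass, as logged from TTxInitSchema::Execute.
    for (const auto& n : chain)
        std::cout << "event=normalizer_register;description=CLASS_NAME=" << n.Name << ";\n";
    // Execution pass, as logged from TTxUpdateSchema::Execute.
    for (const auto& n : chain) {
        std::cout << "event=normalizer_switched;description=CLASS_NAME=" << n.Name
                  << ";id=" << n.Name << ";\n";
        n.Run();
        std::cout << "event=normalizer_finished;description=CLASS_NAME=" << n.Name
                  << ";id=" << n.Id << ";\n";
    }
    std::cout << "event=normalization_finished;\n";
}
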
2025-06-03T10:28:51.670314Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037931;self ... xUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:28:51.687198Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:28:51.687202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:28:51.687202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:28:51.687205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:28:51.687211Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:28:51.687220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:51.687221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:28:51.687223Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:51.687227Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:28:51.687235Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:51.687238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:51.687238Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:28:51.687245Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:51.687252Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.687256Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:51.687260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:51.687261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:28:51.687262Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:51.687265Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:28:51.687276Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:28:51.687280Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:28:51.687288Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:28:51.687292Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:28:51.687296Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.687297Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:28:51.687300Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:28:51.687302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037930;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:51.687303Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:28:51.687354Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:28:51.687362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037927;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:28:51.715048Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.715394Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037925;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.716581Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.716607Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.717803Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.717838Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.718989Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.719034Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.719822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037924;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.719976Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:28:51.732894Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668154659527689:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtncdca2w7rpbf56zfsbdtv, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWIwZWU3MzgtNDc5OTk5M2MtOGM2YjM4NS03NDM0NjAxOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Can not find default state storage group for database /Root 2025-06-03T10:28:51.742442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037926;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:51.742442Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:51.742568Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037931;tx_state=TTxProgressTx::Execute;tx_current=281474976715674;tx_id=281474976715674;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715674; 2025-06-03T10:28:51.753373Z node 1 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:1913: ActorId: [1:7511668154659527758:2507] TxId: 281474976715675. Ctx: { TraceId: 01jwtncdd0914ewz3j4z4z2vh6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWIwZWU3MzgtNDc5OTk5M2MtOGM2YjM4NS03NDM0NjAxOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:28:51.758305Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037930;tx_state=TTxProgressTx::Execute;tx_current=281474976715676;tx_id=281474976715676;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715676; query_phases { duration_us: 2661 cpu_time_us: 738 affected_shards: 1 } query_phases { duration_us: 3349 cpu_time_us: 72 affected_shards: 1 } compilation { duration_us: 8066 cpu_time_us: 7396 } process_cpu_time_us: 202 total_duration_us: 14730 total_cpu_time_us: 8408 WARNING: All log messages before y_absl::InitializeLog() is called are written to STDERR F0000 00:00:1748946531.761388 252836 repeated_ptr_field.h:272] Check failed: index < current_size_ (0 vs. 0) F0000 00:00:1748946531.761388 252836 repeated_ptr_field.h:272] Check failed: index < current_size_ (0 vs. 
0) (y_absl::lts_y_20240722::log_internal::FatalException) LogMessageFatal exception >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] >> Cdc::InitialScanRacyCompleteAndRequest [GOOD] >> Cdc::InitialScanAndLimits |64.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::ProtoMapToJson [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBackgroundCompactionTest::ShouldNotCompactServerlessAfterDisable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:43.647947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:43.647987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.647994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:43.648003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:43.648020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:43.648025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:43.648037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:43.648053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:43.648185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:43.648271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:43.664118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:43.664152Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:43.669580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:43.669727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:43.669782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:43.671680Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:43.671765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:43.671877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.671939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:43.672519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:43.672572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:43.672891Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:43.672905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:43.672915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:43.672924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:43.672929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:43.672949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.674198Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:43.691700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:43.691786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.691868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:43.691940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:43.691954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.692912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.692947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:43.693018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.693032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:43.693038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:43.693045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:43.693627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.693645Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:43.693651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:43.694123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.694135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:43.694144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:43.694154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:43.694979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:43.695418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:43.695462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:43.695638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:43.695662Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:43.695678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:43.695743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:43.695750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:43.695785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:43.695796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:43.696189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:43.696198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:43.696244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
T10:28:53.530912Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4942: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-03T10:28:53.530917Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409546 2025-06-03T10:28:53.530942Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409546 2025-06-03T10:28:53.530964Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409546 2025-06-03T10:28:53.623845Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:769:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-03T10:28:53.623892Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-03T10:28:53.623933Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409552 outdated step 200 last cleanup 0 2025-06-03T10:28:53.623958Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409552 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:53.623967Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409552 2025-06-03T10:28:53.623972Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409552 has no attached operations 2025-06-03T10:28:53.623976Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409552 2025-06-03T10:28:53.624013Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435073, Sender [0:0:0], Recipient [3:776:2659]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvCleanupTransaction 2025-06-03T10:28:53.624016Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3155: StateWork, processing event TEvPrivate::TEvCleanupTransaction 2025-06-03T10:28:53.624023Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:214: No cleanup at 72075186233409553 outdated step 200 last cleanup 0 2025-06-03T10:28:53.624028Z node 3 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186233409553 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:53.624030Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186233409553 2025-06-03T10:28:53.624033Z node 3 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186233409553 has no attached operations 2025-06-03T10:28:53.624035Z node 3 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186233409553 2025-06-03T10:28:53.624060Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:769:2654]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-03T10:28:53.624098Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3437: TEvPeriodicTableStats from datashard 72075186233409552, FollowerId 0, tableId 2 2025-06-03T10:28:53.624110Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:776:2659]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-03T10:28:53.624124Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3437: TEvPeriodicTableStats 
from datashard 72075186233409553, FollowerId 0, tableId 2 2025-06-03T10:28:53.624232Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553162, Sender [3:776:2659], Recipient [3:898:2755]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409553 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 5 Memory: 119208 } ShardState: 2 UserTablePartOwners: 72075186233409553 NodeId: 3 StartTime: 121 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-03T10:28:53.624239Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:28:53.624255Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0005 2025-06-03T10:28:53.624271Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409553 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:28:53.624279Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-03T10:28:53.624300Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553162, Sender [3:769:2654], Recipient [3:898:2755]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409552 TableLocalId: 2 Generation: 2 Round: 10 TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 10 Memory: 119208 } ShardState: 2 UserTablePartOwners: 72075186233409552 NodeId: 3 StartTime: 121 TableOwnerId: 72075186233409549 FollowerId: 0 2025-06-03T10:28:53.624303Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:28:53.624307Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 
followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.001 2025-06-03T10:28:53.624315Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72075186233409549 from shard 72075186233409552 followerId 0 pathId [OwnerId: 72075186233409549, LocalPathId: 2] raw table stats: DataSize: 0 RowCount: 0 IndexSize: 0 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 0 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:28:53.634928Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:898:2755]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:28:53.634963Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:28:53.634979Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [3:898:2755], Recipient [3:898:2755]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:28:53.634985Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:28:53.645162Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435096, Sender [0:0:0], Recipient [3:898:2755]: NKikimr::NSchemeShard::TEvPrivate::TEvSendBaseStatsToSA 2025-06-03T10:28:53.645202Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5067: StateWork, processing event TEvPrivate::TEvSendBaseStatsToSA 2025-06-03T10:28:53.645285Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435076, Sender [0:0:0], Recipient [3:898:2755]: NKikimr::NSchemeShard::TEvPrivate::TEvRunConditionalErase 2025-06-03T10:28:53.645310Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4942: StateWork, processing event TEvPrivate::TEvRunConditionalErase 2025-06-03T10:28:53.645316Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6637: Handle: TEvRunConditionalErase, at schemeshard: 72075186233409549 2025-06-03T10:28:53.645344Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:56: TTxRunConditionalErase DoExecute: at schemeshard: 72075186233409549 2025-06-03T10:28:53.645365Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__conditional_erase.cpp:189: TTxRunConditionalErase DoComplete: at schemeshard: 72075186233409549 2025-06-03T10:28:53.645396Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269746180, Sender [3:2012:3829], Recipient [3:898:2755]: NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-03T10:28:53.645402Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5066: StateWork, processing event TEvTxProxySchemeCache::TEvNavigateKeySetResult 2025-06-03T10:28:53.665930Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:2015:3832], Recipient [3:769:2654]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.665959Z node 3 :TX_DATASHARD TRACE: 
datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.665971Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409552, clientId# [3:2014:3831], serverId# [3:2015:3832], sessionId# [0:0:0] 2025-06-03T10:28:53.666066Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553213, Sender [3:2013:3830], Recipient [3:769:2654]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } 2025-06-03T10:28:53.666173Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [3:2018:3835], Recipient [3:776:2659]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.666178Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.666183Z node 3 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186233409553, clientId# [3:2017:3834], serverId# [3:2018:3835], sessionId# [0:0:0] 2025-06-03T10:28:53.666201Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553213, Sender [3:2016:3833], Recipient [3:776:2659]: NKikimrTxDataShard.TEvGetCompactTableStats PathId { OwnerId: 72075186233409549 LocalId: 2 } |64.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/ut/ydb-core-mind-ut |64.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/ut/ydb-core-mind-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/actorlib_impl/ut/unittest >> TInterconnectTest::TestEventWithPayloadSerialization [GOOD] Test command err: Starting iteration 0 Starting iteration 1 Starting iteration 2 Starting iteration 3 Starting iteration 4 Starting iteration 5 Starting iteration 6 Starting iteration 7 Starting iteration 8 Starting iteration 9 Starting iteration 10 Starting iteration 11 Starting iteration 12 Starting iteration 13 Starting iteration 14 Starting iteration 15 Starting iteration 16 Starting iteration 17 Starting iteration 18 Starting iteration 19 Starting iteration 20 Starting iteration 21 Starting iteration 22 Starting iteration 23 Starting iteration 24 Starting iteration 25 Starting iteration 26 Starting iteration 27 Starting iteration 28 Starting iteration 29 Starting iteration 30 Starting iteration 31 Starting iteration 32 Starting iteration 33 Starting iteration 34 Starting iteration 35 Starting iteration 36 Starting iteration 37 Starting iteration 38 Starting iteration 39 Starting iteration 40 Starting iteration 41 Starting iteration 42 Starting iteration 43 Starting iteration 44 Starting iteration 45 Starting iteration 46 Starting iteration 47 Starting iteration 48 Starting iteration 49 Starting iteration 50 Starting iteration 51 Starting iteration 52 Starting iteration 53 Starting iteration 54 Starting iteration 55 Starting iteration 56 Starting iteration 57 Starting iteration 58 Starting iteration 59 Starting iteration 60 Starting iteration 61 Starting iteration 62 Starting iteration 63 Starting iteration 64 Starting iteration 65 Starting iteration 66 Starting iteration 67 Starting iteration 68 Starting iteration 69 Starting iteration 70 Starting iteration 71 Starting iteration 72 Starting iteration 73 Starting iteration 74 Starting iteration 75 Starting iteration 76 Starting iteration 77 Starting iteration 78 Starting iteration 79 Starting iteration 80 Starting iteration 81 Starting iteration 82 Starting iteration 83 Starting iteration 84 Starting iteration 
85 Starting iteration 86 Starting iteration 87 Starting iteration 88 Starting iteration 89 Starting iteration 90 Starting iteration 91 Starting iteration 92 Starting iteration 93 Starting iteration 94 Starting iteration 95 Starting iteration 96 Starting iteration 97 Starting iteration 98 Starting iteration 99 Starting iteration 100 Starting iteration 101 Starting iteration 102 Starting iteration 103 Starting iteration 104 Starting iteration 105 Starting iteration 106 Starting iteration 107 Starting iteration 108 Starting iteration 109 Starting iteration 110 Starting iteration 111 Starting iteration 112 Starting iteration 113 Starting iteration 114 Starting iteration 115 Starting iteration 116 Starting iteration 117 Starting iteration 118 Starting iteration 119 Starting iteration 120 Starting iteration 121 Starting iteration 122 Starting iteration 123 Starting iteration 124 Starting iteration 125 Starting iteration 126 Starting iteration 127 Starting iteration 128 Starting iteration 129 Starting iteration 130 Starting iteration 131 Starting iteration 132 Starting iteration 133 Starting iteration 134 Starting iteration 135 Starting iteration 136 Starting iteration 137 Starting iteration 138 Starting iteration 139 Starting iteration 140 Starting iteration 141 Starting iteration 142 Starting iteration 143 Starting iteration 144 Starting iteration 145 Starting iteration 146 Starting iteration 147 Starting iteration 148 Starting iteration 149 Starting iteration 150 Starting iteration 151 Starting iteration 152 Starting iteration 153 Starting iteration 154 Starting iteration 155 Starting iteration 156 Starting iteration 157 Starting iteration 158 Starting iteration 159 Starting iteration 160 Starting iteration 161 Starting iteration 162 Starting iteration 163 Starting iteration 164 Starting iteration 165 Starting iteration 166 Starting iteration 167 Starting iteration 168 Starting iteration 169 Starting iteration 170 Starting iteration 171 Starting iteration 172 Starting iteration 173 Starting iteration 174 Starting iteration 175 Starting iteration 176 Starting iteration 177 Starting iteration 178 Starting iteration 179 Starting iteration 180 Starting iteration 181 Starting iteration 182 Starting iteration 183 Starting iteration 184 Starting iteration 185 Starting iteration 186 Starting iteration 187 Starting iteration 188 Starting iteration 189 Starting iteration 190 Starting iteration 191 Starting iteration 192 Starting iteration 193 Starting iteration 194 Starting iteration 195 Starting iteration 196 Starting iteration 197 Starting iteration 198 Starting iteration 199 0 0 0 1 0 3 0 7 0 15 0 31 0 63 0 127 0 255 0 511 0 1023 0 2047 0 4095 0 8191 0 16383 0 32767 0 65535 1 0 1 1 1 3 1 7 1 15 1 31 1 63 1 127 1 255 1 511 1 1023 1 2047 1 4095 1 8191 1 16383 1 32767 1 65535 3 0 3 1 3 3 3 7 3 15 3 31 3 63 3 127 3 255 3 511 3 1023 3 2047 3 4095 3 8191 3 16383 3 32767 3 65535 7 0 7 1 7 3 7 7 7 15 7 31 7 63 7 127 7 255 7 511 7 1023 7 2047 7 4095 7 8191 7 16383 7 32767 7 65535 15 0 15 1 15 3 15 7 15 15 15 31 15 63 15 127 15 255 15 511 15 1023 15 2047 15 4095 15 8191 15 16383 15 32767 15 65535 31 0 31 1 31 3 31 7 31 15 31 31 31 63 31 127 31 255 31 511 31 1023 31 2047 31 4095 31 8191 31 16383 31 32767 31 65535 63 0 63 1 63 3 63 7 63 15 63 31 63 63 63 127 63 255 63 511 63 1023 63 2047 63 4095 63 8191 63 16383 63 32767 63 65535 127 0 127 1 127 3 127 7 127 15 127 31 127 63 127 127 127 255 127 511 127 1023 127 2047 127 4095 127 8191 127 16383 127 32767 127 65535 255 0 255 1 255 3 255 7 255 15 255 31 255 63 255 
127 255 255 255 511 255 1023 255 2047 255 4095 255 8191 255 16383 255 32767 255 65535 511 0 511 1 511 3 511 7 511 15 511 31 511 63 511 127 511 255 511 511 511 1023 511 2047 511 4095 511 8191 511 16383 511 32767 511 65535 1023 0 1023 1 1023 3 1023 7 1023 15 1023 31 1023 63 1023 127 1023 255 1023 511 1023 1023 1023 2047 1023 4095 1023 8191 1023 16383 1023 32767 1023 65535 2047 0 2047 1 2047 3 2047 7 2047 15 2047 31 2047 63 2047 127 2047 255 2047 511 2047 1023 2047 2047 2047 4095 2047 8191 2047 16383 2047 32767 2047 65535 4095 0 4095 1 4095 3 4095 7 4095 15 4095 31 4095 63 4095 127 4095 255 4095 511 4095 1023 4095 2047 4095 4095 4095 8191 4095 16383 4095 32767 4095 65535 8191 0 8191 1 8191 3 8191 7 8191 15 8191 31 8191 63 8191 127 8191 255 8191 511 8191 1023 8191 2047 8191 4095 8191 8191 8191 16383 8191 32767 8191 65535 16383 0 16383 1 16383 3 16383 7 16383 15 16383 31 16383 63 16383 127 16383 255 16383 511 16383 1023 16383 2047 16383 4095 16383 8191 16383 16383 16383 32767 16383 65535 32767 0 32767 1 32767 3 32767 7 32767 15 32767 31 32767 63 32767 127 32767 255 32767 511 32767 1023 32767 2047 32767 4095 32767 8191 32767 16383 32767 32767 32767 65535 65535 0 65535 1 65535 3 65535 7 65535 15 65535 31 65535 63 65535 127 65535 255 65535 511 65535 1023 65535 2047 65535 4095 65535 8191 65535 16383 65535 32767 65535 65535 |64.5%| [LD] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/ydb-core-load_test-ut_ycsb >> SystemView::ShowCreateTableColumnUpsertIndex [GOOD] >> SystemView::ShowCreateTableColumnAlterObject |64.5%| [LD] {RESULT} $(B)/ydb/core/mind/ut/ydb-core-mind-ut >> TopicAutoscaling::Simple_BeforeAutoscaleAwareSDK [GOOD] >> Viewer::JsonAutocompleteStartOfDatabaseName >> TopicAutoscaling::Simple_AutoscaleAwareSDK |64.5%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_collector/test-results/unittest/{meta.json ... 
results_accumulator.log} >> Cdc::InitialScanUpdatedRows [GOOD] >> Cdc::MustNotLoseSchemaSnapshot >> Viewer::TabletMerging >> Viewer::JsonAutocompleteSimilarDatabaseName >> Cdc::AddIndex [GOOD] >> Cdc::AddStream >> Viewer::Cluster10000Tablets |64.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest |64.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_incremental_restore/unittest >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 >> Viewer::TabletMergingPacked >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom >> Viewer::FuzzySearcherLimit3OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit4OutOf4 [GOOD] >> Viewer::FuzzySearcherLongWord [GOOD] >> Viewer::FuzzySearcherPriority [GOOD] >> Viewer::JsonAutocompleteColumns >> TServiceAccountServiceTest::Get [GOOD] |64.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |64.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap |64.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/olap/ydb-core-kqp-ut-olap >> Viewer::JsonAutocompleteStartOfDatabaseName [GOOD] >> Viewer::JsonStorageListingV1 >> Cdc::InitialScanAndLimits [GOOD] >> Cdc::InitialScanComplete >> TCdcStreamTests::MeteringServerless [GOOD] >> TCdcStreamTests::MeteringDedicated >> Viewer::JsonAutocompleteSimilarDatabaseName [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit >> TopicAutoscaling::PartitionSplit_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK >> Balancing::Balancing_OneTopic_TopicApi [GOOD] >> Balancing::Balancing_OneTopic_PQv1 >> ReadLoad::ShouldReadIterate >> TopicAutoscaling::PartitionSplit_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutoscaleAwareSDK >> Viewer::TabletMergingPacked [GOOD] >> Viewer::VDiskMerging >> UpsertLoad::ShouldWriteDataBulkUpsertBatch >> Cdc::MustNotLoseSchemaSnapshot [GOOD] >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx >> JsonProtoConversion::JsonToProtoArray [GOOD] >> Viewer::PDiskMerging [GOOD] >> Viewer::SelectStringWithBase64Encoding >> SystemView::StoragePoolsFields [GOOD] >> Viewer::VDiskMerging [GOOD] >> Viewer::TenantInfo5kkTablets >> Viewer::JsonAutocompleteSimilarDatabaseNameWithLimit [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST >> Cdc::AddStream [GOOD] >> Cdc::AwsRegion >> Viewer::JsonAutocompleteColumns [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/library/ycloud/impl/ut/unittest >> TServiceAccountServiceTest::Get [GOOD] Test command err: 2025-06-03T10:28:56.089118Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668175732344312:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:56.089136Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ee2/r3tmp/tmpBAZTue/pdisk_1.dat 2025-06-03T10:28:56.160479Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:56.161040Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668175732344293:2079] 1748946536088952 != 1748946536088955 TClient is connected to server 
localhost:24799 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:56.192019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:56.192057Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:56.193162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:56.234159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:56.598880Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668175391114564:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:56.598900Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ee2/r3tmp/tmpltxFuo/pdisk_1.dat 2025-06-03T10:28:56.621412Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:56.622837Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668175391114543:2079] 1748946536598643 != 1748946536598646 TClient is connected to server localhost:11080 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:28:56.704563Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:56.704599Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:56.705081Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:56.705520Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... |64.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/http_proxy/ut/unittest >> JsonProtoConversion::JsonToProtoArray [GOOD] >> Viewer::SelectStringWithNoBase64Encoding >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] >> Viewer::Cluster10000Tablets [GOOD] >> Viewer::FuzzySearcherLimit1OutOf4 [GOOD] >> Viewer::FuzzySearcherLimit2OutOf4 [GOOD] >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation |64.5%| [TA] $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |64.5%| [TA] $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteColumns [GOOD] Test command err: 2025-06-03T10:28:57.072392Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:319:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.072452Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.072471Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 10897, node 1 TClient is connected to server localhost:62888 json result: {"Success":true,"Result":{"Total":6,"Entities":[{"Name":"name","Type":"column","Parent":"orders"},{"Name":"name","Type":"column","Parent":"products"},{"Name":"id","Type":"column","Parent":"orders"},{"Name":"id","Type":"column","Parent":"products"},{"Name":"description","Type":"column","Parent":"orders"},{"Name":"description","Type":"column","Parent":"products"}]},"Version":2} >> Cdc::InitialScanComplete [GOOD] >> Cdc::InitialScanEnqueuesZeroRecords >> ReadLoad::ShouldReadIterate [GOOD] >> ReadLoad::ShouldReadIterateMoreThanRows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsertKeyFrom [GOOD] Test command err: 2025-06-03T10:28:56.593766Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:56.593836Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:56.593857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002571/r3tmp/tmpJX7oSf/pdisk_1.dat 2025-06-03T10:28:56.711822Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:56.730722Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:56.732014Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946536136529 != 1748946536136533 2025-06-03T10:28:56.773993Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:56.774040Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:56.784709Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:56.858538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.066551Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 KeyFrom: 12345 } 2025-06-03T10:28:57.066591Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 KeyFrom: 12345 2025-06-03T10:28:57.066927Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-06-03T10:28:57.066933Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-03T10:28:57.066940Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-03T10:28:57.066944Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-03T10:28:57.066947Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-03T10:28:57.066951Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], 
subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 KeyFrom: 12345 2025-06-03T10:28:57.067545Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=NTk2OWQwZDctOTg4MWQyZTctMjczYzQ0NDgtODYxYzQ0MzE= 2025-06-03T10:28:57.068071Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=ZjYzODYxY2MtMzFmYTVlODktOTU1MjRjOTQtNmQ5YjBkYzQ= 2025-06-03T10:28:57.068085Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=YzY5NTBmZjEtMWYzYTJlYTItZjZkMTBkY2UtNjdkOWEyYTI= 2025-06-03T10:28:57.068321Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=NDhkYjk2ZTItODljNWVkMzctMTI0YzliOWItM2M4MjJjYmY= 2025-06-03T10:28:57.068525Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=YzNlODUwLWVmMGNmNTBkLTkzNTEzOTQ1LTFiM2UzMDQx 2025-06-03T10:28:57.069197Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069215Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069220Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069229Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:777:2653], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.069243Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.070469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:57.079428Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:793:2669] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.079671Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:794:2670] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.079722Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.079799Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:802:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.225615Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:57.225655Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:57.225667Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:57.225678Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:787:2663], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:57.225687Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:57.257346Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.366938Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1748946537.366919s, errors=0 2025-06-03T10:28:57.367071Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1748946537366 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:28:57.378183Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:963:2769] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.428102Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1748946537.428080s, errors=0 2025-06-03T10:28:57.428261Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1748946537428 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:28:57.439558Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1014:2791] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.464892Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1042:2804] txid# 281474976715676, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.503876Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1101:2827] txid# 281474976715682, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:57.516623Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1748946537.516601s, errors=0 2025-06-03T10:28:57.516774Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1748946537516 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:28:57.530458Z node 1 
:DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} finished in 1748946537.530434s, errors=0 2025-06-03T10:28:57.530600Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1748946537530 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:28:57.554695Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} finished in 1748946537.554677s, errors=0 2025-06-03T10:28:57.554817Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1748946537554 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:28:57.554824Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 0.487912s, oks# 20, errors# 0 2025-06-03T10:28:57.554839Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 >> UpsertLoad::ShouldWriteDataBulkUpsertBatch [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom >> Viewer::JsonAutocompleteSimilarDatabaseNamePOST [GOOD] >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase >> UpsertLoad::ShouldWriteDataBulkUpsert >> ReadLoad::ShouldReadKqp >> TopicAutoscaling::ReadingAfterSplitTest_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK >> Viewer::ExecuteQueryDoesntExecuteSchemeOperationsInsideTransation [GOOD] >> Viewer::FloatPointJsonQuery >> Cdc::MustNotLoseSchemaSnapshotWithVolatileTx [GOOD] >> Cdc::ResolvedTimestampForDisplacedUpsert >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom >> Cdc::ResolvedTimestampsVolatileOutOfOrder [GOOD] >> Cdc::SequentialSplitMerge >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql >> SystemView::ShowCreateTableColumnAlterObject [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] |64.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |64.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut |64.5%| [TA] {RESULT} $(B)/ydb/library/ycloud/impl/ut/test-results/unittest/{meta.json ... results_accumulator.log} |64.5%| [TA] {RESULT} $(B)/ydb/core/http_proxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} |64.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/ydb-core-fq-libs-ydb-ut >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] >> Cdc::AwsRegion [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadIterateMoreThanRows [GOOD] Test command err: 2025-06-03T10:28:57.331124Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.331234Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.331272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00256d/r3tmp/tmpVQVJbA/pdisk_1.dat 2025-06-03T10:28:57.451049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.468091Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:57.469340Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946536877199 != 1748946536877203 2025-06-03T10:28:57.511555Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:57.511600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:57.522288Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:57.596138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.805259Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 1000 2025-06-03T10:28:57.805723Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 1000 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-03T10:28:57.834790Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor finished in 0.028978s, errors=0 2025-06-03T10:28:57.835111Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-03T10:28:57.835146Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [1:741:2623] with id# {Tag: 0, parent: [1:732:2614], subTag: 3} Bootstrap called: RowCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-03T10:28:57.837454Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 
2025-06-03T10:28:57.837510Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:744:2626] 2025-06-03T10:28:57.837529Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Bootstrap called, sample# 0 2025-06-03T10:28:57.837535Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-06-03T10:28:57.837673Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:57.839400Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} finished in 0.001712s, read# 1000 2025-06-03T10:28:57.839476Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:744:2626] with chunkSize# 0 finished: 0 { DurationMs: 1 OperationsOK: 1000 OperationsError: 0 } 2025-06-03T10:28:57.839504Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:747:2629] 2025-06-03T10:28:57.839512Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Bootstrap called, sample# 0 2025-06-03T10:28:57.839517Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Connect to# 72075186224037888 called 2025-06-03T10:28:57.839578Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:57.855422Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 2} finished in 0.015822s, read# 1000 2025-06-03T10:28:57.855500Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:747:2629] with chunkSize# 1 finished: 0 { DurationMs: 15 OperationsOK: 1000 OperationsError: 0 } 2025-06-03T10:28:57.855538Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:750:2632] 2025-06-03T10:28:57.855548Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Bootstrap called, sample# 0 2025-06-03T10:28:57.855554Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Connect to# 72075186224037888 called 2025-06-03T10:28:57.855634Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:57.858184Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 3} finished in 0.002534s, read# 1000 2025-06-03T10:28:57.858241Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [1:750:2632] with chunkSize# 10 finished: 0 { DurationMs: 2 OperationsOK: 1000 OperationsError: 0 } 2025-06-03T10:28:57.858272Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [1:753:2635] 2025-06-03T10:28:57.858281Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} Bootstrap called, sample# 1000 2025-06-03T10:28:57.858286Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} Connect to# 72075186224037888 called 2025-06-03T10:28:57.858357Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, 
parent: [1:741:2623], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:57.858997Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 4} finished in 0.000493s, sampled# 1000, iter finished# 1, oks# 1000 2025-06-03T10:28:57.859023Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} received keyCount# 1000 2025-06-03T10:28:57.859072Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} started read actor with id# [1:756:2638] 2025-06-03T10:28:57.859079Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [1:741:2623], subTag: 5} Bootstrap called, will read keys# 1000 2025-06-03T10:28:57.889805Z node 1 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-03T10:28:57.889910Z node 1 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 30 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 1\n" } 2025-06-03T10:28:57.889943Z node 1 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [1:732:2614], subTag: 3} finished in 0.054766s with report: { DurationMs: 1 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 15 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 2 OperationsOK: 1000 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 30 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 1\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-03T10:28:57.890054Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:741:2623] with tag# 3 2025-06-03T10:28:58.620292Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:58.620388Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:58.620399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00256d/r3tmp/tmpfJJ837/pdisk_1.dat 2025-06-03T10:28:58.718390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.733776Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:58.734558Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946538168815 != 1748946538168819 2025-06-03T10:28:58.776660Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:58.776714Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:58.787456Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:58.861105Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.056643Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 10 2025-06-03T10:28:59.056738Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-03T10:28:59.077765Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 1} TUpsertActor finished in 0.020963s, errors=0 2025-06-03T10:28:59.077951Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadIteratorStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadIteratorStart { RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 } 2025-06-03T10:28:59.077970Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:334: ReadIteratorLoadScenario# [2:741:2623] with id# {Tag: 0, parent: [2:732:2614], subTag: 3} Bootstrap called: RowCount: 10 ReadCount: 1000 Inflights: 1 Chunks: 0 Chunks: 1 Chunks: 10 2025-06-03T10:28:59.079560Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:396: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, 
keyColumnCount# 1 2025-06-03T10:28:59.079591Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:744:2626] 2025-06-03T10:28:59.079603Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Bootstrap called, sample# 0 2025-06-03T10:28:59.079608Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-06-03T10:28:59.079671Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:59.079841Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 1} finished in 0.000165s, read# 10 2025-06-03T10:28:59.079871Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:744:2626] with chunkSize# 0 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-03T10:28:59.079882Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:747:2629] 2025-06-03T10:28:59.079886Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Bootstrap called, sample# 0 2025-06-03T10:28:59.079889Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Connect to# 72075186224037888 called 2025-06-03T10:28:59.079916Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:59.080087Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 2} finished in 0.000167s, read# 10 2025-06-03T10:28:59.080103Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:747:2629] with chunkSize# 1 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-03T10:28:59.080116Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:750:2632] 2025-06-03T10:28:59.080122Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Bootstrap called, sample# 0 2025-06-03T10:28:59.080126Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Connect to# 72075186224037888 called 2025-06-03T10:28:59.080171Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:59.080251Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:147: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 3} finished in 0.000076s, read# 10 2025-06-03T10:28:59.080264Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:456: fullscan actor# [2:750:2632] with chunkSize# 10 finished: 0 { DurationMs: 0 OperationsOK: 10 OperationsError: 0 } 2025-06-03T10:28:59.080271Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:437: started fullscan actor# [2:753:2635] 2025-06-03T10:28:59.080276Z node 2 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} Bootstrap called, sample# 10 2025-06-03T10:28:59.080280Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} Connect to# 72075186224037888 called 2025-06-03T10:28:59.080314Z node 2 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, 
parent: [2:741:2623], subTag: 4} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:59.080360Z node 2 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [2:741:2623], subTag: 4} finished in 0.000038s, sampled# 10, iter finished# 1, oks# 10 2025-06-03T10:28:59.080370Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:506: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} received keyCount# 10 2025-06-03T10:28:59.080422Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:551: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} started read actor with id# [2:756:2638] 2025-06-03T10:28:59.080427Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:79: TReadIteratorPoints# {Tag: 0, parent: [2:741:2623], subTag: 5} Bootstrap called, will read keys# 10 2025-06-03T10:28:59.101996Z node 2 :DS_LOAD_TEST DEBUG: test_load_read_iterator.cpp:559: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} received point times# 1000, Inflight left# 0 2025-06-03T10:28:59.102064Z node 2 :DS_LOAD_TEST INFO: test_load_read_iterator.cpp:482: headread with inflight# 1 finished: 0 { DurationMs: 21 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 1\n" } 2025-06-03T10:28:59.102089Z node 2 :DS_LOAD_TEST NOTICE: test_load_read_iterator.cpp:616: ReadIteratorLoadScenario# {Tag: 0, parent: [2:732:2614], subTag: 3} finished in 0.024093s with report: { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 1, type# FullScan with chunk# inf" } { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 2, type# FullScan with chunk# 1" } { DurationMs: 0 OperationsOK: 10 OperationsError: 0 PrefixInfo: "Test run# 3, type# FullScan with chunk# 10" } { DurationMs: 21 OperationsOK: 1000 OperationsError: 0 Info: "single row head read hist (ms):\n50%: 1\n95%: 1\n99%: 1\n99.9%: 1\n" PrefixInfo: "Test run# 4, type# ReadHeadPoints with inflight# 1" } 2025-06-03T10:28:59.102117Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:741:2623] with tag# 3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::StoragePoolsFields [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b43/r3tmp/tmpkSd1Ee/pdisk_1.dat 2025-06-03T10:28:03.584097Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:03.627704Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:03.632751Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:03.632783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:03.634741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27193, node 1 2025-06-03T10:28:03.669552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:03.669576Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 
2025-06-03T10:28:03.669578Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:03.669628Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8619 TClient is connected to server localhost:8619 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:03.741958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:04.093624Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-03T10:28:04.093654Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:329: Updated config 2025-06-03T10:28:04.099949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667953005435036:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.099980Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.100139Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667953005435048:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:04.101045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:04.107458Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667953005435050:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:04.183418Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667953005435123:2738] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:04.183843Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1183: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-03T10:28:04.183880Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:410: Perform request, TraceId.SpanIdPtr: 0x0000110893237918 2025-06-03T10:28:04.183904Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:420: Received compile request, sender: [1:7511667953005435017:2336], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jwtnayw17k5q8dyakk6x02pm, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U3MGM4M2EtOWJhMjM4MmQtOTk1Mjc5NmUtNmY1NGE1NTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-03T10:28:04.183925Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1183: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-03T10:28:04.183937Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:516: 
Added request to queue, sender: [1:7511667953005435017:2336], queueSize: 1 2025-06-03T10:28:04.184113Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:877: Created compile actor, sender: [1:7511667953005435017:2336], compileActor: [1:7511667953005435134:2347] 2025-06-03T10:28:04.255028Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnayw17k5q8dyakk6x02pm, SessionId: CompileActor 2025-06-03 10:28:04.254 INFO ydb-core-sys_view-ut(pid=212617, tid=0x00007EFEA54B4640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-03T10:28:04.255626Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnayw17k5q8dyakk6x02pm, SessionId: CompileActor 2025-06-03 10:28:04.255 INFO ydb-core-sys_view-ut(pid=212617, tid=0x00007EFEA54B4640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-03T10:28:04.255719Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnayw17k5q8dyakk6x02pm, SessionId: CompileActor 2025-06-03 10:28:04.255 INFO ydb-core-sys_view-ut(pid=212617, tid=0x00007EFEA54B4640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-03T10:28:04.255879Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jwtnayw17k5q8dyakk6x02pm, SessionId: CompileActor 2025-06-03 10:28:04.255 TRACE ydb-core-sys_view-ut(pid=212617, tid=0x00007EFEA54B4640) [KQP] kqp_transform.cpp:33: YqlTransformer: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAcc ... e_info.cpp:25: HIVE#72057594037968897 Node(41, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:51.776859Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:52.050337Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7511668160294305843:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.050360Z node 41 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [41:7511668160294305819:2325], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.050368Z node 41 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:52.051095Z node 41 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:52.052887Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [41:7511668160294305848:2329], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:28:52.104450Z node 41 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [41:7511668160294305899:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:52.122149Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtncde655c17agawreh5wts, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=ZTJjZmEyYmEtYjA2MDU1YTYtYzc5Yzk5YS00MWEzZWY3NA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:52.122596Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668160294305934:2337], owner: [41:7511668160294305930:2335], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:52.122785Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668160294305934:2337], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:52.122909Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668160294305934:2337], row count: 0, finished: 1 2025-06-03T10:28:52.122926Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668160294305934:2337], owner: [41:7511668160294305930:2335], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:52.123480Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946532121, txId: 281474976715660] shutting down 2025-06-03T10:28:53.138891Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtncer4360dy5shawgws7br, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=YjIzMGRiYTctYjUxZmVjNDctOGMzZWM4ODUtMjA1ZmIyMmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:53.139335Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668164589273281:2351], owner: [41:7511668164589273277:2349], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:53.139492Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668164589273281:2351], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:53.139549Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668164589273281:2351], row count: 0, finished: 1 2025-06-03T10:28:53.139557Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668164589273281:2351], owner: [41:7511668164589273277:2349], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:53.140070Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946533138, txId: 281474976715662] shutting down 2025-06-03T10:28:54.157035Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtncfqx7nxfh5k26f1a7dch, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=MTQwODY3NzQtNDRhNDdhMS01YTViZWNiZS1iZTMwYzRmNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:54.157547Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668168884240613:2362], owner: [41:7511668168884240610:2360], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:54.157671Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668168884240613:2362], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:54.157731Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668168884240613:2362], row count: 0, finished: 1 2025-06-03T10:28:54.157748Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668168884240613:2362], owner: [41:7511668168884240610:2360], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:54.158269Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946534156, txId: 281474976715664] shutting down 2025-06-03T10:28:55.182750Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtncgqra3hrbnkgbbzwbe9p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=N2JhODBiZWUtNTkzMGUxY2YtYjQ4ZmMxMzMtZTU5NTk3YTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:55.183326Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668173179207946:2373], owner: [41:7511668173179207943:2371], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:55.183557Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668173179207946:2373], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:55.183608Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668173179207946:2373], row count: 0, finished: 1 2025-06-03T10:28:55.183614Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668173179207946:2373], owner: [41:7511668173179207943:2371], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:55.184338Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946535182, txId: 281474976715666] shutting down 2025-06-03T10:28:56.202647Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnchqs4mpatz4htr6kpczx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=YTM4YzA5NTQtY2Y1ZjkxMGYtZGJmYjU2ODUtYWExOTU1MDg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:56.203194Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668177474175280:2384], owner: [41:7511668177474175276:2382], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:56.203380Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668177474175280:2384], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:56.203461Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668177474175280:2384], row count: 0, finished: 1 2025-06-03T10:28:56.203482Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668177474175280:2384], owner: [41:7511668177474175276:2382], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:56.204137Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946536202, txId: 281474976715668] shutting down 2025-06-03T10:28:56.672101Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[41:7511668155999337929:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:56.672143Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:28:57.224086Z node 41 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtncjqn467jkdjqda44nzd2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=41&id=YjY4NjgxZS03NWNhYzNmMC0yMTZiNDk2Yi04ZTkyMGIzZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:28:57.224598Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [41:7511668181769142618:2397], owner: [41:7511668181769142614:2395], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:57.224761Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [41:7511668181769142618:2397], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:28:57.226839Z node 41 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [41:7511668181769142618:2397], row count: 1, finished: 1 2025-06-03T10:28:57.226867Z node 41 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [41:7511668181769142618:2397], owner: [41:7511668181769142614:2395], scan id: 0, table id: [72057594046644480:1:0:ds_storage_pools] 2025-06-03T10:28:57.227659Z node 41 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946537223, txId: 281474976715670] shutting down >> CommitOffset::Commit_WithoutSession_TopPast [GOOD] >> CommitOffset::Commit_WithWrongSession_ToParent >> TopicAutoscaling::ControlPlane_AutoscalingWithStorageSizeRetention [GOOD] >> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad >> UpsertLoad::ShouldWriteDataBulkUpsert [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsert2 >> Viewer::JsonAutocompleteSimilarDatabaseNameLowerCase [GOOD] >> Viewer::JsonAutocompleteSchemePOST ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertKeyFrom [GOOD] Test command err: 2025-06-03T10:28:57.456904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.456989Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.457019Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00256a/r3tmp/tmpiTwJYi/pdisk_1.dat 2025-06-03T10:28:57.574797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.592110Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:57.593482Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946536994500 != 1748946536994504 2025-06-03T10:28:57.635787Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:57.635837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:57.646539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:57.720573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.927767Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 100 Inflight: 3 BatchSize: 7 } 2025-06-03T10:28:57.927820Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 BatchSize: 7 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-03T10:28:58.003247Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.075339s, errors=0 2025-06-03T10:28:58.003293Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-06-03T10:28:58.704830Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:58.704919Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:58.704930Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00256a/r3tmp/tmp4W2jN4/pdisk_1.dat 2025-06-03T10:28:58.801853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.818833Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:58.819497Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946538317195 != 1748946538317199 2025-06-03T10:28:58.861683Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:58.861726Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:58.872533Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:58.947871Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.140358Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-03T10:28:59.140393Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-03T10:28:59.204279Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.063827s, errors=0 2025-06-03T10:28:59.204327Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 >> Viewer::FloatPointJsonQuery [GOOD] >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes >> Cdc::InitialScanEnqueuesZeroRecords [GOOD] >> Cdc::InitialScanRacyProgressAndDrop >> CommitOffset::PartitionSplit_OffsetCommit [GOOD] >> CommitOffset::DistributedTxCommit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::AwsRegion [GOOD] Test command err: 2025-06-03T10:28:24.044064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:24.044172Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:24.044208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00288c/r3tmp/tmpfO3mwW/pdisk_1.dat 2025-06-03T10:28:24.173785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.194145Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:24.195246Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946503432589 != 1748946503432593 2025-06-03T10:28:24.242077Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:24.242124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:24.252792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:24.339218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.359337Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:28:24.359432Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:24.370952Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:24.371010Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:24.371205Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:24.371216Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:24.371224Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:24.371278Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:24.371299Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:24.371315Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:28:24.381756Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:24.386748Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:24.386850Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast 
registration request in state WaitScheme: missing processing params 2025-06-03T10:28:24.386883Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:28:24.386889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:24.386895Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:24.386901Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.387094Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:24.387120Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:24.387227Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.387236Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.387246Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:24.387252Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.387264Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:28:24.387300Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:24.387354Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:24.387373Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:24.387741Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.398098Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:24.398155Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:24.543517Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:697:2587], serverId# [1:699:2589], sessionId# [0:0:0] 2025-06-03T10:28:24.545136Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1000 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1000 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:24.545177Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.545328Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 
72075186224037888 2025-06-03T10:28:24.545341Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:24.545356Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1000:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:24.545429Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1000:281474976715657 keys extracted: 0 2025-06-03T10:28:24.545471Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:24.545699Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.545720Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:24.546238Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:24.546356Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.546794Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:24.546807Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.547044Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1000} 2025-06-03T10:28:24.547060Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.547266Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.547276Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:24.547283Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:24.547302Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1000 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:410:2404], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:24.547313Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:24.547325Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.547638Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:682:2578][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-03T10:28:24.549104Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.549500Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult 
at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1000 2025-06-03T10:28:24.549654Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:24.549666Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:25.172809Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:25.172877Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:25.172888Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; tes ... Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:28:59.078711Z node 21 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037891, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:28:59.078716Z node 21 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037891, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:28:59.078721Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037891, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:28:59.078725Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037891, Partition: 0, State: StateIdle] m0000000000p72075186224037888 2025-06-03T10:28:59.078729Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037891, Partition: 0, State: StateIdle] d0000000000_00000000000000000000_00000_0000000001_00000| 2025-06-03T10:28:59.078733Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037891, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:28:59.078737Z node 21 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037891, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:28:59.078742Z node 21 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037891, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:28:59.078776Z node 21 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:28:59.078785Z node 21 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 0 partNo 0 count 1 size 426 2025-06-03T10:28:59.078887Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 1 size 427 actorID [21:790:2654] 2025-06-03T10:28:59.078925Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 0 partno 0 count 1 parts 0 size 427 2025-06-03T10:28:59.079057Z node 21 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 1 size 426 actorID [21:969:2762] 2025-06-03T10:28:59.079077Z node 21 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037891' partition 0 offset 0 partno 0 count 1 parts 0 size 426 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-03T10:28:59.079272Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:28:59.079280Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-03T10:28:59.079411Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 0 max time lag 0ms effective offset 0 2025-06-03T10:28:59.079420Z node 21 :PERSQUEUE DEBUG: subscriber.cpp:68: waiting read cookie 0 partition 0 user $without_consumer offset 0 count 10000 size 26214400 timeout 0 2025-06-03T10:28:59.079437Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:28:59.079446Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:605: [PQ: 72075186224037889, Partition: 0, State: StateIdle] waiting read cookie 0 partition 0 read timeout for $without_consumer offset 0 2025-06-03T10:28:59.079465Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:28:59.089733Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 342 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:28:59.089789Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:28:59.089814Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream1/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-03T10:28:59.089857Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 1 2025-06-03T10:28:59.089867Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:821: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 1 2025-06-03T10:28:59.089904Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 341 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:28:59.089914Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037891, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:28:59.089924Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream2/streamImpl', Partition: 0, SeqNo: 2, partNo: 0, Offset: 0 is stored on disk 2025-06-03T10:28:59.089985Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:28:59.090028Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:28:59.090051Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 1 size 1024000 endOffset 1 max time lag 0ms effective offset 0 2025-06-03T10:28:59.090058Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-06-03T10:28:59.090072Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 1. All data is from uncompacted head. 2025-06-03T10:28:59.090081Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:28:59.090179Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037891][21:1166:2803] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 2 Offset: 0 WriteTimestampMS: 2517 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-03T10:28:59.090197Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:861: Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer readTimeStamp done, result 2517 queuesize 0 startOffset 0 2025-06-03T10:28:59.090221Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][21:1163:2705] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 1 Offset: 0 WriteTimestampMS: 2517 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-03T10:28:59.090238Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][21:1021:2803] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:28:59.090266Z node 21 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][21:867:2705] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:28:59.090297Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-03T10:28:59.090305Z node 21 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 2, at tablet: 72075186224037888 2025-06-03T10:28:59.100765Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: 
TTxRemoveChangeRecords Complete: removed# 1, left# 1, at tablet# 72075186224037888 2025-06-03T10:28:59.100829Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-03T10:28:59.100837Z node 21 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037888 2025-06-03T10:28:59.111271Z node 21 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream1 partitionId=0 2025-06-03T10:28:59.377654Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:28:59.377682Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream1/streamImpl' partition 0 2025-06-03T10:28:59.377748Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream1/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-03T10:28:59.377760Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-06-03T10:28:59.377771Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 2025-06-03T10:28:59.377779Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:28:59.377905Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >>>>> GetRecords path=/Root/Table/Stream2 partitionId=0 2025-06-03T10:28:59.378069Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:28:59.378076Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037891] got client message batch for topic 'Table/Stream2/streamImpl' partition 0 2025-06-03T10:28:59.378188Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream2/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 1 max time lag 0ms effective offset 0 2025-06-03T10:28:59.378194Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037891, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 1 2025-06-03T10:28:59.378203Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037891, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 
2025-06-03T10:28:59.378208Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:28:59.378271Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql [GOOD] >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 >> TopicAutoscaling::PartitionMerge_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export/unittest >> TExportToS3Tests::ShouldRetryAtFinalStage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:28:16.793770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:28:16.793803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:16.793809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:28:16.793815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:28:16.793830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:28:16.793835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:28:16.793847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:28:16.793865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:28:16.794004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:28:16.794108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:28:16.809347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:28:16.809383Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:16.818125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:28:16.818280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:28:16.818334Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:28:16.821551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:28:16.821640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:28:16.821819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.821895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:28:16.823145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:16.823209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:28:16.823609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:16.823624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:28:16.823636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:28:16.823647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:16.823654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:28:16.823682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.825496Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:28:16.849514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:28:16.849622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.849701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:28:16.849762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:28:16.849797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but 
propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.850769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.850808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:28:16.850897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.850910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:28:16.850917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:28:16.850923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:28:16.851540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.851561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:28:16.851570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:28:16.852100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.852115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:28:16.852122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.852130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:28:16.852888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:28:16.855612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:28:16.855674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:28:16.855918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:16.855959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:28:16.855969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.856071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:28:16.856081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:28:16.856121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:28:16.856133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:28:16.856669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:28:16.856680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:16.856743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
d: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0008 2025-06-03T10:28:55.240542Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-03T10:28:55.240621Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-03T10:28:55.240649Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-03T10:28:55.240697Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-03T10:28:55.240710Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-03T10:28:55.240718Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-03T10:28:55.240726Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-03T10:28:55.250936Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:28:58.630547Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 70 rowCount 2 cpuUsage 0.0008 2025-06-03T10:28:58.651008Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.0008 2025-06-03T10:28:58.681674Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 2 2025-06-03T10:28:58.681765Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard idx 72057594046678944:1 data size 70 row count 2 2025-06-03T10:28:58.681801Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Table, is column=0, is olap=0, RowCount 2, DataSize 70 2025-06-03T10:28:58.681834Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-03T10:28:58.681846Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:2 data size 0 row count 0 2025-06-03T10:28:58.681854Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: 
TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=0, is column=0, is olap=0, RowCount 0, DataSize 0, with borrowed parts 2025-06-03T10:28:58.681860Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409547 2025-06-03T10:28:58.692067Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:28:59.551584Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [4:575:2532], attempt# 1 2025-06-03T10:28:59.556320Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:118: [Export] [scanner] Handle TEvExportScan::TEvReset: self# [4:574:2531] 2025-06-03T10:28:59.558610Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [4:575:2532], sender# [4:574:2531] 2025-06-03T10:28:59.558645Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [4:574:2531] 2025-06-03T10:28:59.558678Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [4:575:2532], sender# [4:574:2531], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } 2025-06-03T10:28:59.558762Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [4:575:2532], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: [6e3e0a41fdab8add833862f1bd2954c3,1d8dd09e584ce6a47582a31b591900e2,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:24545 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1BE85278-BA2F-412E-8559-922E6C04AD85 amz-sdk-request: attempt=1 content-length: 459 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-03T10:28:59.564855Z node 4 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [4:575:2532], result# 2025-06-03T10:28:59.564977Z node 4 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [4:574:2531], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:28:59.569845Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 448 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:59.569885Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 281474976710759, tablet: 72075186233409547, partId: 0 2025-06-03T10:28:59.569928Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944, message: Source { RawX1: 448 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 
Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:59.569949Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 281474976710759:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 448 RawX2: 17179871600 } Origin: 72075186233409547 State: 2 TxId: 281474976710759 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:28:59.569971Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 281474976710759:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:28:59.569976Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:59.569985Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 281474976710759:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:28:59.569995Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710759:0 129 -> 240 2025-06-03T10:28:59.570109Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 281474976710759:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:28:59.570978Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:59.571142Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710759:0, at schemeshard: 72057594046678944 2025-06-03T10:28:59.571156Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710759:0 ProgressState 2025-06-03T10:28:59.571178Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-03T10:28:59.571184Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:59.571191Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710759:0 progress is 1/1 2025-06-03T10:28:59.571194Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:59.571201Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710759, ready parts: 1/1, is published: true 2025-06-03T10:28:59.571253Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [4:126:2151] message: TxId: 281474976710759 2025-06-03T10:28:59.571265Z node 4 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710759 ready parts: 1/1 2025-06-03T10:28:59.571273Z node 4 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710759:0 2025-06-03T10:28:59.571279Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710759:0 2025-06-03T10:28:59.571368Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:28:59.572046Z node 4 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710759 2025-06-03T10:28:59.572071Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710759 2025-06-03T10:28:59.572691Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:28:59.572708Z node 4 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [4:593:2546] TestWaitNotification: OK eventTxId 102 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkqlKeyFrom [GOOD] Test command err: 2025-06-03T10:28:59.767895Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:59.767994Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:59.768032Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002548/r3tmp/tmpZuTBTV/pdisk_1.dat 2025-06-03T10:28:59.902922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.921929Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:59.923453Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946539180916 != 1748946539180920 2025-06-03T10:28:59.965663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:59.965713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:59.976738Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:00.050686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:00.281725Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 KeyFrom: 12345 } 2025-06-03T10:29:00.281783Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 KeyFrom: 12345 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-03T10:29:00.352539Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.070664s, errors=0 2025-06-03T10:29:00.352580Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 >> Viewer::LevenshteinDistance [GOOD] >> Viewer::JsonStorageListingV2 >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] |64.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |64.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut |64.6%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/with_offset_ranges_mode_ut >> UpsertLoad::ShouldCreateTable >> UpsertLoad::ShouldWriteKqpUpsert2 >> Viewer::JsonAutocompleteSchemePOST [GOOD] >> 
Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] >> Viewer::SelectStringWithBase64Encoding [GOOD] >> Viewer::QueryExecuteScript >> UpsertLoad::ShouldWriteKqpUpsert >> Cdc::SequentialSplitMerge [GOOD] >> Cdc::ShouldBreakLocksOnConcurrentSchemeTx ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsert2 [GOOD] Test command err: 2025-06-03T10:28:59.061964Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:59.062051Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:59.062080Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002567/r3tmp/tmpllDAVF/pdisk_1.dat 2025-06-03T10:28:59.165323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.188476Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:59.190061Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946538660872 != 1748946538660876 2025-06-03T10:28:59.233874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:59.233920Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:59.244468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:59.318767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.559863Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:28:59.559941Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-03T10:28:59.630981Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.070878s, errors=0 2025-06-03T10:28:59.631033Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-06-03T10:29:00.235524Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:00.235638Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:00.235653Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002567/r3tmp/tmpd9Q0QH/pdisk_1.dat 2025-06-03T10:29:00.332730Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:00.364913Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:00.365516Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946539828207 != 1748946539828211 2025-06-03T10:29:00.414022Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:00.414069Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:00.425822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:00.505636Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:00.710386Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:29:00.710435Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-03T10:29:00.773692Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.063173s, errors=0 2025-06-03T10:29:00.773748Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 >> TNodeBrokerTest::ShiftIdRangeRemoveExpired >> Viewer::SelectStringWithNoBase64Encoding [GOOD] >> Viewer::ServerlessNodesPage >> ReadLoad::ShouldReadKqp [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::AuthorizeYdbTokenWithDatabaseAttributes [GOOD] Test command err: 2025-06-03T10:28:57.046381Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:727:2426], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.046483Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.046509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:28:57.046884Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:724:2368], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.046941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.046949Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:57.171369Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:57.256164Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:28:57.271994Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:28:57.324298Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 5395, node 1 TClient is connected to server localhost:13785 2025-06-03T10:28:57.354990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:57.355007Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:57.355010Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:57.355110Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Request timer = 0.002404569461 BASE_PERF = 1.000346469 2025-06-03T10:28:57.955469Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668179044847900:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:57.955817Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:57.971647Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668179044847879:2079] 1748946537955271 != 1748946537955274 2025-06-03T10:28:57.971761Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63971, node 3 2025-06-03T10:28:57.986583Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:57.986598Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:57.986600Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:57.986661Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16872 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:58.061148Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:58.061199Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:58.062012Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.062133Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:28:58.068448Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.069139Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:58.453684Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:28:58.453703Z node 3 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:28:58.455225Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668183339815870:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.455226Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668183339815879:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.455249Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.456028Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-06-03T10:28:58.458582Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668183339815884:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:28:58.514432Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668183339815935:2344] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:58.553655Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=ODhjZjNkZmEtOThkNmFmYjgtYWM3N2QyZTAtNzNmZmYyMzE=, ActorId: [3:7511668183339815868:2337], ActorState: ExecuteState, TraceId: 01jwtnckypagjv93c1kmgd50mm, Create QueryResponse for error on request, msg: Scheme operations cannot be executed inside transaction 2025-06-03T10:28:59.014436Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668189416031606:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:59.014459Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:59.028576Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:59.028784Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511668189416031583:2079] 1748946539014235 != 1748946539014238 TServer::EnableGrpc on GrpcPort 3332, node 4 2025-06-03T10:28:59.042706Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:59.042721Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:59.042724Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:59.042796Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11431 2025-06-03T10:28:59.119384Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:59.119429Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:59.120364Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:59.123527Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.124279Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:59.566005Z node 4 :GRPC_SERVER DEBUG: grpc_request_proxy.cpp:595: Got grpc request# request auth and check internal request, traceId# undef, sdkBuildInfo# undef, state# AS_NOT_PERFORMED, database# /Root, peer# , grpcInfo# undef, timeout# 9.999980s 2025-06-03T10:28:59.566066Z node 4 :TICKET_PARSER INFO: v ... c000] received request Name# ListDatabases ok# false data# peer# 2025-06-03T10:29:00.859143Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe310e00] received request Name# RemoveDatabase ok# false data# peer# 2025-06-03T10:29:00.859153Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe30b100] received request Name# DescribeDatabaseOptions ok# false data# peer# 2025-06-03T10:29:00.859175Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe35a300] received request Name# GetScaleRecommendation ok# false data# peer# 2025-06-03T10:29:00.859187Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe363800] received request Name# ListEndpoints ok# false data# peer# 2025-06-03T10:29:00.859208Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe35aa00] received request Name# WhoAmI ok# false data# peer# 2025-06-03T10:29:00.859221Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe308000] received request Name# NodeRegistration ok# false data# peer# 2025-06-03T10:29:00.859238Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe378000] received request Name# Scan ok# false data# peer# 2025-06-03T10:29:00.859271Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe370700] received request Name# GetShardLocations ok# false data# peer# 2025-06-03T10:29:00.859272Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe30a300] received request Name# DescribeTable ok# false data# peer# 2025-06-03T10:29:00.859309Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe367100] received request Name# CreateSnapshot ok# false data# peer# 2025-06-03T10:29:00.859315Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe35b100] received request Name# RefreshSnapshot ok# false data# peer# 2025-06-03T10:29:00.859347Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe304000] received request Name# DiscardSnapshot ok# false data# peer# 2025-06-03T10:29:00.859351Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe311c00] received request Name# List ok# false data# peer# 2025-06-03T10:29:00.859391Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe358700] received request Name# RateLimiter/CreateResource ok# false data# peer# 2025-06-03T10:29:00.859393Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe307100] received request Name# RateLimiter/AlterResource ok# false data# peer# 2025-06-03T10:29:00.859430Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe313800] received request Name# RateLimiter/DropResource ok# false data# peer# 2025-06-03T10:29:00.859432Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbdebc000] received request Name# RateLimiter/ListResources ok# false data# peer# 2025-06-03T10:29:00.859463Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbc515500] received request Name# RateLimiter/AcquireResource ok# false data# peer# 2025-06-03T10:29:00.859464Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe364e00] received request Name# RateLimiter/DescribeResource ok# false data# peer# 2025-06-03T10:29:00.859495Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe30b800] received request Name# CreateStream ok# false data# peer# 2025-06-03T10:29:00.859495Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe364700] received request Name# ListStreams ok# false data# peer# 
2025-06-03T10:29:00.859529Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe371500] received request Name# DeleteStream ok# false data# peer# 2025-06-03T10:29:00.859531Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe365500] received request Name# DescribeStream ok# false data# peer# 2025-06-03T10:29:00.859565Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfff100] received request Name# SetWriteQuota ok# false data# peer# 2025-06-03T10:29:00.859565Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe367800] received request Name# ListShards ok# false data# peer# 2025-06-03T10:29:00.859601Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe358000] received request Name# UpdateStream ok# false data# peer# 2025-06-03T10:29:00.859604Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe307800] received request Name# PutRecord ok# false data# peer# 2025-06-03T10:29:00.859633Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe354700] received request Name# GetRecords ok# false data# peer# 2025-06-03T10:29:00.859633Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe378e00] received request Name# PutRecords ok# false data# peer# 2025-06-03T10:29:00.859663Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe355500] received request Name# GetShardIterator ok# false data# peer# 2025-06-03T10:29:00.859666Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbf8ce700] received request Name# SubscribeToShard ok# false data# peer# 2025-06-03T10:29:00.859697Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfed500] received request Name# DescribeLimits ok# false data# peer# 2025-06-03T10:29:00.859703Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfece00] received request Name# DescribeStreamSummary ok# false data# peer# 2025-06-03T10:29:00.859732Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfec700] received request Name# DecreaseStreamRetentionPeriod ok# false data# peer# 2025-06-03T10:29:00.859736Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cffea00] received request Name# IncreaseStreamRetentionPeriod ok# false data# peer# 2025-06-03T10:29:00.859764Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe313100] received request Name# UpdateShardCount ok# false data# peer# 2025-06-03T10:29:00.859770Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe359500] received request Name# UpdateStreamMode ok# false data# peer# 2025-06-03T10:29:00.859796Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe30aa00] received request Name# RegisterStreamConsumer ok# false data# peer# 2025-06-03T10:29:00.859810Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe304e00] received request Name# DeregisterStreamConsumer ok# false data# peer# 2025-06-03T10:29:00.859825Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfe1500] received request Name# DescribeStreamConsumer ok# false data# peer# 2025-06-03T10:29:00.859843Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfe0e00] received request Name# ListStreamConsumers ok# false data# peer# 2025-06-03T10:29:00.859868Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfedc00] received request Name# AddTagsToStream ok# false data# peer# 2025-06-03T10:29:00.859879Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe357800] received request Name# DisableEnhancedMonitoring ok# false data# peer# 2025-06-03T10:29:00.859901Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff5500] received request Name# EnableEnhancedMonitoring ok# false data# peer# 2025-06-03T10:29:00.859910Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff2a00] received 
request Name# ListTagsForStream ok# false data# peer# 2025-06-03T10:29:00.859933Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cffce00] received request Name# MergeShards ok# false data# peer# 2025-06-03T10:29:00.859938Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe354e00] received request Name# RemoveTagsFromStream ok# false data# peer# 2025-06-03T10:29:00.859965Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe361500] received request Name# SplitShard ok# false data# peer# 2025-06-03T10:29:00.859968Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe359c00] received request Name# StartStreamEncryption ok# false data# peer# 2025-06-03T10:29:00.859995Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff3800] received request Name# StopStreamEncryption ok# false data# peer# 2025-06-03T10:29:00.859997Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe360700] received request Name# SelfCheck ok# false data# peer# 2025-06-03T10:29:00.860025Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe358e00] received request Name# NodeCheck ok# false data# peer# 2025-06-03T10:29:00.860029Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe35b800] received request Name# CreateSession ok# false data# peer# 2025-06-03T10:29:00.860055Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff4e00] received request Name# DeleteSession ok# false data# peer# 2025-06-03T10:29:00.860064Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe362a00] received request Name# AttachSession ok# false data# peer# 2025-06-03T10:29:00.860089Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbf57f500] received request Name# BeginTransaction ok# false data# peer# 2025-06-03T10:29:00.860093Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe3df100] received request Name# CommitTransaction ok# false data# peer# 2025-06-03T10:29:00.860121Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfe2a00] received request Name# RollbackTransaction ok# false data# peer# 2025-06-03T10:29:00.860125Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff4700] received request Name# ExecuteQuery ok# false data# peer# 2025-06-03T10:29:00.860149Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe312a00] received request Name# ExecuteScript ok# false data# peer# 2025-06-03T10:29:00.860153Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cffdc00] received request Name# FetchScriptResults ok# false data# peer# 2025-06-03T10:29:00.860179Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe310000] received request Name# ExecuteTabletMiniKQL ok# false data# peer# 2025-06-03T10:29:00.860184Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbc516300] received request Name# ChangeTabletSchema ok# false data# peer# 2025-06-03T10:29:00.860212Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbc47b800] received request Name# RestartTablet ok# false data# peer# 2025-06-03T10:29:00.860215Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe304700] received request Name# CreateLogStore ok# false data# peer# 2025-06-03T10:29:00.860242Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe306300] received request Name# DescribeLogStore ok# false data# peer# 2025-06-03T10:29:00.860244Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe308700] received request Name# DropLogStore ok# false data# peer# 2025-06-03T10:29:00.860269Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe305c00] received request Name# AlterLogStore ok# false data# peer# 2025-06-03T10:29:00.860275Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe3dea00] received 
request Name# CreateLogTable ok# false data# peer# 2025-06-03T10:29:00.860300Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533bbe309c00] received request Name# DescribeLogTable ok# false data# peer# 2025-06-03T10:29:00.860304Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cffc700] received request Name# DropLogTable ok# false data# peer# 2025-06-03T10:29:00.860330Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff6a00] received request Name# AlterLogTable ok# false data# peer# 2025-06-03T10:29:00.860334Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cfe5500] received request Name# Login ok# false data# peer# 2025-06-03T10:29:00.860362Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff0700] received request Name# DescribeReplication ok# false data# peer# 2025-06-03T10:29:00.860366Z node 5 :GRPC_SERVER DEBUG: logger.cpp:36: [0x533b9cff5c00] received request Name# DescribeView ok# false data# peer# >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonAutocompleteSchemePOST [GOOD] Test command err: 2025-06-03T10:28:56.029990Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:319:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:56.030074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:56.030099Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 17034, node 1 TClient is connected to server localhost:3853 json result: {"Success":true,"Result":{"Total":5,"Entities":[{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"},{"Name":"/Root/test","Type":"ext_sub_domain"},{"Name":"/Root/slice","Type":"ext_sub_domain"},{"Name":"/Root/qwerty","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:28:57.145941Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:57.146002Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:57.146021Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 30707, node 2 TClient is connected to server localhost:9880 json result: {"Success":true,"Result":{"Total":2,"Entities":[{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:28:58.159714Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:58.159765Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:58.159796Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 15155, node 3 TClient is connected to server localhost:12467 json result: {"Success":true,"Result":{"Total":2,"Entities":[{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:28:59.352113Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:317:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:59.352192Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:59.352216Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 24183, node 4 TClient is connected to server localhost:62913 json result: {"Success":true,"Result":{"Total":2,"Entities":[{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:29:00.698435Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:00.698541Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:00.698561Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 61139, node 5 TClient is connected to server localhost:24703 json result: {"Success":true,"Result":{"Total":3,"Entities":[{"Name":"clients","Type":"table"},{"Name":"orders","Type":"table"},{"Name":"products","Type":"table"}]},"Version":2} |64.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> Cdc::InitialScanRacyProgressAndDrop [GOOD] >> Cdc::EnqueueRequestProcessSend |64.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join |64.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/join/ydb-core-kqp-ut-join >> TopicAutoscaling::Simple_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::Simple_PQv1 >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false >> TNodeBrokerTest::NodesV2BackMigration |64.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |64.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 |64.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_export_reboots_s3/ydb-core-tx-schemeshard-ut_export_reboots_s3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteDataBulkUpsertLocalMkql2 [GOOD] Test command err: 2025-06-03T10:28:59.824119Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:59.824246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:59.824289Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002537/r3tmp/tmpVY0mX8/pdisk_1.dat 2025-06-03T10:28:59.961265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.979767Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:59.980979Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946539271689 != 1748946539271693 2025-06-03T10:29:00.023432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:00.023482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:00.034209Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:00.109430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:00.327202Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "usertable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:29:00.327237Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "usertable" 2025-06-03T10:29:00.401411Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 2} TUpsertActor finished in 0.074108s, errors=0 2025-06-03T10:29:00.401457Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 2025-06-03T10:29:01.115191Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:01.115301Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:01.115314Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002537/r3tmp/tmpsFKnVC/pdisk_1.dat 2025-06-03T10:29:01.230182Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:01.248068Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:01.248690Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946540589463 != 1748946540589467 2025-06-03T10:29:01.296291Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:01.296348Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:01.309822Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:01.385502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:01.599910Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertLocalMkqlStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" } UpsertLocalMkqlStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:29:01.599961Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 1, target# TabletId: 72075186224037888 TableId: 2 TableName: "JustTable" 2025-06-03T10:29:01.693751Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.093716s, errors=0 2025-06-03T10:29:01.693803Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] >> TDynamicNameserverTest::CacheMissPipeDisconnect-EnableNodeBrokerDeltaProtocol-false [GOOD] >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true >> UpsertLoad::ShouldCreateTable [GOOD] >> UpsertLoad::ShouldDropCreateTable |64.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |64.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut |64.6%| [LD] {RESULT} $(B)/ydb/services/persqueue_v1/ut/ydb-services-persqueue_v1-ut >> KqpOlap::OlapLayout >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] |64.6%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |64.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator |64.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_read_iterator/ydb-core-tx-datashard-ut_read_iterator >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] >> KqpOlapTiering::EvictionResetTiering >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampForDisplacedUpsert [GOOD] Test command err: 2025-06-03T10:28:15.506847Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668000858316013:2080];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:15.506862Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028c0/r3tmp/tmpxl4V5d/pdisk_1.dat 2025-06-03T10:28:15.581766Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29146, node 1 2025-06-03T10:28:15.607956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:15.608010Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:15.608074Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:15.608077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:15.608080Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:15.608129Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:15.613833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:15.615939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.619187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:28:15.652687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:15.661409Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7511668000858316579:2308] 2025-06-03T10:28:15.661491Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:15.662945Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:15.662963Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:15.663122Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:15.663127Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:15.663132Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:15.663177Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:15.663183Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:15.663190Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7511668000858316593:2308] in generation 1 2025-06-03T10:28:15.663919Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:15.669202Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:15.669278Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:15.669573Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7511668000858316595:2309] 2025-06-03T10:28:15.669579Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:15.669584Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:15.669588Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:15.669618Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:15.669636Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:15.669639Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:15.669651Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:15.669659Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:15.669662Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:15.705867Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668000858316575:2295], serverId# [1:7511668000858316598:2306], sessionId# [0:0:0] 2025-06-03T10:28:15.705924Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:15.706030Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:15.706067Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:15.706399Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 
72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:15.706902Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:15.706932Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:15.707603Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668000858316611:2313], serverId# [1:7511668000858316613:2315], sessionId# [0:0:0] 2025-06-03T10:28:15.708821Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1748946495751 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946495751 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:15.708839Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:15.708879Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:15.708898Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:15.708908Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:15.708918Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1748946495751:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:15.708989Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1748946495751:281474976715657 keys extracted: 0 2025-06-03T10:28:15.709029Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:15.709051Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:15.709066Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:15.709568Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:15.709692Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:15.709902Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 0 2025-06-03T10:28:15.709908Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:15.711606Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1748946495751} 2025-06-03T10:28:15.711638Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:15.711657Z node 1 :TX_DATASHARD 
DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:15.711662Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:15.711667Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:15.711685Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1748946495751 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7511668000858316397:2194], exec latency: 0 ms, propose latency: 2 ms 2025-06-03T10:28:15.711699Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:15.711711Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:15.711729Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1748946495758 2025-06-03T10:28:15.712573Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7511668000858316595:2309][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-03T10:28:15.713066Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:15.713081Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:15.717483Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:15.717592Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_b ... Timecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... unblocking NKikimr::TEvMediatorTimecast::TEvGranularUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... unblocking NKikimr::TEvMediatorTimecast::TEvUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR ... 
unblocking NKikimr::TEvMediatorTimecast::TEvUpdate from TX_MEDIATOR_TABLET_QUEUE_ACTOR to TX_MEDIATOR_TIMECAST_ACTOR 2025-06-03T10:29:02.356804Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3780: Notified by mediator time cast with PlanStep# 9000 at tablet 72075186224037888 2025-06-03T10:29:02.356836Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:02.356864Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v9000/18446744073709551615, at tablet# 72075186224037888 2025-06-03T10:29:02.356919Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:29:02.357649Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-06-03T10:29:02.357667Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-06-03T10:29:02.357675Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:02.357684Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 12000 from mediator time cast 2025-06-03T10:29:02.357706Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][27:682:2578] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-06-03T10:29:02.357720Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][27:842:2685] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 6 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] BodySize: 0 }] } 2025-06-03T10:29:02.357765Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-03T10:29:02.357809Z node 27 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [27:842:2685], at tablet# 72075186224037888 2025-06-03T10:29:02.357817Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-03T10:29:02.357832Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][27:842:2685] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:29:02.357857Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:928:2685] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 6 
Group: 0 Step: 9000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:29:02.357936Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:02.357948Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:02.357984Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 10 requestId: cookie: 6 2025-06-03T10:29:02.358012Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:02.358017Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:02.358031Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 6 partNo : 0 messageNo: 11 size 26 offset: -1 2025-06-03T10:29:02.358069Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v9000/0 2025-06-03T10:29:02.358084Z node 27 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v9000/0 2025-06-03T10:29:02.358103Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-03T10:29:02.358154Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 5 PartNo 0 PackedSize 107 count 1 nextOffset 6 batches 1 2025-06-03T10:29:02.358200Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 5,1 HeadOffset 0 endOffset 5 curOffset 6 d0000000000_00000000000000000005_00000_0000000001_00000| size 93 WTime 8979 2025-06-03T10:29:02.358218Z node 27 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:29:02.358223Z node 27 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:29:02.358227Z node 27 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:29:02.358232Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:29:02.358236Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 2025-06-03T10:29:02.358241Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000005_00000_0000000001_00000| 
2025-06-03T10:29:02.358245Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:29:02.358249Z node 27 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:29:02.358255Z node 27 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:29:02.358267Z node 27 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:29:02.358278Z node 27 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 5 partNo 0 count 1 size 93 2025-06-03T10:29:02.358494Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 5 count 1 size 93 actorID [27:785:2653] 2025-06-03T10:29:02.358527Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 5 partno 0 count 1 parts 0 size 93 2025-06-03T10:29:02.368785Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:29:02.368838Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:29:02.368861Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-03T10:29:02.368929Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 11 requestId: cookie: 6 2025-06-03T10:29:02.369027Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:928:2685] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 6 Offset: 5 WriteTimestampMS: 8979 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 6 } } } 2025-06-03T10:29:02.369063Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][27:842:2685] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:29:02.369113Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-03T10:29:02.369121Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 6, at tablet: 72075186224037888 2025-06-03T10:29:02.369321Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 ... 
checking the update is logged before the new resolved timestamp >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-03T10:29:02.482587Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:02.482610Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:02.482653Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 8 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 6 max time lag 0ms effective offset 0 2025-06-03T10:29:02.482662Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 8 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 6 2025-06-03T10:29:02.482676Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 8. All data is from uncompacted head. 2025-06-03T10:29:02.482682Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:29:02.482723Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert2 [GOOD] Test command err: 2025-06-03T10:29:01.767612Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:01.767706Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:01.767736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002530/r3tmp/tmp9RPIbv/pdisk_1.dat 2025-06-03T10:29:01.902538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:01.919848Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:01.921174Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946541281362 != 1748946541281366 2025-06-03T10:29:01.963483Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:01.963529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:01.976016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:02.054251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.261429Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "JustTable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-03T10:29:02.261461Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-03T10:29:02.261828Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-06-03T10:29:02.261837Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.261845Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.261851Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.261856Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.261862Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.262424Z node 1 
:DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=NjA0NGI4ZTktMWZmODhkNzItZTQwMjYwYi04ODFkNjM4NQ== 2025-06-03T10:29:02.262766Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=MTRjMmQ1LTljOTJlNWEwLTU5NGQyMjEwLTI3ZGFkYzM0 2025-06-03T10:29:02.262773Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=YzE4NDQ3NGUtZGIwMDcwMWEtNWFlZDYyZDgtYjgyMzUzOTE= 2025-06-03T10:29:02.262934Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=MzVlZjI3OGEtZGJlMDhjYjctNzMyMWExMzktNGEzMzJjNjA= 2025-06-03T10:29:02.263108Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=NDI3YmRjYjUtOTEwMmZhNmEtMmU1YjA1MDEtOWFmZWM1OTc= 2025-06-03T10:29:02.263706Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263722Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263727Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263733Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263737Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263748Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:777:2653], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.263757Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.264677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:29:02.270774Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:793:2669] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.271081Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:794:2670] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.271155Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.271244Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:802:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.420845Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.420882Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.420892Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.420902Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:787:2663], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.420911Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.452544Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.554701Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} finished in 1748946542.554682s, errors=0 2025-06-03T10:29:02.554824Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1748946542554 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.566155Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:963:2769] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.616349Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} finished in 1748946542.616327s, errors=0 2025-06-03T10:29:02.616484Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1748946542616 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.627672Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1014:2791] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.675542Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1748946542.675520s, errors=0 2025-06-03T10:29:02.675693Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1748946542675 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.687091Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1065:2813] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.714932Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1093:2826] txid# 281474976715681, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.774746Z node 1 
:DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1748946542.774724s, errors=0 2025-06-03T10:29:02.774902Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1748946542774 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.789471Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1748946542.789451s, errors=0 2025-06-03T10:29:02.789618Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1748946542789 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.789630Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 0.527824s, oks# 20, errors# 0 2025-06-03T10:29:02.789659Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 >> KqpOlapAggregations::JsonDoc_Exists >> Cdc::ShouldBreakLocksOnConcurrentSchemeTx [GOOD] >> Cdc::ResolvedTimestampsContinueAfterMerge >> KqpOlapAggregations::AggregationCountPushdown >> Viewer::ServerlessNodesPage [GOOD] >> Viewer::ServerlessWithExclusiveNodes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldWriteKqpUpsert [GOOD] Test command err: 2025-06-03T10:29:02.010850Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.010953Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.010987Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002529/r3tmp/tmpfQgNFr/pdisk_1.dat 2025-06-03T10:29:02.139821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.157021Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:02.158288Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946541457480 != 1748946541457484 2025-06-03T10:29:02.200583Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:02.200638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:02.211264Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:02.285348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.499358Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertKqpStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } UpsertKqpStart { RowCount: 20 Inflight: 5 } 2025-06-03T10:29:02.499412Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:298: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} Bootstrap called: RowCount: 20 Inflight: 5 2025-06-03T10:29:02.499999Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:361: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} started# 5 actors each with inflight# 4 2025-06-03T10:29:02.500011Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.500024Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.500030Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.500037Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.500044Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:116: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} Bootstrap called: RowCount: 4 Inflight: 1 2025-06-03T10:29:02.500909Z node 1 
:DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} session: ydb://session/3?node_id=1&id=OTA3NTg3MzYtOGJlNDk0MjEtNWRjYzA1ZjgtYjlmZDU1OGI= 2025-06-03T10:29:02.501621Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} session: ydb://session/3?node_id=1&id=YzgzNjY1OTktOTk3ZjM4NTYtZTIwOTg4N2YtMmRiODc1YTg= 2025-06-03T10:29:02.501640Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} session: ydb://session/3?node_id=1&id=MjI2ZjBkNi04YzVmODBiNy1kYzQ2NzA1Zi1hYmE3MTdlNQ== 2025-06-03T10:29:02.501950Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} session: ydb://session/3?node_id=1&id=Y2Q3ZTk5MmMtNjM5MjUzMWMtOWQzMGI2OWMtZjViZDM1NTY= 2025-06-03T10:29:02.502287Z node 1 :DS_LOAD_TEST DEBUG: kqp_upsert.cpp:207: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} session: ydb://session/3?node_id=1&id=YTI4M2Q2MzEtY2ViYmUwMzktZmM1MzgwMmMtOGQ1MWMyNjE= 2025-06-03T10:29:02.503465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:773:2649], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:774:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503517Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:775:2651], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:776:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503539Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:777:2653], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.503552Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.505393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:29:02.515817Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:793:2669] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.516377Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:794:2670] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.516475Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:795:2671] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.516607Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:802:2672] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.670137Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:784:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.670171Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:785:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.670178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:786:2662], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.670184Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:787:2663], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.670189Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:788:2664], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.702653Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:890:2731] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.822639Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:940:2760] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.865478Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 1} finished in 1748946542.865460s, errors=0 2025-06-03T10:29:02.865646Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 1 { Tag: 1 DurationMs: 1748946542865 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.879979Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 3} finished in 1748946542.879959s, errors=0 2025-06-03T10:29:02.880107Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 3 { Tag: 3 DurationMs: 1748946542879 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.894682Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1011:2788] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.942975Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 2} finished in 1748946542.942951s, errors=0 2025-06-03T10:29:02.943122Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 2 { Tag: 2 DurationMs: 1748946542942 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:02.954732Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1062:2810] txid# 281474976715678, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.006964Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 5} finished in 1748946543.006944s, errors=0 2025-06-03T10:29:03.007034Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 5 { Tag: 5 DurationMs: 1748946543006 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:03.018453Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1113:2832] txid# 281474976715683, 
issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.066963Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:190: TKqpUpsertActor# {Tag: 0, parent: [1:733:2615], subTag: 4} finished in 1748946543.066944s, errors=0 2025-06-03T10:29:03.067042Z node 1 :DS_LOAD_TEST INFO: kqp_upsert.cpp:376: kqp# {Tag: 0, parent: [1:732:2614], subTag: 2} finished: 4 { Tag: 4 DurationMs: 1748946543066 OperationsOK: 4 OperationsError: 0 } 2025-06-03T10:29:03.067052Z node 1 :DS_LOAD_TEST NOTICE: kqp_upsert.cpp:395: TKqpUpsertActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 2} finished in 0.567085s, oks# 20, errors# 0 2025-06-03T10:29:03.067074Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:733:2615] with tag# 2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TDynamicNameserverTest::CacheMissNoDeadline-EnableNodeBrokerDeltaProtocol-true [GOOD] Test command err: 2025-06-03T10:29:02.243249Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.243420Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.243519Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.245170Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.288161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:02.288203Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-03T10:29:02.294894Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:02.295558Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:02.295666Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-03T10:29:02.295958Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:02.296922Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-03T10:29:02.297120Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-03T10:29:02.297175Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-03T10:29:02.297197Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-03T10:29:02.297205Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-03T10:29:02.297228Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-03T10:29:02.297286Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-03T10:29:02.297317Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-03T10:29:02.297325Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-03T10:29:02.297331Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-03T10:29:02.297360Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-03T10:29:02.297368Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-03T10:29:02.323385Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-03T10:29:02.323453Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.025000Z 2025-06-03T10:29:02.323470Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-03T10:29:02.323485Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:609: Preparing update nodes log for epoch ##1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z nodes=0 expired=0 removed=0 2025-06-03T10:29:02.366193Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:201:2197], Recipient [1:170:2176]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:02.366266Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:657: Handle NKikimr::TEvTabletPipe::TEvClientConnected { TabletId: 72057594037936129 Status: OK ServerId: [1:201:2197] Leader: 1 Dead: 0 Generation: 2 VersionInfo:  } ... 
waiting for nameservers are connected (done) 2025-06-03T10:29:02.368197Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:18:2065], Recipient [1:170:2176]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { MinEpoch: 1 } 2025-06-03T10:29:02.368239Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:02.368262Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #1.1 1970-01-01T00:00:00.025000Z - 1970-01-01T01:00:00.025000Z - 1970-01-01T02:00:00.025000Z 2025-06-03T10:29:02.368363Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:205:2201], Recipient [1:170:2176]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:02.368432Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [1:203:2199], Recipient [1:170:2176]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" } 2025-06-03T10:29:02.368440Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-03T10:29:02.368452Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1" 2025-06-03T10:29:02.368537Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [1:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:29:02.368557Z node 1 :TX_PROXY_SCHEME_CACHE TRACE: cache.cpp:2322: Create subscriber: self# [1:16:2063], path# /dc-1, domainOwnerId# 72057594046678944 2025-06-03T10:29:02.379287Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2589: HandleNotify: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } 
PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046678944 } 2025-06-03T10:29:02.379428Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2464: ResolveCacheItem: self# [1:16:2063], notify# NKikimr::TSchemeBoardEvents::TEvNotifyUpdate { Path: /dc-1 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DescribeSchemeResult: Status: StatusSuccess Path: "/dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046678944 }, by path# { Subscriber: { Subscriber: [1:207:2202] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 0 Status: undefined Kind: 0 TableKind: 0 Created: 0 CreateStep: 0 PathId: DomainId: IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, by pathId# nullptr 2025-06-03T10:29:02.379523Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [1:16:2063], cacheItem# { Subscriber: { Subscriber: [1:207:2202] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:29:02.379635Z node 1 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [1:214:2203], recipient# [1:206:2176], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:29:02.379662Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: 
false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:29:02.379695Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } FixedNodeId: false Path: "dc-1": scope id# <720575940466789 ... 06-03T10:29:02.962533Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-03T10:29:02.962539Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:83: Registration request from host1:1001 (not fixed) tenant: dc-1 2025-06-03T10:29:02.962576Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1024.v2 host1:1001 to database state=Active resolvehost=host1.host1.host1 address=1.2.3.4 dc=1 location=DC=1/M=2/R=3/U=4/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=0 authorizedbycertificate=false bridgePileId= 2025-06-03T10:29:02.962635Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1024.v2 host1:1001 2025-06-03T10:29:02.962645Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 1 to 2 2025-06-03T10:29:02.962649Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=2 2025-06-03T10:29:02.973782Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-03T10:29:02.973819Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1024.v2 host1:1001 2025-06-03T10:29:02.973834Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 1 to 2 2025-06-03T10:29:02.973841Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1024.v2 host1:1001 to epoch cache 2025-06-03T10:29:02.973879Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1024.v2 to update nodes log 2025-06-03T10:29:02.973948Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } 2025-06-03T10:29:02.974130Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [2:216:2205], Recipient [2:168:2174]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:02.974177Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039938, Sender [2:201:2197], Recipient [2:168:2174]: NKikimr::NNodeBroker::TEvNodeBroker::TEvRegistrationRequest { Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" } 2025-06-03T10:29:02.974185Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:255: StateWork, processing event TEvNodeBroker::TEvRegistrationRequest 2025-06-03T10:29:02.974198Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1502: Handle TEvNodeBroker::TEvRegistrationRequest: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" 
Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1" 2025-06-03T10:29:02.974258Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:2702: Handle TEvTxProxySchemeCache::TEvNavigateKeySet: self# [2:16:2063], request# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }] } 2025-06-03T10:29:02.974290Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:1842: FillEntry for TNavigate: self# [2:16:2063], cacheItem# { Subscriber: { Subscriber: [2:205:2200] DomainOwnerId: 72057594046678944 Type: 2 SyncCookie: 0 } Filled: 1 Status: StatusSuccess Kind: 2 TableKind: 0 Created: 1 CreateStep: 0 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] DomainId: [OwnerId: 72057594046678944, LocalPathId: 1] IsPrivatePath: 0 IsVirtual: 0 SchemaVersion: 0 }, entry# { Path: dc-1 TableId: [18446744073709551615:18446744073709551615:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Unknown Kind: KindUnknown DomainInfo }, props# { Cookie: 0 IsSync: false Partial: 0 } 2025-06-03T10:29:02.974352Z node 2 :TX_PROXY_SCHEME_CACHE DEBUG: cache.cpp:265: Send result: self# [2:218:2206], recipient# [2:217:2174], result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:29:02.974367Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1570: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: response# { Path: dc-1 TableId: [72057594046678944:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046678944, LocalPathId: 1] Params { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:29:02.974382Z node 2 :NODE_BROKER TRACE: node_broker.cpp:1596: Finished resolving tenant: request# Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } FixedNodeId: false Path: "dc-1": scope id# <72057594046678944:1>: serviced subdomain# 72057594046678944:1 2025-06-03T10:29:02.974396Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435073, Sender [2:217:2174], Recipient [2:168:2174]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvResolvedRegistrationRequest 2025-06-03T10:29:02.974403Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:264: StateWork, processing event TEvPrivate::TEvResolvedRegistrationRequest 2025-06-03T10:29:02.974423Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:79: TTxRegisterNode Execute 2025-06-03T10:29:02.974429Z node 2 :NODE_BROKER DEBUG: 
node_broker__register_node.cpp:83: Registration request from host2:1001 (not fixed) tenant: dc-1 2025-06-03T10:29:02.974462Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:863: [DB] Adding node #1025.v3 host2:1001 to database state=Active resolvehost=host2.host2.host2 address=1.2.3.5 dc=1 location=DC=1/M=2/R=3/U=5/ lease=1 expire=Thu, 01 Jan 1970 02:00:00 UTC servicedsubdomain=72057594046678944:1 slotindex=1 authorizedbycertificate=false bridgePileId= 2025-06-03T10:29:02.974522Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Dirty] Register new active node #1025.v3 host2:1001 2025-06-03T10:29:02.974529Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Dirty] Update current epoch version from 2 to 3 2025-06-03T10:29:02.974534Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:1371: [DB] Update epoch version in database version=3 2025-06-03T10:29:02.986166Z node 2 :NODE_BROKER DEBUG: node_broker__register_node.cpp:195: TTxRegisterNode Complete 2025-06-03T10:29:02.986203Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:267: [Committed] Register new active node #1025.v3 host2:1001 2025-06-03T10:29:02.986217Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:558: [Committed] Update current epoch version from 2 to 3 2025-06-03T10:29:02.986224Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:636: Add node #1025.v3 host2:1001 to epoch cache 2025-06-03T10:29:02.986260Z node 2 :NODE_BROKER DEBUG: node_broker.cpp:665: Add node #1025.v3 to update nodes log 2025-06-03T10:29:02.986341Z node 2 :NODE_BROKER TRACE: node_broker__register_node.cpp:60: TTxRegisterNode reply with: Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } ... waiting for cache miss 2025-06-03T10:29:02.986448Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:525: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.986465Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:626: New cache miss: nodeId# 1024, deadline# 18446744073709.551615s 2025-06-03T10:29:02.986473Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:525: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1025 Deadline: 1.107024s } 2025-06-03T10:29:02.986477Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:626: New cache miss: nodeId# 1025, deadline# 1.107024s 2025-06-03T10:29:02.986482Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:630: Schedule wakeup for new earliest deadline 1.107024s ... blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR cookie 1 ... waiting for cache miss (done) 2025-06-03T10:29:02.998760Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 2146435074, Sender [0:0:0], Recipient [2:168:2174]: NKikimr::NNodeBroker::TNodeBroker::TEvPrivate::TEvProcessSubscribersQueue 2025-06-03T10:29:02.998798Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:265: StateWork, processing event TEvPrivate::TEvProcessSubscribersQueue 2025-06-03T10:29:02.998816Z node 2 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v1 -> v3 to [2:18:2065] ... 
blocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE cookie 0 2025-06-03T10:29:03.071471Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:873: HandleWakeup at 1.108024s 2025-06-03T10:29:03.071514Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:134: Cache miss failed: nodeId=1025, error=Deadline exceeded ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvUpdateNodes from NODE_BROKER_ACTOR to NAMESERVICE ... unblocking NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest from NAMESERVICE to NODE_BROKER_ACTOR 2025-06-03T10:29:03.071661Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:724: Handle NKikimrNodeBroker.TUpdateNodes Epoch { Id: 1 Version: 1 Start: 24000 End: 3600024000 NextEnd: 7200024000 } SeqNo: 0 2025-06-03T10:29:03.071688Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039952, Sender [2:18:2065], Recipient [2:168:2174]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesRequest { SeqNo: 0 } 2025-06-03T10:29:03.071699Z node 2 :NODE_BROKER TRACE: node_broker_impl.h:262: StateWork, processing event TEvNodeBroker::TEvSyncNodesRequest 2025-06-03T10:29:03.071772Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:724: Handle NKikimrNodeBroker.TUpdateNodes Epoch { Id: 1 Version: 3 Start: 24000 End: 3600024000 NextEnd: 7200024000 } Updates { Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.host1.host1" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 7200024000 Name: "slot-0" } } Updates { Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.host2.host2" Address: "1.2.3.5" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "5" } Expire: 7200024000 Name: "slot-1" } } SeqNo: 0 2025-06-03T10:29:03.071803Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:789: Handle NKikimr::NNodeBroker::TEvNodeBroker::TEvSyncNodesResponse { SeqNo: 0 } 2025-06-03T10:29:03.071809Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:128: Cache miss succeed: nodeId=1024 2025-06-03T10:29:03.071823Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:525: Handle NActors::TEvInterconnect::TEvResolveNode { NodeId: 1024 Deadline: 18446744073709.551615s } >> Cdc::EnqueueRequestProcessSend [GOOD] >> Cdc::InitialScanAndResolvedTimestamps >> KqpOlap::OlapLayout [GOOD] >> KqpOlap::OlapRead_FailsOnDataQuery >> KqpOlapAggregations::JsonDoc_Exists [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK >> KqpOlapStatistics::StatsUsage >> ColumnStatistics::CountMinSketchStatistics [GOOD] >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] >> KqpOlapAggregations::Aggregation_Some |64.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |64.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication |64.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_replication/ydb-core-tx-schemeshard-ut_replication >> Viewer::QueryExecuteScript [GOOD] >> Viewer::Plan2SvgOK >> UpsertLoad::ShouldDropCreateTable [GOOD] >> KqpOlapAggregations::AggregationCountPushdown [GOOD] >> KqpOlapAggregations::AggregationCountGroupByPushdown >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] >> BasicStatistics::TwoNodes [GOOD] >> KqpOlapStatistics::StatsUsageNotPK >> 
KqpOlapJson::CompactionVariants |64.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/ut/ydb-core-client-ut |64.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/ut/ydb-core-client-ut |64.7%| [LD] {RESULT} $(B)/ydb/core/client/ut/ydb-core-client-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapAggregations::JsonDoc_Exists [GOOD] Test command err: Trying to start YDB, gRPC: 6407, MsgBus: 23208 2025-06-03T10:29:03.556529Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668205848240322:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:03.556556Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d8d/r3tmp/tmpfnkQGO/pdisk_1.dat 2025-06-03T10:29:03.645521Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.646428Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668205848240298:2079] 1748946543556399 != 1748946543556402 TServer::EnableGrpc on GrpcPort 6407, node 1 2025-06-03T10:29:03.665507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:03.665520Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:03.665523Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:03.665600Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23208 2025-06-03T10:29:03.711731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:03.711771Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:03.713188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23208 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:29:03.746548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.755383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.777270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.778058Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.778145Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.778169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.778202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.778224Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.778244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.778270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.778294Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.778319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.778341Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.778364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668205848241002:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.788571Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.788599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.788636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.788658Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.788679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.788704Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.788724Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.788746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.788776Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.788795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.788815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.788834Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668205848241006:2315];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.794230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.794263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.794313Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.794335Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.794357Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.794378Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668205848241007:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract. ... 
TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:03.802728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.802734Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:03.802740Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:03.802744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:03.802789Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:03.802793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:03.832646Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:03.833738Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:03.834690Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:03.835888Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1448;columns=6; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1448;columns=6; REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT id, JSON_EXISTS(jsonval, "$.col1"), JSON_EXISTS(jsondoc, "$.col1") FROM `/Root/tableWithNulls` WHERE JSON_EXISTS(jsondoc, "$.col1") AND id = 6; 2025-06-03T10:29:04.055476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668210143208579:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.055507Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.055581Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668210143208614:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.056481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:29:04.059124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480
2025-06-03T10:29:04.059209Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668210143208616:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:29:04.154056Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668210143208667:2487] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:04.346255Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946544114, txId: 18446744073709551615] shutting down REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT id, JSON_EXISTS(jsonval, "$.col1"), JSON_EXISTS(jsondoc, "$.col1") FROM `/Root/tableWithNulls` WHERE JSON_EXISTS(jsondoc, "$.col1") AND id = 6; JSON Plan: {"Plan":{"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["tableWithNulls"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"E-Rows":"No estimate","Predicate":"","Pushdown":"True","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"},{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id [6, 6]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/tableWithNulls","E-Rows":"No estimate","Table":"tableWithNulls","ReadRangesKeys":["id"],"ReadColumns":["id","jsondoc","jsonval"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Text":"$.col1"},"Column":{"Id":7}}},{"Assign":{"Function":{"KernelName":"JsonExists","KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":6},{"Id":7}]},"Column":{"Id":8}}},{"Assign":{"Function":{"Id":23,"Arguments":[{"Id":8}]},"Column":{"Id":9}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":10}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":9},{"Id":10}]},"Column":{"Id":11}}},{"Filter":{"Predicate":{"Id":11}}},{"Projection":{"Columns":[{"Id":1},{"Id":6},{"Id":5}]}}]},"E-Cost":"No estimate","ReadRangesExpectedSize":1}],"Node Type":"Filter-TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Node Type":"Collect"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/tableWithNulls","reads":[{"columns":["id","jsondoc","jsonval"],"scan_by":["id [6, 6]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id [6, 6]"],"Name":"TableRangeScan","Path":"\/Root\/tableWithNulls","E-Rows":"No estimate","Table":"tableWithNulls","ReadRangesKeys":["id"],"ReadColumns":["id","jsondoc","jsonval"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Text":"$.col1"},"Column":{"Id":7}}},{"Assign":{"Function":{"KernelName":"JsonExists","KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":6},{"Id":7}]},"Column":{"Id":8}}},{"Assign":{"Function":{"Id":23,"Arguments":[{"Id":8}]},"Column":{"Id":9}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":10}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":9},{"Id":10}]},"Column":{"Id":11}}},{"Filter":{"Predicate":{"Id":11}}},{"Projection":{"Columns":[{"Id":1},{"Id":6},{"Id":5}]}}]},"E-Cost":"No 
estimate","ReadRangesExpectedSize":1}],"Node Type":"TableRangeScan"}],"Operators":[{"E-Rows":"No estimate","Predicate":"","Pushdown":"True","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Filter"}],"Node Type":"ResultSet_1","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} AST: ( (declare %kqp%tx_result_binding_0_0 (TupleType (ListType (TupleType (TupleType (OptionalType (DataType 'Int32)) (DataType 'Int32)) (TupleType (OptionalType (DataType 'Int32)) (DataType 'Int32)))))) (let $1 '('('"_logical_id" '1318) '('"_id" '"198ad9e9-71433070-42b6c82a-66dbf9e9") '('"_partition_mode" '"single"))) (let $2 (DqPhyStage '() (lambda '() (block '( (let $17 (Int32 '"6")) (let $18 (Just $17)) (let $19 (Int32 '1)) (let $20 '($18 $19)) (let $21 (If (== $17 (Int32 '2147483647)) $20 '((+ $18 $19) (Int32 '0)))) (return (ToStream (Just '((RangeFinalize (RangeMultiply (Uint64 '10000) (RangeUnion (RangeCreate (AsList '($20 $21)))))))))) ))) $1)) (let $3 (DqCnValue (TDqOutput $2 '0))) (let $4 (KqpPhysicalTx '($2) '($3) '() '('('"type" '"compute")))) (let $5 '"%kqp%tx_result_binding_0_0") (let $6 (DataType 'Int32)) (let $7 (TupleType (OptionalType $6) $6)) (let $8 (TupleType (ListType (TupleType $7 $7)))) (let $9 (OptionalType (DataType 'Bool))) (let $10 (DqPhyStage '() (lambda '() (block '( (let $22 (KqpTable '"/Root/tableWithNulls" '"72057594046644480:2" '"" '1)) (let $23 '('"id" '"jsondoc" '"jsonval")) (let $24 '('('"UsedKeyColumns" '('"id")) '('"ExpectedMaxRanges" '1) '('"PointPrefixLen" '1))) (let $25 (Utf8 '"$.col1")) (let $26 (Bool 'false)) (let $27 (KqpWideReadOlapTableRanges $22 %kqp%tx_result_binding_0_0 $23 '() $24 (lambda '($28) (block '( (let $29 '('?? (KqpOlapJsonExists '"jsondoc" $25) $26)) (return (KqpOlapFilter $28 $29)) ))))) (return (FromFlow (NarrowMap $27 (lambda '($30 $31 $32) (block '( (let $33 '($9)) (let $34 (ResourceType '"JsonNode")) (let $35 (OptionalType $34)) (let $36 '((ResourceType '"JsonPath"))) (let $37 (DataType 'Utf8)) (let $38 (DictType $37 $34)) (let $39 '($38)) (let $40 (CallableType '() $33 '($35) $36 $39 $33)) (let $41 '('('"strict"))) (let $42 (Udf '"Json2.SqlExists" (Void) (VoidType) '"" $40 (VoidType) '"" $41)) (let $43 (IfPresent $32 (lambda '($53) (block '( (let $54 '((DataType 'Json) '"" '1)) (let $55 (CallableType '() '($34) $54)) (let $56 (Udf '"Json2.Parse" (Void) (VoidType) '"" $55 (VoidType) '"" '())) (return (Just (Apply $56 $53))) ))) (Nothing $35))) (let $44 (CallableType '() $36 '($37))) (let $45 (Udf '"Json2.CompilePath" (Void) (VoidType) '"" $44 (VoidType) '"" '())) (let $46 (Apply $45 $25)) (let $47 (Dict $38)) (let $48 (Just $26)) (let $49 (Apply $42 $43 $46 $47 $48)) (let $50 (CallableType '() $33 '((OptionalType (DataType 'JsonDocument))) $36 $39 $33)) (let $51 (Udf '"Json2.JsonDocumentSqlExists" (Void) (VoidType) '"" $50 (VoidType) '"" $41)) (let $52 (Apply $51 $31 $46 $47 $48)) (return (AsStruct '('"column1" $49) '('"column2" $52) '('"id" $30))) )))))) ))) '('('"_logical_id" '1388) '('"_id" '"45494db6-872c8d09-21a9fbb5-8805ee57")))) (let $11 (DqCnUnionAll (TDqOutput $10 '0))) (let $12 (DqPhyStage '($11) (lambda '($57) $57) '('('"_logical_id" '1824) '('"_id" '"f5f96bfd-10991d35-cf10674d-695d7f2e")))) (let $13 '('"id" '"column1" '"column2")) (let $14 (DqCnResult (TDqOutput $12 '0) $13)) (let $15 (KqpTxResultBinding $8 '0 '0)) (let $16 (KqpPhysicalTx '($10 $12) '($14) '('($5 $15)) '('('"type" '"scan")))) (return (KqpPhysicalQuery '($4 $16) 
'((KqpTxResultBinding (ListType (StructType '('"column1" $9) '('"column2" $9) '('"id" $6))) '1 '0)) '('('"type" '"scan_query")))) )
|64.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
|64.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
|64.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/effects/ydb-core-kqp-ut-effects
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchStatistics [GOOD]
Test command err:
2025-06-03T10:26:32.409215Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:32.409261Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:32.409273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ba4/r3tmp/tmpWeCLyL/pdisk_1.dat 2025-06-03T10:26:32.524393Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11018, node 1 2025-06-03T10:26:32.635756Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:32.635784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:32.635789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:32.635849Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:32.636532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:32.713815Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:32.713867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:32.726069Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15027 2025-06-03T10:26:33.069160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:33.934388Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:33.944562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:33.944616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:33.998308Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:33.999001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:34.161242Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161443Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161591Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:34.161627Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161673Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161692Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161706Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161724Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.161743Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.317410Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:34.317468Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:34.329010Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:34.376586Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:34.393124Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:34.393168Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:34.402735Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:34.402817Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:34.402847Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:34.402853Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:34.402861Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:34.402868Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:34.402874Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:34.402883Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:34.403071Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:34.421005Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:34.421044Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:34.423285Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:34.424433Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:34.424582Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:34.429148Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:34.435779Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:34.435807Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:34.435823Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:34.449108Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:34.451839Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:34.451889Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:34.580514Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:34.668821Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:34.732457Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:35.254483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:35.254540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:35.259098Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:35.310870Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:35.310992Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:35.311089Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:26:35.311135Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:26:35.311164Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:26:35.311192Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:26:35.311234Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:26:35.311267Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... 37894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 51 2025-06-03T10:29:01.350844Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:01.350884Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:01.350899Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-03T10:29:01.350906Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:29:01.351078Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database
2025-06-03T10:29:01.352626Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:29:01.354025Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7265:5324], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:01.354063Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7274:5329], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:01.354096Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:01.357610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897
2025-06-03T10:29:01.376657Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7279:5332], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:29:01.388647Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:29:01.613618Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7363:5378] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:01.629900Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:7385:5392]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:01.630010Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:01.630030Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:7387:5394] 2025-06-03T10:29:01.630048Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:7387:5394] 2025-06-03T10:29:01.630216Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7388:5395] 2025-06-03T10:29:01.630266Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7388:5395], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:29:01.630295Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:29:01.630345Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7387:5394], server id = [2:7388:5395], tablet id = 72075186224037894, status = OK 2025-06-03T10:29:01.630378Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:01.630418Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:7385:5392], StatRequests.size() = 1 2025-06-03T10:29:01.660145Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzllZDhjODYtOGI2N2VlYjAtYzcyZGQ4YTktOWQyMjUzMQ==, TxId: 2025-06-03T10:29:01.660180Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzllZDhjODYtOGI2N2VlYjAtYzcyZGQ4YTktOWQyMjUzMQ==, TxId: 2025-06-03T10:29:01.660326Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:01.674452Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:29:01.674491Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:29:01.754353Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:29:01.754393Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:29:01.849646Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:7387:5394], schemeshard count = 1 2025-06-03T10:29:04.224204Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:04.224242Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:04.224254Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is column table. 2025-06-03T10:29:04.224260Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:29:04.227525Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:29:04.242793Z node 2 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:29:04.243053Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:29:04.243092Z node 2 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:29:04.243402Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:29:04.256967Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:29:04.257080Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 2, current Round: 0 2025-06-03T10:29:04.257330Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:7492:5454], server id = [2:7493:5455], tablet id = 72075186224037899, status = OK 2025-06-03T10:29:04.257504Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:7492:5454], path = { OwnerId: 72075186224037897 LocalId: 4 } 2025-06-03T10:29:04.258956Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037899 2025-06-03T10:29:04.258987Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:29:04.259164Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:29:04.259228Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:29:04.259315Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Database 2025-06-03T10:29:04.259968Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:7492:5454], server id = [2:7493:5455], tablet id = 72075186224037899 2025-06-03T10:29:04.259981Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:04.260166Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:29:04.269820Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:7513:5474]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.269910Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:29:04.269919Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:7513:5474], StatRequests.size() = 1 2025-06-03T10:29:04.309978Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDgxYTVhYTktZWUyY2I4MjItYTAxZTAwZTEtYWI0ZThhOA==, TxId: 2025-06-03T10:29:04.310012Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDgxYTVhYTktZWUyY2I4MjItYTAxZTAwZTEtYWI0ZThhOA==, TxId: 2025-06-03T10:29:04.310212Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:04.310579Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:7526:5563]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.310632Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:04.310637Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:29:04.311190Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:04.311203Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Database ], TablePath[ /Root/Database/.metadata/_statistics ] 2025-06-03T10:29:04.311213Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037897, LocalPathId: 4] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:29:04.313748Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> ReadLoad::ShouldReadKqpMoreThanRows [GOOD] Test command err: 2025-06-03T10:28:59.209490Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:59.209589Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:59.209624Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002561/r3tmp/tmpaz0AmQ/pdisk_1.dat 2025-06-03T10:28:59.333939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.356743Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:59.358133Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946538743750 != 1748946538743754 2025-06-03T10:28:59.404879Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:59.404927Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:59.415633Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:59.490998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:59.705939Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# usertable in dir# /Root with rows# 100 2025-06-03T10:28:59.706397Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" 2025-06-03T10:28:59.730543Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:732:2614], subTag: 1} TUpsertActor finished in 0.024063s, errors=0 2025-06-03T10:28:59.730668Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kReadKqpStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "usertable" } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "usertable" } ReadKqpStart { RowCount: 100 Inflights: 10 } 2025-06-03T10:28:59.730690Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:322: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} Bootstrap called: RowCount: 100 Inflights: 10 2025-06-03T10:28:59.733352Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:366: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} will work with tablet# 72075186224037888 with ownerId# 72057594046644480 with tableId# 2 resolved for path# /Root/usertable with columnsCount# 11, keyColumnCount# 1 2025-06-03T10:28:59.733415Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:400: TKqpSelectActorMultiSession# {Tag: 0, parent: 
[1:732:2614], subTag: 3} started fullscan actor# [1:744:2626] 2025-06-03T10:28:59.733439Z node 1 :DS_LOAD_TEST INFO: common.cpp:52: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Bootstrap called, sample# 100 2025-06-03T10:28:59.733445Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:61: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Connect to# 72075186224037888 called 2025-06-03T10:28:59.733524Z node 1 :DS_LOAD_TEST DEBUG: common.cpp:75: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} Handle TEvClientConnected called, Status# OK 2025-06-03T10:28:59.733836Z node 1 :DS_LOAD_TEST NOTICE: common.cpp:137: ReadIteratorScan# {Tag: 0, parent: [1:741:2623], subTag: 1} finished in 0.000280s, sampled# 100, iter finished# 1, oks# 100 2025-06-03T10:28:59.733873Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:416: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} received keyCount# 100 2025-06-03T10:28:59.733935Z node 1 :DS_LOAD_TEST NOTICE: kqp_select.cpp:445: TKqpSelectActorMultiSession# {Tag: 0, parent: [1:732:2614], subTag: 3} started# 10 actors each with inflight# 1 2025-06-03T10:28:59.733948Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} Bootstrap called 2025-06-03T10:28:59.733955Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.733966Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} Bootstrap called 2025-06-03T10:28:59.733970Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.733976Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} Bootstrap called 2025-06-03T10:28:59.733981Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.733987Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} Bootstrap called 2025-06-03T10:28:59.733991Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.733999Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} Bootstrap called 2025-06-03T10:28:59.734003Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734008Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} Bootstrap called 2025-06-03T10:28:59.734012Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734017Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} Bootstrap called 2025-06-03T10:28:59.734021Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} sends 
event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734026Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 9} Bootstrap called 2025-06-03T10:28:59.734031Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 9} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734037Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} Bootstrap called 2025-06-03T10:28:59.734042Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734048Z node 1 :DS_LOAD_TEST INFO: kqp_select.cpp:130: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} Bootstrap called 2025-06-03T10:28:59.734052Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:142: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} sends event for session creation to proxy: [1:8678280833929343339:121] 2025-06-03T10:28:59.734701Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 2} session: ydb://session/3?node_id=1&id=NzYyZTNmN2QtZGVmYzE0ZTUtYTJkMDk0ZTctMmE0NTJiMWM= 2025-06-03T10:28:59.735200Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 3} session: ydb://session/3?node_id=1&id=ZGFhZTAxOTEtYWQ4NTA0ZTItZGQwNDUxZmYtOTIzYTczZjY= 2025-06-03T10:28:59.735530Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 4} session: ydb://session/3?node_id=1&id=NDQ0ZDNiMzgtZGNhMDg5NmUtZDY2YjFkMWYtNmI2OGNkMmE= 2025-06-03T10:28:59.735826Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 5} session: ydb://session/3?node_id=1&id=OGQ5Yzk2ZTItN2RhYTRiY2MtMjg4ZDJhMDUtMWVjNzBhOTQ= 2025-06-03T10:28:59.736326Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 6} session: ydb://session/3?node_id=1&id=MWI2ZjE0NTYtMjBkZWFjMDUtMTRkODc3ODUtNjk0MGM5ZDI= 2025-06-03T10:28:59.736358Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 7} session: ydb://session/3?node_id=1&id=ZmFjYWRkZDgtZDkwNTg3YzgtODI2NjZiYi01YTg3ZDU3Yg== 2025-06-03T10:28:59.736625Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 8} session: ydb://session/3?node_id=1&id=NzBhMDhmMWItNzY5ZTM1ZDUtMzk2MzYzOGQtZTFmMTIxNjQ= 2025-06-03T10:28:59.736889Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 9} session: ydb://session/3?node_id=1&id=MzgwZDk0NjctOGViZTM5OGYtZTkyZjcxYTEtODBlNGI4ZDU= 2025-06-03T10:28:59.737182Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 10} session: ydb://session/3?node_id=1&id=ZDZmZDczNDgtNWE3NmExMTQtMTkzYjA3NDgtZjdiY2Q0YzA= 2025-06-03T10:28:59.737523Z node 1 :DS_LOAD_TEST DEBUG: kqp_select.cpp:214: TKqpSelectActor# {Tag: 0, parent: [1:741:2623], subTag: 11} session: ydb://session/3?node_id=1&id=OWY5ZGIyY2UtZTExOTMwMjAtMzZhMWY3YzMtMjY1MDg4OGQ= 2025-06-03T10:28:59.738675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:804:2680], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:59.738723Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:768:2650], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:59.738747Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:800:2676], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:59.738756Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:801:2677], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:59.738763Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme ... 546: Actor# [2:856:2722] txid# 281474976715667, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exists but creating right now (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateCreate), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.979888Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:818:2694], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979929Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:819:2695], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979939Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:820:2696], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979950Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:821:2697], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979960Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:825:2701], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979973Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:826:2702], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979983Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:827:2703], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.979993Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:832:2708], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.980004Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:838:2714], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:02.980015Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:854:2721], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:03.013160Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:974:2810] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.087566Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 7} finished in 0.284804s, errors=0 2025-06-03T10:29:03.087654Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 7 { Tag: 7 DurationMs: 284 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.098986Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:1903:3132] txid# 281474976715769, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.172544Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 2} finished in 0.387065s, errors=0 2025-06-03T10:29:03.172635Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 2 { Tag: 2 DurationMs: 387 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.184334Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:2810:3438] txid# 281474976715870, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.273945Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 11} finished in 0.469812s, errors=0 2025-06-03T10:29:03.274043Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 11 { Tag: 11 DurationMs: 469 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.287151Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:3717:3744] txid# 281474976715971, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.396808Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 4} finished in 0.609071s, errors=0 2025-06-03T10:29:03.396916Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 4 { Tag: 4 DurationMs: 609 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.410037Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:4624:4050] txid# 281474976716072, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.530629Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 6} finished in 0.728442s, errors=0 2025-06-03T10:29:03.530741Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 6 { Tag: 6 DurationMs: 728 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.545196Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:5531:4356] txid# 281474976716173, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.726109Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 10} finished in 0.921993s, errors=0 2025-06-03T10:29:03.726282Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 10 { Tag: 10 DurationMs: 921 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.739668Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:6438:4662] txid# 281474976716274, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:03.907737Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 9} finished in 1.104256s, errors=0 2025-06-03T10:29:03.907836Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 9 { Tag: 9 DurationMs: 1104 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:03.923931Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7345:4968] txid# 281474976716375, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:04.124872Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 3} finished in 1.338707s, errors=0 2025-06-03T10:29:04.124996Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 3 { Tag: 3 DurationMs: 1338 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:04.139467Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:8252:5274] txid# 281474976716476, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:04.349279Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 5} finished in 1.547153s, errors=0 2025-06-03T10:29:04.349442Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 5 { Tag: 5 DurationMs: 1547 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:04.365410Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:9159:5580] txid# 281474976716577, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:04.592284Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:197: TKqpSelectActor# {Tag: 0, parent: [2:741:2623], subTag: 8} finished in 1.789148s, errors=0 2025-06-03T10:29:04.592422Z node 2 :DS_LOAD_TEST DEBUG: kqp_select.cpp:461: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished: 8 { Tag: 8 DurationMs: 1789 OperationsOK: 100 OperationsError: 0 } 2025-06-03T10:29:04.592434Z node 2 :DS_LOAD_TEST NOTICE: kqp_select.cpp:480: TKqpSelectActorMultiSession# {Tag: 0, parent: [2:732:2614], subTag: 3} finished in 1.811961s, oks# 1000, errors# 0 2025-06-03T10:29:04.592547Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:741:2623] with tag# 3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/load_test/ut_ycsb/unittest >> UpsertLoad::ShouldDropCreateTable [GOOD] Test command err: 2025-06-03T10:29:01.862456Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:01.862572Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:01.862608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002534/r3tmp/tmpj9Hre7/pdisk_1.dat 2025-06-03T10:29:02.007627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.029185Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:02.030653Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946541279853 != 1748946541279857 2025-06-03T10:29:02.072783Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:02.072832Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:02.083614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:02.157250Z node 1 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# BrandNewTable in dir# /Root 2025-06-03T10:29:02.193444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:643:2550], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.193482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:02.196393Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.414509Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# BrandNewTable in dir# /Root with rows# 10 2025-06-03T10:29:02.414884Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:639:2547], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-03T10:29:02.435998Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:639:2547], subTag: 1} TUpsertActor finished in 0.021035s, errors=0 2025-06-03T10:29:02.436153Z node 1 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "BrandNewTable" CreateTable: true MinParts: 11 MaxParts: 13 MaxPartSizeMb: 1234 } TargetShard { TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:29:02.436186Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [1:639:2547], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 WorkingDir: "/Root" TableName: "BrandNewTable" 2025-06-03T10:29:02.489341Z node 1 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [1:639:2547], subTag: 3} TUpsertActor finished in 0.053071s, errors=0 2025-06-03T10:29:02.489383Z node 1 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [1:749:2624] with tag# 3 2025-06-03T10:29:03.325382Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.325482Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.325493Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002534/r3tmp/tmptnrHmn/pdisk_1.dat 2025-06-03T10:29:03.461074Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.477280Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.478652Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946542809743 != 1748946542809747 2025-06-03T10:29:03.526052Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:03.526108Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:03.537880Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:03.626073Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.828712Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 1, proto# NotifyWhenFinished: true TargetShard { TabletId: 72075186224037888 TableId: 2 } UpsertBulkStart { RowCount: 100 Inflight: 3 } 2025-06-03T10:29:03.828752Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor Bootstrap called: RowCount: 100 Inflight: 3 with type# 0, target# TabletId: 72075186224037888 TableId: 2 2025-06-03T10:29:04.224799Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:732:2614], subTag: 2} TUpsertActor finished in 0.395981s, errors=0 2025-06-03T10:29:04.224840Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:733:2615] with tag# 2 2025-06-03T10:29:04.226188Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:174: TLoad# 0 drops table# table in dir# /Root 2025-06-03T10:29:04.228594Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:775:2656], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.228630Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.372390Z node 2 :DS_LOAD_TEST NOTICE: test_load_actor.cpp:194: TLoad# 0 creates table# table in dir# /Root 2025-06-03T10:29:04.376189Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:838:2699], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.376236Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.378896Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:04.414914Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found 2025-06-03T10:29:04.572902Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:346: TLoad# 0 warmups table# table in dir# /Root with rows# 10 2025-06-03T10:29:04.573058Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:771:2653], subTag: 1} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 100 BatchSize: 100 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-03T10:29:04.585741Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:771:2653], subTag: 1} TUpsertActor finished in 0.012575s, errors=0 2025-06-03T10:29:04.585868Z node 2 :DS_LOAD_TEST DEBUG: test_load_actor.cpp:425: TLoad# 0 created load actor of type# kUpsertBulkStart with tag# 2, proto# NotifyWhenFinished: true TableSetup { WorkingDir: "/Root" TableName: "table" DropTable: true } TargetShard { TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" } UpsertBulkStart { RowCount: 10 Inflight: 3 } 2025-06-03T10:29:04.585906Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:157: Id# {Tag: 0, parent: [2:771:2653], subTag: 3} TUpsertActor Bootstrap called: RowCount: 10 Inflight: 3 with type# 0, target# TabletId: 72075186224037889 TableId: 3 WorkingDir: "/Root" TableName: "table" 2025-06-03T10:29:04.638504Z node 2 :DS_LOAD_TEST NOTICE: bulk_mkql_upsert.cpp:255: Id# {Tag: 0, parent: [2:771:2653], subTag: 3} TUpsertActor finished in 0.052506s, errors=0 2025-06-03T10:29:04.638542Z node 2 :DS_LOAD_TEST INFO: test_load_actor.cpp:447: TLoad# 0 received finished from actor# [2:929:2771] with tag# 3 >> KqpOlap::OlapRead_FailsOnDataQuery [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/ut/unittest >> TNodeBrokerTest::ShiftIdRangeRemoveExpired [GOOD] Test command err: 2025-06-03T10:29:01.908761Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.915280Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.934480Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.934584Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.934843Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.934916Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.934976Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.935018Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.935912Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.936027Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.936077Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936145Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936230Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936385Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936446Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936507Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936565Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936600Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936624Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936690Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936724Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936787Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.936818Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937319Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937414Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937541Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937574Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937651Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.937685Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.938813Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.938989Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.939059Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.939127Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.939171Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:01.939207Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.939272Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.939557Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.939619Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940221Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940300Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940490Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940553Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940567Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940579Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.940591Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.942469Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.950178Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.950314Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.950547Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.950637Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.951411Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.951896Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.952145Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.952238Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 
2025-06-03T10:29:01.952378Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.952563Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.955224Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.955517Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:01.995990Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:01.996032Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-03T10:29:02.003764Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:02.004615Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:02.004757Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-03T10:29:02.005128Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:02.005983Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-03T10:29:02.006027Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-03T10:29:02.006094Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 
2025-06-03T10:29:02.006118Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Starting the first epoch: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-03T10:29:02.006125Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1036: [DB] Loaded the first approximate epoch start: #1.1 2025-06-03T10:29:02.006152Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:27: TTxLoadState Complete 2025-06-03T10:29:02.006172Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:84: TTxMigrateState Execute 2025-06-03T10:29:02.006179Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:52: TTxMigrateState ProcessMigrationBatch UpdateNodes left 0, NewVersionUpdateNodes left 0 2025-06-03T10:29:02.006185Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:21: TTxMigrateState FinalizeMigration 2025-06-03T10:29:02.006192Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1326: [DB] Update epoch in database: #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z 2025-06-03T10:29:02.006221Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1345: [DB] Update approx epoch start in database: #1.1 2025-06-03T10:29:02.006228Z node 1 :NODE_BROKER NOTICE: node_broker.cpp:1358: [DB] Update main nodes table to: Nodes 2025-06-03T10:29:02.049384Z node 1 :NODE_BROKER DEBUG: node_broker__migrate_state.cpp:95: TTxMigrateState Complete 2025-06-03T10:29:02.049432Z node 1 :NODE_BROKER TRACE: node_broker.cpp:459: Scheduled epoch update at 1970-01-01T01:00:00.023000Z 2025-06-03T10:29:02.049442Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:568: Preparing nodes list cache for epoch #1.1 1970-01-01T00:00:00.023000Z - 1970-01-01T01:00:00.023000Z - 1970-01-01T02:00:00.023000Z, approximate epoch start #1.1 nodes=0 expired=0 2025-06-03T10:29:02.049453Z ... 
erverConnected 2025-06-03T10:29:03.596135Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:03.596140Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596147Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596211Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:786:2320], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596229Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 10 } 2025-06-03T10:29:03.596233Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596239Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596289Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:788:2322], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596303Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:03.596308Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596313Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596373Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:790:2324], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596385Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 9 } 2025-06-03T10:29:03.596389Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596395Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596450Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:792:2326], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596466Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:03.596471Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596477Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596532Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:794:2328], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596548Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 8 } 2025-06-03T10:29:03.596552Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:03.596559Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #3.11 1970-01-01T02:00:00.023000Z - 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z 2025-06-03T10:29:03.596637Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:796:2330], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596660Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 2 } 2025-06-03T10:29:03.596666Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:03.596675Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:619:2211], seqNo: 2, version: 11, server pipe id: [1:796:2330] 2025-06-03T10:29:03.596687Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v11 to [1:619:2211] 2025-06-03T10:29:03.596750Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:796:2330], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:03.596756Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:619:2211], seqNo: 2, server pipe id: [1:796:2330] 2025-06-03T10:29:03.596780Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:798:2332], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596798Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 3 } 2025-06-03T10:29:03.596804Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:03.596809Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:619:2211], seqNo: 3, version: 10, server pipe id: [1:798:2332] 2025-06-03T10:29:03.596814Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v11 to [1:619:2211] 2025-06-03T10:29:03.596867Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:798:2332], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:03.596872Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:619:2211], seqNo: 3, server pipe id: [1:798:2332] 2025-06-03T10:29:03.596898Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:800:2334], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.596915Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 9 SeqNo: 4 } 2025-06-03T10:29:03.596920Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:03.596925Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:619:2211], seqNo: 4, version: 9, server pipe id: [1:800:2334] 2025-06-03T10:29:03.596930Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v9 -> v11 to [1:619:2211] 2025-06-03T10:29:03.596980Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:800:2334], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:03.596985Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:619:2211], seqNo: 4, server pipe id: [1:800:2334] 2025-06-03T10:29:03.597017Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:802:2336], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.597056Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 8 SeqNo: 5 } 2025-06-03T10:29:03.597061Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:03.597066Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:619:2211], seqNo: 5, version: 8, server pipe id: [1:802:2336] 2025-06-03T10:29:03.597071Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v8 -> v11 to [1:619:2211] 2025-06-03T10:29:03.597130Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:802:2336], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:03.597135Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:619:2211], seqNo: 5, server pipe id: [1:802:2336] 2025-06-03T10:29:03.597164Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:804:2338], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.597185Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-03T10:29:03.597192Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:03.597230Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 14400023000 Name: "slot-0" } } 2025-06-03T10:29:03.604379Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:806:2340], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.604485Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-03T10:29:03.604496Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:03.604546Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1025 Host: "host2" Port: 1001 ResolveHost: "host2.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 14400023000 Name: "slot-1" } } 2025-06-03T10:29:03.604696Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:808:2342], Recipient [1:737:2279]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:03.604710Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:619:2211], Recipient [1:737:2279]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-03T10:29:03.604716Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:03.604726Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoNodes [GOOD] Test command err: 2025-06-03T10:26:27.621181Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:531:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:27.621246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:27.621259Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bf8/r3tmp/tmpwL76Ki/pdisk_1.dat 2025-06-03T10:26:27.730367Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9873, node 1 2025-06-03T10:26:27.836327Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:27.836350Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:27.836357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:27.836471Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:27.837076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:27.925586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.925616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.937022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6587 2025-06-03T10:26:28.292130Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.392288Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:29.395154Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-03T10:26:29.405672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.405700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.405857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.405865Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.459290Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:29.459323Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:29.460045Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.460129Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.605895Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606052Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606177Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606210Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606223Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606264Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606286Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606298Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.606313Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:29.757879Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.757911Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.768873Z node 3 :HIVE WARN: hive_impl.cpp:771: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:29.769000Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.779786Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.779816Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.791765Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.820940Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:29.832833Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:29.832874Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:29.842863Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:29.843104Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:29.843127Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:29.843132Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:29.843137Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:29.843142Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:29.843147Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:29.843154Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:29.843553Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:29.872579Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:29.872613Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2294:2562], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:29.874092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2304:2570] 2025-06-03T10:26:29.875606Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2338:2586] 2025-06-03T10:26:29.875660Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2338:2586], schemeshard id = 72075186224037897 2025-06-03T10:26:29.876761Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:29.880357Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:29.880381Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:29.880392Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:29.884016Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:29.885750Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:29.885793Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:29.963539Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.065536Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.140527Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:30.801824Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2662:3069], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.801886Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.805190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897
2025-06-03T10:26:30.931278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2810:3106], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.931355Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.931993Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2815:3110]], StatType[ ... 06-03T10:28:56.554798Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:28:57.159572Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7486:4459]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:57.159689Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-03T10:28:57.159699Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7486:4459], StatRequests.size() = 1 2025-06-03T10:28:57.861841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:28:57.861873Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:28:57.861886Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table. 2025-06-03T10:28:57.861892Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:28:57.862006Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:28:57.866061Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:28:57.867169Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7513:4478], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:57.867194Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7523:4483], DatabaseId: /Root/Database, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:57.867207Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:57.870266Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897
2025-06-03T10:28:57.886558Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7527:4486], DatabaseId: /Root/Database, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:28:58.026307Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7624:4535] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Database/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:58.036789Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7654:4551]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:58.036890Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:28:58.036899Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7654:4551], StatRequests.size() = 1 2025-06-03T10:28:58.055208Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NGUwYTM3N2MtZGNlYjg5MC1kN2YzZmFjOS00OTEwNDNlNw==, TxId: 2025-06-03T10:28:58.055234Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NGUwYTM3N2MtZGNlYjg5MC1kN2YzZmFjOS00OTEwNDNlNw==, TxId: 2025-06-03T10:28:58.055427Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:28:58.067047Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:28:58.067067Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:28:58.569649Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7689:4561]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:58.569770Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:28:58.569780Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7689:4561], StatRequests.size() = 1 2025-06-03T10:28:59.926777Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7728:4575]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:28:59.926892Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:28:59.926899Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7728:4575], StatRequests.size() = 1 2025-06-03T10:29:00.653573Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:29:00.663999Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:00.664032Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:00.664044Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 
2025-06-03T10:29:00.664050Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:29:00.664183Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database 2025-06-03T10:29:00.664932Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:00.668906Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZWMzZTE0YWItNzllOGVjZjUtM2I3NzEyMDAtMWQxYjZmNzQ=, TxId: 2025-06-03T10:29:00.668934Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZWMzZTE0YWItNzllOGVjZjUtM2I3NzEyMDAtMWQxYjZmNzQ=, TxId: 2025-06-03T10:29:00.669109Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:00.681908Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:29:00.681935Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:29:01.334408Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7796:4606]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:01.334554Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-03T10:29:01.334565Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7796:4606], StatRequests.size() = 1 2025-06-03T10:29:02.793021Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 125 ], ReplyToActorId[ [2:7841:4620]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:02.793203Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 125 ] 2025-06-03T10:29:02.793214Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 125, ReplyToActorId = [2:7841:4620], StatRequests.size() = 1 2025-06-03T10:29:03.512493Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:29:03.512660Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:29:03.512768Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:03.523423Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:03.523451Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:29:04.157344Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 126 ], ReplyToActorId[ [2:7878:4634]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.157498Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 126 ] 2025-06-03T10:29:04.157507Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 126, ReplyToActorId = [2:7878:4634], StatRequests.size() = 1 2025-06-03T10:29:04.157671Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [3:7880:3152]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.158515Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:04.158538Z node 3 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [3:7890:3156] 2025-06-03T10:29:04.158551Z node 3 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [3:7890:3156] 2025-06-03T10:29:04.159319Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:7898:4636] 2025-06-03T10:29:04.159444Z node 3 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 3, client id = [3:7890:3156], server id = [2:7898:4636], tablet id = 72075186224037894, status = OK 2025-06-03T10:29:04.159661Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:7898:4636], node id = 3, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:29:04.159677Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 3, schemeshard count = 1 2025-06-03T10:29:04.159736Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-03T10:29:04.159753Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [3:7880:3152], StatRequests.size() = 1 |64.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut >> KqpOlapStatistics::StatsUsage [GOOD] >> TNodeBrokerTest::NodesV2BackMigration [GOOD] |64.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut |64.8%| [LD] {RESULT} $(B)/ydb/core/grpc_services/ut/ydb-core-grpc_services-ut >> Viewer::ServerlessWithExclusiveNodes [GOOD] >> Viewer::SharedDoesntShowExclusiveNodes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlap::OlapRead_FailsOnDataQuery [GOOD] Test command err: Trying to start YDB, gRPC: 12571, MsgBus: 26670 2025-06-03T10:29:03.043420Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668208208467406:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:03.043448Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000dad/r3tmp/tmpCpqMLK/pdisk_1.dat TServer::EnableGrpc on GrpcPort 12571, node 1 2025-06-03T10:29:03.106258Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668208208467385:2079] 1748946543043226 != 1748946543043229 2025-06-03T10:29:03.109795Z 
node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.111415Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:03.111429Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:03.111431Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:03.111475Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26670 2025-06-03T10:29:03.146492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:03.146528Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:03.147541Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26670 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:29:03.194922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.207047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.258578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.258663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.258727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.258756Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.258783Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.258809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.258835Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.258864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.258892Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.258924Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.258952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.258978Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[1:7511668208208468219:2314];tablet_id=72075186224037895;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.263812Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.263842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.263894Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.263930Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.263952Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.263972Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.263996Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.264019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.264044Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.264066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.264085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.264115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[1:7511668208208468226:2319];tablet_id=72075186224037893;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.268861Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.268891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.268931Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.268959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.268989Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.269010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668208208468224:2317];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstrac ... 
ched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:04.684776Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:04.684789Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:04.684803Z node 2 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:04.684815Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:04.684823Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:04.684944Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:04.684958Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:04.685136Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:29:04.685150Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:29:04.685164Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:29:04.685175Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:29:04.685198Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:29:04.685209Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:29:04.685222Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:29:04.685233Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:29:04.685247Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:29:04.685260Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:29:04.685274Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:29:04.685285Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:29:04.685376Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:29:04.685389Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:04.685412Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:04.685424Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:04.685439Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:04.685457Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:04.685466Z node 2 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:04.685472Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:04.685478Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:04.685572Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:04.685583Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:04.712617Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:04.712692Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:04.712762Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:04.712844Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:04.716977Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:29:04.723857Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:04.724761Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:04.725849Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1448;columns=6; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1448;columns=6; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=2568;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=2568;columns=5; 2025-06-03T10:29:04.914489Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668211768091297:2394], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.914524Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668211768091287:2391], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.914552Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:04.915690Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480
2025-06-03T10:29:04.918877Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668211768091301:2395], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking }
2025-06-03T10:29:05.006300Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668216063058648:2595] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:29:05.061005Z node 2 :KQP_EXECUTER ERROR: kqp_data_executer.cpp:2028: ActorId: [2:7511668216063058677:2389] TxId: 281474976715663. Ctx: { TraceId: 01jwtnct8h1swehzgzepzjncf8, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDAxZjNlMmQtMjBiZWZjYy1lODVhYjg0MC0xMzM0NTE4NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Data manipulation queries do not support column shard tables.
2025-06-03T10:29:05.061397Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=NDAxZjNlMmQtMjBiZWZjYy1lODVhYjg0MC0xMzM0NTE4NQ==, ActorId: [2:7511668211768091261:2389], ActorState: ExecuteState, TraceId: 01jwtnct8h1swehzgzepzjncf8, Create QueryResponse for error on request, msg:
2025-06-03T10:29:02.404307Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404385Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404526Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404582Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404639Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404688Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404717Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404741Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404799Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404825Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404876Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.404900Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405336Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405424Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405538Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405567Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405629Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.405656Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.406637Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.406801Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.406869Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.406930Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.406972Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:547: Handle NActors::TEvInterconnect::TEvListNodes 2025-06-03T10:29:02.407008Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.407076Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.407261Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 
18446744073709.551615s } 2025-06-03T10:29:02.407308Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408076Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408165Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 1 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408352Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408407Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408419Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408430Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.408441Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.410193Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.417263Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.418689Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.418724Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.418788Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.418816Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.419511Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.419781Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.419846Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.419940Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.420038Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.420202Z node 1 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.420224Z node 1 :NAMESERVICE 
DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.420370Z node 8 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.420952Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 8 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.430518Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 4 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.430571Z node 6 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.431080Z node 4 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.431131Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 6 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.431854Z node 3 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 7 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.432334Z node 7 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 3 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.434707Z node 2 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 5 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.435134Z node 5 :NAMESERVICE DEBUG: dynamic_nameserver.cpp:576: Handle NActors::TEvInterconnect::TEvGetNode { NodeId: 2 Deadline: 18446744073709.551615s } 2025-06-03T10:29:02.466632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:02.466668Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ... waiting for nameservers are connected 2025-06-03T10:29:02.472448Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:02.473018Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:02.473116Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:20: TTxInitScheme Execute 2025-06-03T10:29:02.473381Z node 1 :NODE_BROKER DEBUG: node_broker_impl.h:243: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:02.473954Z node 1 :NODE_BROKER DEBUG: node_broker__init_scheme.cpp:29: TTxInitScheme Complete 2025-06-03T10:29:02.473980Z node 1 :NODE_BROKER DEBUG: node_broker__load_state.cpp:19: TTxLoadState Execute 2025-06-03T10:29:02.474024Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:973: [DB] Using default config. 2025-06-03T10:29:02.474042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:1010: [DB] Star ... 
nt# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:04.474345Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.474363Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.476853Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:898:2390], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.476919Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:04.476927Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.476945Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477049Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:900:2392], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477070Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:04.477075Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477083Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477151Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:902:2394], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477177Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 11 } 2025-06-03T10:29:04.477181Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477186Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477241Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:904:2396], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477257Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:04.477262Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477269Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477354Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:906:2398], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477374Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 10 } 2025-06-03T10:29:04.477379Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477386Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477459Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:908:2400], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477478Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { } 2025-06-03T10:29:04.477483Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477490Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477555Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:910:2402], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477575Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039936, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvListNodes { CachedVersion: 9 } 2025-06-03T10:29:04.477581Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:253: StateWork, processing event TEvNodeBroker::TEvListNodes 2025-06-03T10:29:04.477587Z node 1 :NODE_BROKER TRACE: node_broker.cpp:426: Send TEvNodesInfo for epoch #4.11 1970-01-01T03:00:00.023000Z - 1970-01-01T04:00:00.023000Z - 1970-01-01T05:00:00.023000Z 2025-06-03T10:29:04.477659Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:912:2404], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477685Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 11 SeqNo: 5 } 2025-06-03T10:29:04.477692Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:04.477704Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:629:2211], seqNo: 5, version: 11, server pipe id: [1:912:2404] 2025-06-03T10:29:04.477715Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v11 -> v11 to [1:629:2211] 2025-06-03T10:29:04.477796Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:912:2404], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:04.477806Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:629:2211], seqNo: 5, server pipe id: [1:912:2404] 2025-06-03T10:29:04.477872Z node 1 :NODE_BROKER TRACE: 
node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:914:2406], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.477895Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 10 SeqNo: 6 } 2025-06-03T10:29:04.477901Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:04.477906Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:629:2211], seqNo: 6, version: 10, server pipe id: [1:914:2406] 2025-06-03T10:29:04.477912Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v10 -> v11 to [1:629:2211] 2025-06-03T10:29:04.477981Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:914:2406], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:04.477987Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:629:2211], seqNo: 6, server pipe id: [1:914:2406] 2025-06-03T10:29:04.478021Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:916:2408], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.478035Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039950, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvSubscribeNodesRequest { CachedVersion: 9 SeqNo: 7 } 2025-06-03T10:29:04.478039Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:261: StateWork, processing event TEvNodeBroker::TEvSubscribeNodesRequest 2025-06-03T10:29:04.478042Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:753: New subscriber [1:629:2211], seqNo: 7, version: 9, server pipe id: [1:916:2408] 2025-06-03T10:29:04.478046Z node 1 :NODE_BROKER TRACE: node_broker.cpp:736: Send TEvUpdateNodes v9 -> v11 to [1:629:2211] 2025-06-03T10:29:04.478092Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877764, Sender [1:916:2408], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:04.478097Z node 1 :NODE_BROKER DEBUG: node_broker.cpp:774: Unsubscribed [1:629:2211], seqNo: 7, server pipe id: [1:916:2408] 2025-06-03T10:29:04.478119Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:918:2410], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.478136Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1024 } 2025-06-03T10:29:04.478141Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:04.478184Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: OK } Node { NodeId: 1024 Host: "host1" Port: 1001 ResolveHost: "host1.yandex.net" Address: "1.2.3.4" Location { DataCenter: "1" Module: "2" Rack: "3" Unit: "4" } Expire: 18000023000 Name: "slot-0" } } 2025-06-03T10:29:04.478263Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:920:2412], Recipient [1:861:2360]: 
NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.478280Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1025 } 2025-06-03T10:29:04.478285Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:04.478294Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } } 2025-06-03T10:29:04.478353Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 269877761, Sender [1:922:2414], Recipient [1:861:2360]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:04.478367Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:249: StateWork, received event# 272039937, Sender [1:629:2211], Recipient [1:861:2360]: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolveNode { NodeId: 1026 } 2025-06-03T10:29:04.478371Z node 1 :NODE_BROKER TRACE: node_broker_impl.h:254: StateWork, processing event TEvNodeBroker::TEvResolveNode 2025-06-03T10:29:04.478379Z node 1 :NODE_BROKER TRACE: node_broker.cpp:1493: Send TEvResolvedNode: NKikimr::NNodeBroker::TEvNodeBroker::TEvResolvedNode { Status { Code: WRONG_REQUEST Reason: "Unknown node" } }
>> KqpOlapStatistics::StatsUsageNotPK [GOOD]
>> Viewer::Plan2SvgOK [GOOD]
>> Viewer::Plan2SvgBad
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapStatistics::StatsUsage [GOOD]
Test command err: Trying to start YDB, gRPC: 10030, MsgBus: 6410 2025-06-03T10:29:04.824974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668211977348161:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:04.825053Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d80/r3tmp/tmp8gprcN/pdisk_1.dat 2025-06-03T10:29:04.940120Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:04.940714Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668211977348141:2079] 1748946544824780 != 1748946544824783 TServer::EnableGrpc on GrpcPort 10030, node 1 2025-06-03T10:29:04.961197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:04.961214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:04.961217Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:04.961283Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6410 2025-06-03T10:29:05.008659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:05.008702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:05.010295Z node 1 :HIVE WARN: node_info.cpp:25:
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6410 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:05.058180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:05.061380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:05.067973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.087386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.087484Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.087554Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.087591Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.087618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.087650Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.087679Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.087716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.087755Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.087785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.087815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.087853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668216272316138:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.098099Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.098139Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.098232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.098257Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.098282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.098306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.098328Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.098353Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.098379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.098405Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.098428Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.098453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668216272316140:2317];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.105422Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668216272316131:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.105464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668216272316131:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.105539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668216272316131:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.105576Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668216272316131:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.105611Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668216272316131:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.1 ... 
g.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:29:05.115714Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:29:05.115774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:29:05.115781Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:29:05.115794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:29:05.115799Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:29:05.115810Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:29:05.115816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:29:05.115823Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:29:05.115828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:29:05.115853Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:29:05.115858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:05.115878Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:05.115884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:05.115897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:05.115902Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.115909Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:05.115914Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:05.115919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:05.115987Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:05.115991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:05.123361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.123463Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.123532Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.123600Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.134284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.139822Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.141116Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.142282Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.460549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668216272316449:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.460582Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.506763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnStore, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.509428Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:05.509480Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:05.509572Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:05.509636Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:29:05.516297Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668216272316499:2433], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.516335Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.519699Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668216272316503:2508] txid# 281474976715661, issues: { message: "inappropriate type for max index" severity: 1 } 2025-06-03T10:29:05.525111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668216272316514:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.525136Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.527985Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668216272316518:2515] txid# 281474976715662, issues: { message: "cannot modify index: max index not modifiable" severity: 1 } 2025-06-03T10:29:05.531218Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668216272316529:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.531239Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.534255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnStore, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.536704Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:29:05.536705Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:29:05.536780Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663; 2025-06-03T10:29:05.536854Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715663;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715663;
>> KqpOlapTiering::LocksInterference
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapAggregations::Aggregation_Some [GOOD]
Test command err: Trying to start YDB, gRPC: 22987, MsgBus: 20560 2025-06-03T10:29:04.961401Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668209039186448:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:04.961569Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d7c/r3tmp/tmpwCrdv9/pdisk_1.dat 2025-06-03T10:29:05.052864Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668209039186279:2079] 1748946544959376 != 1748946544959379 2025-06-03T10:29:05.055416Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22987, node 1 2025-06-03T10:29:05.062692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:05.062723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:05.063605Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:05.073519Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:05.073534Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:05.073537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:05.073588Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20560 TClient is connected to server localhost:20560 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:05.156926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:05.160134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:05.166917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.185332Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.185431Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.185509Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.185545Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.185572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.185599Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.185638Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.185664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.185698Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.185725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.185753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.185785Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668213334154282:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.191115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.191152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.191232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.191261Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.191290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.191315Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.191339Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.191364Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.191397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.191423Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.191449Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.191475Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668213334154278:2314];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.196661Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668213334154279:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.196703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668213334154279:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.196765Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668213334154279:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.196795Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668213334154279:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.196831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668213334154279:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:0 ... 
cute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:29:05.204974Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:29:05.204995Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:29:05.205009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:05.205053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:05.205057Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:05.205069Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:05.205073Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.205080Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:05.205085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:05.205089Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:05.205144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:05.205148Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:05.233211Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.238422Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.239559Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.240647Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1448;columns=6; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1448;columns=6; REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT SOME(level) FROM `/Root/tableWithNulls` WHERE id=1 2025-06-03T10:29:05.426361Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668213334154566:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.426416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.426619Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668213334154593:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.427727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:29:05.431785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668213334154595:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:29:05.530889Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668213334154646:2483] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:05.765587Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946545486, txId: 18446744073709551615] shutting down REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT SOME(level) FROM `/Root/tableWithNulls` WHERE id=1 JSON Plan: {"Plan":{"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":6,"Operators":[{"Inputs":[],"Iterator":"precompute_1_0","Name":"Iterator"}],"Node Type":"ConstantExpr","CTE Name":"precompute_1_0"}],"Node Type":"ResultSet_2","PlanNodeType":"ResultSet"},{"PlanNodeId":4,"Subplan Name":"CTE precompute_1_0","Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["tableWithNulls"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id [1, 1]"],"Name":"TableRangeScan","Inputs":[],"Path":"\/Root\/tableWithNulls","E-Rows":"No estimate","Table":"tableWithNulls","ReadRangesKeys":["id"],"ReadColumns":["level"],"SsaProgram":{"Command":[{"GroupBy":{"Aggregates":[{"Function":{"Id":1,"Arguments":[{"Id":3}]},"Column":{"Id":7}}]}},{"Projection":{"Columns":[{"Id":7}]}}]},"E-Cost":"No estimate","ReadRangesExpectedSize":1}],"Node Type":"TableRangeScan"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Final"},{"Inputs":[{"InternalOperatorId":2}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate-Limit-Aggregate"}],"Node Type":"Precompute_1","Parent Relationship":"InitPlan","PlanNodeType":"Materialize"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/tableWithNulls","reads":[{"columns":["level"],"scan_by":["id [1, 1]"],"type":"Scan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":8,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id [1, 1]"],"Name":"TableRangeScan","Path":"\/Root\/tableWithNulls","E-Rows":"No estimate","Table":"tableWithNulls","ReadRangesKeys":["id"],"ReadColumns":["level"],"SsaProgram":{"Command":[{"GroupBy":{"Aggregates":[{"Function":{"Id":1,"Arguments":[{"Id":3}]},"Column":{"Id":7}}]}},{"Projection":{"Columns":[{"Id":7}]}}]},"E-Cost":"No estimate","ReadRangesExpectedSize":1}],"Node Type":"TableRangeScan"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Operators":[{"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Node Type":"ResultSet_2","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} AST: ( (declare %kqp%tx_result_binding_0_0 (TupleType (ListType (TupleType (TupleType (OptionalType (DataType 'Int32)) (DataType 'Int32)) 
(TupleType (OptionalType (DataType 'Int32)) (DataType 'Int32)))))) (declare %kqp%tx_result_binding_1_0 (ListType (StructType '('"column0" (OptionalType (DataType 'Int32)))))) (let $1 '('"_partition_mode" '"single")) (let $2 '('('"_logical_id" '1098) '('"_id" '"fdf530e5-3aa2c993-7d9bd933-d1b57656") $1)) (let $3 (DqPhyStage '() (lambda '() (block '( (let $27 (Int32 '1)) (let $28 (Just $27)) (let $29 '($28 $27)) (let $30 (If (== $27 (Int32 '2147483647)) $29 '((+ $28 $27) (Int32 '0)))) (return (ToStream (Just '((RangeFinalize (RangeMultiply (Uint64 '10000) (RangeUnion (RangeCreate (AsList '($29 $30)))))))))) ))) $2)) (let $4 (DqCnValue (TDqOutput $3 '0))) (let $5 (KqpPhysicalTx '($3) '($4) '() '('('"type" '"compute")))) (let $6 '"%kqp%tx_result_binding_0_0") (let $7 (DataType 'Int32)) (let $8 (OptionalType $7)) (let $9 (TupleType $8 $7)) (let $10 (TupleType (ListType (TupleType $9 $9)))) (let $11 '('('"_logical_id" '1156) '('"_id" '"4dd90f3b-f0bcc94-6605932d-d97e9a74") '('"_wide_channels" (StructType '('_yql_agg_0 $8))))) (let $12 (DqPhyStage '() (lambda '() (block '( (let $31 (KqpTable '"/Root/tableWithNulls" '"72057594046644480:2" '"" '1)) (let $32 '('('"UsedKeyColumns" '('"id")) '('"ExpectedMaxRanges" '1) '('"PointPrefixLen" '1))) (let $33 (KqpWideReadOlapTableRanges $31 %kqp%tx_result_binding_0_0 '('"level") '() $32 (lambda '($34) (TKqpOlapAgg $34 '('('_yql_agg_0 'some '"level")) '())))) (return (FromFlow $33)) ))) $11)) (let $13 (DqCnUnionAll (TDqOutput $12 '0))) (let $14 (DqPhyStage '($13) (lambda '($35) (block '( (let $36 (Bool 'false)) (let $37 (WideCondense1 (ToFlow $35) (lambda '($39) $39) (lambda '($40 $41) $36) (lambda '($42 $43) (Coalesce $43 $42)))) (let $38 (Condense (NarrowMap (Take $37 (Uint64 '1)) (lambda '($44) (AsStruct '('Some0 $44)))) (Nothing (OptionalType (StructType '('Some0 $8)))) (lambda '($45 $46) $36) (lambda '($47 $48) (Just $47)))) (return (FromFlow (Map $38 (lambda '($49) (AsList (AsStruct '('"column0" (Member $49 'Some0)))))))) ))) '('('"_logical_id" '1690) '('"_id" '"4cc840dc-d440862a-1a44dd8-7d5c7d61")))) (let $15 (DqCnValue (TDqOutput $14 '0))) (let $16 (KqpTxResultBinding $10 '0 '0)) (let $17 '('('"type" '"scan"))) (let $18 (KqpPhysicalTx '($12 $14) '($15) '('($6 $16)) $17)) (let $19 '"%kqp%tx_result_binding_1_0") (let $20 (ListType (StructType '('"column0" $8)))) (let $21 '('('"_logical_id" '1786) '('"_id" '"16fcb27e-a96e08b-c85237ab-1b257ecc") $1)) (let $22 (DqPhyStage '() (lambda '() (Iterator %kqp%tx_result_binding_1_0)) $21)) (let $23 (DqCnResult (TDqOutput $22 '0) '('"column0"))) (let $24 (KqpTxResultBinding $20 '1 '0)) (let $25 (KqpPhysicalTx '($22) '($23) '('($19 $24)) $17)) (let $26 '($5 $18 $25)) (return (KqpPhysicalQuery $26 '((KqpTxResultBinding $20 '"2" '0)) '('('"type" '"scan_query")))) )
>> Balancing::Balancing_OneTopic_PQv1 [GOOD]
>> Balancing::Balancing_ManyTopics_TopicApi
>> KqpOlapAggregations::BlockGenericWithDistinct
>> ListObjectsInS3Export::ParametersValidation
>> TopicAutoscaling::CDC_PartitionSplit_AutosplitByLoad [GOOD]
>> TopicAutoscaling::ControlPlane_CDC
>> ReadSessionImplTest::SuccessfulInit [GOOD]
>> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD]
>> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD]
>> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD]
>> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD]
>> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapStatistics::StatsUsageNotPK [GOOD]
Test command err: Trying to start YDB, gRPC: 14706, MsgBus: 29895 2025-06-03T10:29:05.327292Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668214973703029:2203];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:05.327436Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d77/r3tmp/tmpuE4sL1/pdisk_1.dat 2025-06-03T10:29:05.386518Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:05.389633Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668214973702864:2079] 1748946545324334 != 1748946545324337 TServer::EnableGrpc on GrpcPort 14706, node 1 2025-06-03T10:29:05.406875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:05.406890Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:05.406892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:05.406954Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29895 TClient is connected to server localhost:29895 2025-06-03T10:29:05.453816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:05.453862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:05.454908Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:29:05.474784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.488739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.510950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.511005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.511079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.511109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.511141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.511169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.511188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.511226Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.511244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.511269Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.511298Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.511326Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668214973703567:2314];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.516876Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.516911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.516966Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.517008Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.517055Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.517084Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.517111Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.517137Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.517167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.517188Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.517209Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.517230Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668214973703568:2315];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.522433Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.522467Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.522523Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.522549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.522579Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.522602Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668214973703571:2316];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstrac ... 
n=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:29:05.529601Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:29:05.529604Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:29:05.529615Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:29:05.529618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:29:05.529625Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:29:05.529629Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:29:05.529636Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:29:05.529643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:29:05.529648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:29:05.529651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:29:05.529665Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:29:05.529668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:05.529680Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:05.529689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:05.529703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 
2025-06-03T10:29:05.529713Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.529719Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:05.529725Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:05.529738Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:05.529779Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:05.529786Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:05.548810Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.548882Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.548934Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.548983Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.551425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.557912Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.558961Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.559889Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.855341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668214973703881:2428], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.855377Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.918935Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668214973703904:2475] txid# 281474976715660, issues: { message: "ttl update error: Haven\'t MAX-index for TTL column and TTL column is not first column in primary key. in alter constructor STANDALONE_UPDATE" severity: 1 } 2025-06-03T10:29:05.934660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668214973703915:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.934689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.939436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnStore, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.943443Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:29:05.943603Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:29:05.943869Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:29:05.943945Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:29:05.972423Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668214973703958:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.972488Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.974387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.980089Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:29:05.980224Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:29:05.980361Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:29:05.989946Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668214973704001:2450], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.989977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.993216Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668214973704005:2548] txid# 281474976715663, issues: { message: "Haven\'t MAX-index for TTL column and TTL column is not first column in primary key" severity: 1 } >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] >> DataShardReadIterator::ShouldReadKeyCellVec >> KqpOlapAggregations::AggregationCountGroupByPushdown [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> Cdc::InitialScanAndResolvedTimestamps [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> Viewer::Plan2SvgBad [GOOD] >> Viewer::JsonStorageListingV1 [GOOD] >> Viewer::JsonStorageListingV1GroupIdFilter >> DataShardReadIteratorSysTables::ShouldRead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-03T10:29:07.203243Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.203251Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.203255Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.203362Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.210496Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.212387Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.213448Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.215820Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.215832Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.215840Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.215970Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.221418Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.221522Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.221643Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-03T10:29:07.221745Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:29:07.222228Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.222236Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.222241Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.222358Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.236265Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.236376Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.241421Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.241840Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.244769Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:29:07.245346Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.245371Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-03T10:29:07.245944Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.245951Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.245955Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.246060Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.246211Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.246290Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.249403Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-03T10:29:07.249758Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:29:07.249799Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:29:07.249884Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:29:07.249903Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:29:07.253359Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.253381Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:29:07.253398Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:29:07.253470Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 GOT RANGE 0 3 Getting new event 2025-06-03T10:29:07.253519Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:29:07.253525Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:29:07.253529Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:29:07.253558Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 GOT RANGE 3 5 Getting new event 2025-06-03T10:29:07.253571Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-03T10:29:07.253575Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-03T10:29:07.253579Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:29:07.253591Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 GOT RANGE 5 7 Getting new event 2025-06-03T10:29:07.253601Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-03T10:29:07.253606Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-03T10:29:07.253612Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:29:07.253632Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 GOT RANGE 7 9 2025-06-03T10:29:07.257755Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.257766Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.257771Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.257862Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.257970Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.258028Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.261441Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-03T10:29:07.261751Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:29:07.261792Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:29:07.261920Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:29:07.261931Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:29:07.261970Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.261981Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:29:07.261988Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:29:07.261994Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:29:07.262008Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:29:07.262079Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 5). Partition stream id: 1 GOT RANGE 0 5 Getting new event 2025-06-03T10:29:07.262120Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-03T10:29:07.262125Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-03T10:29:07.262130Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-03T10:29:07.262134Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-03T10:29:07.262140Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:29:07.262169Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 GOT RANGE 5 9 2025-06-03T10:29:07.264889Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.264897Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.264902Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.265034Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.265189Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.265249Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.269420Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. 
Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.269689Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:29:07.269736Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:29:07.269825Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-03T10:29:07.269833Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:29:07.269859Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.269866Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:29:07.269871Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-03T10:29:07.269874Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-03T10:29:07.269885Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-03T10:29:07.269889Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-03T10:29:07.269926Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 end_offset: 3 } } RANGE 0 3 2025-06-03T10:29:07.269961Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 12). Partition stream id: 1 Got commit req { offset_ranges { assign_id: 1 start_offset: 3 end_offset: 12 } } RANGE 3 12 |64.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |64.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal |64.8%| [TA] {RESULT} $(B)/ydb/core/load_test/ut_ycsb/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::ResolvedTimestampsContinueAfterMerge [GOOD] Test command err: 2025-06-03T10:28:24.448843Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668037632059270:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:24.448965Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00283e/r3tmp/tmp3Z4z10/pdisk_1.dat TServer::EnableGrpc on GrpcPort 10021, node 1 2025-06-03T10:28:24.549683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:24.549724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:24.551016Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:24.553199Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:24.575841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:24.575859Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:24.575862Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:24.575918Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:24.582351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.589756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:28:24.604634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.615873Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7511668037632059858:2308] 2025-06-03T10:28:24.615970Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:24.621502Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:24.621541Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:24.621751Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:24.621758Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:24.621764Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:24.621820Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 
2025-06-03T10:28:24.621829Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:24.621839Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7511668037632059872:2308] in generation 1 2025-06-03T10:28:24.625762Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:24.633484Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:24.633579Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:24.633596Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7511668037632059874:2309] 2025-06-03T10:28:24.633605Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:24.633608Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:24.633613Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.633674Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:24.633699Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:24.633712Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.633719Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.633730Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:24.633734Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.664134Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668037632059854:2295], serverId# [1:7511668037632059877:2306], sessionId# [0:0:0] 2025-06-03T10:28:24.664192Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:24.664264Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:24.664304Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:24.664589Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.665090Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:24.665118Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:24.666381Z node 1 :TX_DATASHARD DEBUG: 
datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668037632059890:2313], serverId# [1:7511668037632059892:2315], sessionId# [0:0:0] 2025-06-03T10:28:24.666399Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.667466Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1748946504711 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946504711 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:24.667483Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.667520Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.667528Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:24.667544Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1748946504711:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:24.667614Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1748946504711:281474976715657 keys extracted: 0 2025-06-03T10:28:24.667651Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:24.667672Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.667695Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:24.668161Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:24.669019Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.669308Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1748946504710 2025-06-03T10:28:24.669318Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.669324Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1748946504718 2025-06-03T10:28:24.669334Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1748946504711} 2025-06-03T10:28:24.669341Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.669350Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.669355Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 
72075186224037888 2025-06-03T10:28:24.669359Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:24.669374Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1748946504711 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7511668037632059589:2143], exec latency: 1 ms, propose latency: 1 ms 2025-06-03T10:28:24.669385Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:24.669399Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.670164Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7511668037632059874:2309][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-03T10:28:24.670612Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:24.670624Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:24.675302Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.675338Z node 1 :TX_DATASHARD DEBUG: datashard__ ... 89, Partition: 0, State: StateIdle] m0000000000p72075186224037893 2025-06-03T10:29:06.673873Z node 24 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:29:06.673878Z node 24 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:29:06.673884Z node 24 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:29:06.673936Z node 24 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:29:06.674352Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 ... wait for final heartbeat >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-03T10:29:06.674848Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:06.674869Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:06.675042Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 2 max time lag 0ms effective offset 0 2025-06-03T10:29:06.675055Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 2 2025-06-03T10:29:06.675071Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 
2025-06-03T10:29:06.675082Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:29:06.675233Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:29:06.689516Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:29:06.689633Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:29:06.689702Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:06.689713Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:06.689744Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:29:06.689825Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:61: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimr::NPQ::TEvPartitionWriter::TEvInitResult { SessionId: TxId: Success { OwnerCookie: 72075186224037893|a72210e7-c6846d81-30759a28-65bb58a4_0 SourceIdInfo: SourceId: "\00072075186224037893" SeqNo: 0 Offset: 2 WriteTimestampMS: 0 Explicit: true State: STATE_REGISTERED } } 2025-06-03T10:29:06.689848Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037893:1][24:1294:3021] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:29:06.689883Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 3] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:29:06.689959Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:06.689966Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:06.689999Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-03T10:29:06.690031Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:06.690037Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:06.690056Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037893' SeqNo: 1 partNo : 0 messageNo: 1 size 26 offset: -1 2025-06-03T10:29:06.690107Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037893' version v6000/0 2025-06-03T10:29:06.690124Z node 24 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 
72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-06-03T10:29:06.690154Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-03T10:29:06.690222Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 107 count 1 nextOffset 3 batches 1 2025-06-03T10:29:06.690286Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 93 WTime 6505 2025-06-03T10:29:06.690319Z node 24 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:29:06.690325Z node 24 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:29:06.690332Z node 24 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:29:06.690337Z node 24 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:29:06.690342Z node 24 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 2025-06-03T10:29:06.690347Z node 24 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-06-03T10:29:06.690352Z node 24 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:29:06.690357Z node 24 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:29:06.690363Z node 24 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:29:06.690397Z node 24 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:29:06.690413Z node 24 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 93 2025-06-03T10:29:06.690861Z node 24 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 93 actorID [24:1266:3001] 2025-06-03T10:29:06.690948Z node 24 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 2 partno 0 count 1 parts 0 size 93 2025-06-03T10:29:06.701511Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:29:06.701576Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:29:06.701608Z node 24 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037893', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 1, partNo: 0, Offset: 2 is stored on disk 2025-06-03T10:29:06.701713Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:29:06.701852Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037893:1][0][72075186224037889][24:1297:3021] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037893" SeqNo: 1 Offset: 2 WriteTimestampMS: 6505 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 1 } } } 2025-06-03T10:29:06.701885Z node 24 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037893:1][24:1294:3021] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:29:06.701953Z node 24 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037893 2025-06-03T10:29:06.701969Z node 24 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 1, at tablet: 72075186224037893 2025-06-03T10:29:06.712917Z node 24 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037893 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-03T10:29:07.141595Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:07.141631Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:07.141691Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 3 max time lag 0ms effective offset 0 2025-06-03T10:29:07.141704Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 1 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 3 2025-06-03T10:29:07.141719Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 1. All data is from uncompacted head. 
2025-06-03T10:29:07.141729Z node 24 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:29:07.141790Z node 24 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 |64.8%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut_selfheal/ydb-core-mind-bscontroller-ut_selfheal >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapAggregations::AggregationCountGroupByPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 5163, MsgBus: 21012 2025-06-03T10:29:03.784220Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668206574327615:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:03.785494Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d86/r3tmp/tmpayH7eh/pdisk_1.dat 2025-06-03T10:29:03.870573Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.880898Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:03.880916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 5163, node 1 2025-06-03T10:29:03.881777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:03.893493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:03.893507Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:03.893509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:03.893553Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21012 TClient is connected to server localhost:21012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
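Note: the JSON plan and AST printed at the end of this test block describe a grouped count over the 'level' column of /Root/olapStore/olapTable, with the aggregate pushed down into the column shard scan (the SsaProgram GroupBy node, KqpOlapAggregations::AggregationCountGroupByPushdown). A minimal YQL query of that shape, reconstructed from the plan rather than taken from the test source, would look roughly like:

    -- Hypothetical reconstruction from the plan below; the test's actual
    -- query text is not shown in this log.
    SELECT level, COUNT(level) AS column1
    FROM `/Root/olapStore/olapTable`
    GROUP BY level
    ORDER BY level;

The plan confirms the pushdown: the TableFullScan node reads only the 'level' column and carries the count aggregate inside its SsaProgram, so grouping happens in the scan rather than in a separate compute stage.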
2025-06-03T10:29:03.981123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.984820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:03.988774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:04.006737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:04.006814Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:04.006881Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:04.006906Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:04.006929Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:04.006951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:04.006977Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:04.007002Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:04.007022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:04.007045Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:04.007065Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:04.007085Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206574328199:2316];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:04.015307Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:04.015333Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:04.015386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:04.015403Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:04.015419Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:04.015434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:04.015447Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:04.015461Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:04.015474Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:04.015487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:04.015500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:04.015513Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206574328196:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:04.019612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:04.019659Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:04.019722Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:04.019744Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:04.019767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:04.019793Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206574328198:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=n ... 
2025-06-03T10:29:05.247906Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:05.247927Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:05.247932Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:05.247946Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:05.247951Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.247957Z node 2 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:29:05.247963Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:05.247968Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:05.248024Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:05.248028Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:05.274727Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.274807Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.274889Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.274969Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:05.278316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.288538Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.289535Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:29:05.290506Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1175912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=8228912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=8228912;columns=5; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=12930912;columns=5; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=12930912;columns=5; 2025-06-03T10:29:05.563018Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668214537173257:2406], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.563047Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668214537173247:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.563140Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.564017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:29:05.567823Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668214537173261:2407], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:29:05.657457Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668214537173312:2513] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:07.081135Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946545619, txId: 18446744073709551615] shutting down JSON Plan: {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["olapStore\/olapTable"],"PlanNodeId":1,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["timestamp (-∞, +∞)","uid (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/olapStore\/olapTable","E-Rows":"No estimate","Table":"olapStore\/olapTable","ReadColumns":["level"],"SsaProgram":{"Command":[{"GroupBy":{"Aggregates":[{"Function":{"Id":2,"Arguments":[{"Id":4}]},"Column":{"Id":6}}],"KeyColumns":[{"Id":4}]}},{"Projection":{"Columns":[{"Id":6},{"Id":4}]}}]},"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"HashShuffle","KeyColumns":["level"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"SortBy":"row.level","Name":"Sort"},{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Aggregate","Phase":"Final"}],"Node Type":"Sort-Aggregate"}],"Node Type":"Merge","SortColumns":["level (Asc)"],"PlanNodeType":"Connection"}],"Node Type":"Stage"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/olapStore\/olapTable","reads":[{"columns":["level"],"scan_by":["timestamp (-∞, +∞)","uid (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["timestamp (-∞, +∞)","uid (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/olapStore\/olapTable","E-Rows":"No estimate","Table":"olapStore\/olapTable","ReadColumns":["level"],"SsaProgram":{"Command":[{"GroupBy":{"Aggregates":[{"Function":{"Id":2,"Arguments":[{"Id":4}]},"Column":{"Id":6}}],"KeyColumns":[{"Id":4}]}},{"Projection":{"Columns":[{"Id":6},{"Id":4}]}}]},"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Node Type":"HashShuffle (KeyColumns: [\"level\"])","PlanNodeType":"Connection"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Operators":[{"SortBy":"row.level","Name":"Sort"}],"Node Type":"Sort"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} AST: ( (let $1 (DataType 'Uint64)) (let $2 '('"level" (OptionalType (DataType 'Int32)))) (let $3 '('('"_logical_id" '599) '('"_id" '"e3291bf1-e89f336c-2fd2d007-a85097af") '('"_wide_channels" (StructType '('_yql_agg_0 $1) $2)))) (let $4 (DqPhyStage '() (lambda '() (block '( (let $13 (KqpTable '"/Root/olapStore/olapTable" '"72057594046644480:3" '"" '1)) (let $14 
'('"level")) (let $15 (KqpWideReadOlapTableRanges $13 (Void) $14 '() '() (lambda '($16) (TKqpOlapAgg $16 '('('_yql_agg_0 'count '"level")) $14)))) (return (FromFlow $15)) ))) $3)) (let $5 (DqCnHashShuffle (TDqOutput $4 '0) '('1))) (let $6 (StructType '('"column1" $1) $2)) (let $7 '('('"_logical_id" '1015) '('"_id" '"b2104858-b5997199-833fef5c-1d037b31") '('"_wide_channels" $6))) (let $8 (DqPhyStage '($5) (lambda '($17) (block '( (let $18 (lambda '($29 $30) $30 $29)) (let $19 (WideCombiner (ToFlow $17) '"" (lambda '($20 $21) $21) (lambda '($22 $23 $24) $23) (lambda '($25 $26 $27 $28) (AggrAdd $26 $28)) $18)) (return (FromFlow (WideSort $19 '('('1 (Bool 'true)))))) ))) $7)) (let $9 (DqCnMerge (TDqOutput $8 '0) '('('1 '"Asc")))) (let $10 (DqPhyStage '($9) (lambda '($31) (FromFlow (NarrowMap (ToFlow $31) (lambda '($32 $33) (AsStruct '('"column1" $32) '('"level" $33)))))) '('('"_logical_id" '1027) '('"_id" '"ece7c41f-36f5f5f5-66c489ab-511a199")))) (let $11 '($4 $8 $10)) (let $12 (DqCnResult (TDqOutput $10 '0) '('"level" '"column1"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $11 '($12) '() '('('"type" '"scan")))) '((KqpTxResultBinding (ListType $6) '0 '0)) '('('"type" '"scan_query")))) ) >> TExportToS3WithRebootsTests::ForgetShouldSucceedOnSingleTable [GOOD] >> TExportToS3WithRebootsTests::ForgetShouldSucceedOnSingleShardTableWithChangefeed >> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_change_exchange/unittest >> Cdc::InitialScanAndResolvedTimestamps [GOOD] Test command err: 2025-06-03T10:28:23.875745Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668037052671201:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:23.875847Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002845/r3tmp/tmpgL4iJ1/pdisk_1.dat 2025-06-03T10:28:23.966210Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:23.966431Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668037052671039:2079] 1748946503874085 != 1748946503874088 TServer::EnableGrpc on GrpcPort 29480, node 1 2025-06-03T10:28:23.988473Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:23.988487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:23.988490Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:23.988546Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:23.991856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.033518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:24.033550Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Disconnected -> Connecting 2025-06-03T10:28:24.033655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:24.037406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:24.058398Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:7511668041347638965:2308] 2025-06-03T10:28:24.058501Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:28:24.064303Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:28:24.064349Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:28:24.064555Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:28:24.064566Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:28:24.064572Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:28:24.064634Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:28:24.064648Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:28:24.064657Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:7511668041347638979:2308] in generation 1 2025-06-03T10:28:24.071855Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:28:24.076013Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:28:24.076210Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:28:24.076230Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:7511668041347638981:2309] 2025-06-03T10:28:24.076233Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:24.076238Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:28:24.076242Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.076294Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:28:24.076318Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:28:24.076322Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.076327Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.076336Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:28:24.076339Z node 1 
:TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.088255Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668041347638956:2292], serverId# [1:7511668041347638984:2306], sessionId# [0:0:0] 2025-06-03T10:28:24.088330Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:28:24.088424Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:28:24.088458Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:28:24.088750Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.090644Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:28:24.090689Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:28:24.091339Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:7511668041347638997:2313], serverId# [1:7511668041347638998:2314], sessionId# [0:0:0] 2025-06-03T10:28:24.092329Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:69: Planned transaction txId 281474976715657 at step 1748946504137 at tablet 72075186224037888 { Transactions { TxId: 281474976715657 AckTo { RawX1: 0 RawX2: 0 } } Step: 1748946504137 MediatorID: 72057594046382081 TabletID: 72075186224037888 } 2025-06-03T10:28:24.092337Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.092372Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.092385Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.092392Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 1 2025-06-03T10:28:24.092403Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:333: Found ready operation [1748946504137:281474976715657] in PlanQueue unit at 72075186224037888 2025-06-03T10:28:24.092480Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:634: LoadTxDetails at 72075186224037888 loaded tx from db 1748946504137:281474976715657 keys extracted: 0 2025-06-03T10:28:24.092510Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 1 immediate 0 planned 1 2025-06-03T10:28:24.092522Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:28:24.092531Z node 1 :TX_DATASHARD INFO: create_table_unit.cpp:69: Trying to CREATE TABLE at 72075186224037888 tableId# [OwnerId: 72057594046644480, LocalPathId: 2] schema version# 1 2025-06-03T10:28:24.092851Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration 
request to time cast Ready tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:28:24.092949Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:28:24.093115Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 1748946504136 2025-06-03T10:28:24.093117Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.093206Z node 1 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 1748946504137} 2025-06-03T10:28:24.093215Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.093223Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 0 next step 1748946504137 2025-06-03T10:28:24.093229Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:28:24.093232Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:28:24.093236Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037888 2025-06-03T10:28:24.093265Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [1748946504137 : 281474976715657] from 72075186224037888 at tablet 72075186224037888 send result to client [1:7511668037052671477:2191], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:28:24.093273Z node 1 :TX_DATASHARD INFO: datashard.cpp:1590: 72075186224037888 Sending notify to schemeshard 72057594046644480 txId 281474976715657 state Ready TxInFly 0 2025-06-03T10:28:24.093280Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:28:24.096840Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:153: [ChangeSender][72075186224037888:1][1:7511668041347638981:2309][Inactive] Handle NKikimrChangeExchange.TEvActivateSender 2025-06-03T10:28:24.097200Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715657 datashard 72075186224037888 state Ready 2025-06-03T10:28:24.097223Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:28:24.102304Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at datashard 72075186224037888 2025-06-03T10:28:24.102350Z node 1 :TX_ ... 
24037888 2025-06-03T10:29:07.322418Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-03T10:29:07.322472Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:874: PersistChangeRecord: record: { Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }, at tablet: 72075186224037888 2025-06-03T10:29:07.322801Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:42: [CdcStreamHeartbeat] Emit change records: edge# v7500/18446744073709551615, at tablet# 72075186224037888 2025-06-03T10:29:07.323299Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715662 datashard 72075186224037888 state Ready 2025-06-03T10:29:07.323317Z node 27 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037888 Got TEvSchemaChangedResult from SS at 72075186224037888 2025-06-03T10:29:07.341623Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 1 change record(s): at tablet# 72075186224037888 2025-06-03T10:29:07.341666Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1170: EnqueueChangeRecords: at tablet: 72075186224037888, records: { Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 } 2025-06-03T10:29:07.341682Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:07.341697Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:3812: Waiting for PlanStep# 9000 from mediator time cast 2025-06-03T10:29:07.341706Z node 27 :TX_DATASHARD INFO: cdc_stream_heartbeat.cpp:78: [CdcStreamHeartbeat] Enqueue 0 change record(s): at tablet# 72075186224037888 2025-06-03T10:29:07.341711Z node 27 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:07.341744Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender.cpp:71: [ChangeSender][72075186224037888:1][27:682:2578] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-03T10:29:07.341804Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:628: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 4 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] BodySize: 0 }] } 2025-06-03T10:29:07.341872Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:215: TTxRequestChangeRecords Execute: at tablet# 72075186224037888 2025-06-03T10:29:07.341935Z node 27 :TX_DATASHARD DEBUG: datashard_change_sending.cpp:235: Send 1 change records: to# [27:971:2769], at tablet# 72075186224037888 2025-06-03T10:29:07.341944Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:260: TTxRequestChangeRecords Complete: sent# 1, forgotten# 0, left# 0, at tablet# 72075186224037888 2025-06-03T10:29:07.341968Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:633: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 
7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:29:07.342003Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:111: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:1052:2769] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 4 Group: 0 Step: 6000 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 7] Kind: CdcHeartbeat Source: Unspecified Body: 0b TableId: [OwnerId: 72057594046644480, LocalPathId: 2] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:29:07.342103Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:07.342116Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:07.342166Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 2 requestId: cookie: 2 2025-06-03T10:29:07.342201Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:07.342206Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:07.342222Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037889] got client message topic: Table/Stream/streamImpl partition: 0 SourceId: '\00072075186224037888' SeqNo: 4 partNo : 0 messageNo: 3 size 26 offset: -1 2025-06-03T10:29:07.342270Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1162: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 process heartbeat sourceId '\00072075186224037888' version v6000/0 2025-06-03T10:29:07.342286Z node 27 :PERSQUEUE INFO: partition_write.cpp:1658: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 emit heartbeat v6000/0 2025-06-03T10:29:07.342306Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob processing sourceId '\00072075186224037889' seqNo 0 partNo 0 2025-06-03T10:29:07.342358Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Topic 'Table/Stream/streamImpl' partition 0 part blob complete sourceId '\00072075186224037889' seqNo 0 partNo 0 FormedBlobsCount 0 NewHead: Offset 3 PartNo 0 PackedSize 107 count 1 nextOffset 4 batches 1 2025-06-03T10:29:07.342408Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Add new write blob: topic 'Table/Stream/streamImpl' partition 0 compactOffset 3,1 HeadOffset 0 endOffset 3 curOffset 4 d0000000000_00000000000000000003_00000_0000000001_00000| size 93 WTime 7451 2025-06-03T10:29:07.342429Z node 27 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037889, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:29:07.342434Z node 27 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:29:07.342440Z node 27 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037889, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:29:07.342444Z node 27 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 
72075186224037889, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:29:07.342449Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] m0000000000p72075186224037889 2025-06-03T10:29:07.342453Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] d0000000000_00000000000000000003_00000_0000000001_00000| 2025-06-03T10:29:07.342458Z node 27 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037889, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:29:07.342462Z node 27 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037889, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:29:07.342467Z node 27 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037889, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:29:07.342485Z node 27 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:29:07.342496Z node 27 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 3 partNo 0 count 1 size 93 2025-06-03T10:29:07.342758Z node 27 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 3 count 1 size 93 actorID [27:918:2727] 2025-06-03T10:29:07.342796Z node 27 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037889' partition 0 offset 3 partno 0 count 1 parts 0 size 93 2025-06-03T10:29:07.357846Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 44 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:29:07.357902Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037889, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:29:07.357927Z node 27 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Answering for message sourceid: '\00072075186224037888', Topic: 'Table/Stream/streamImpl', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-03T10:29:07.357998Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 3 requestId: cookie: 2 2025-06-03T10:29:07.358101Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:160: [CdcChangeSenderPartition][72075186224037888:1][0][72075186224037889][27:1052:2769] Handle NKikimrClient.TResponse { SessionId: TxId: Success { Response: Status: 1 ErrorCode: OK PartitionResponse { CmdWriteResult { AlreadyWritten: false SourceId: "\00072075186224037888" SeqNo: 4 Offset: 3 WriteTimestampMS: 7451 PartitionQuotedTimeMs: 0 TotalTimeInPartitionQueueMs: 0 WriteTimeMs: 0 TopicQuotedTimeMs: 0 WrittenInTx: false } Cookie: 2 } } } 2025-06-03T10:29:07.358124Z node 27 :CHANGE_EXCHANGE DEBUG: change_sender_cdc_stream.cpp:643: [CdcChangeSenderMain][72075186224037888:1][27:971:2769] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 0 } 2025-06-03T10:29:07.358176Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:310: TTxRemoveChangeRecords Execute: records# 1, at tablet# 72075186224037888 2025-06-03T10:29:07.358184Z node 27 :TX_DATASHARD DEBUG: datashard.cpp:1087: RemoveChangeRecord: order: 4, at tablet: 72075186224037888 2025-06-03T10:29:07.369627Z node 27 :TX_DATASHARD INFO: datashard_change_sending.cpp:335: TTxRemoveChangeRecords Complete: removed# 1, left# 0, at tablet# 72075186224037888 >>>>> GetRecords path=/Root/Table/Stream partitionId=0 2025-06-03T10:29:07.517985Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'streamImpl' requestId: 2025-06-03T10:29:07.518020Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037889] got client message batch for topic 'Table/Stream/streamImpl' partition 0 2025-06-03T10:29:07.518094Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 Topic 'Table/Stream/streamImpl' partition 0 user $without_consumer offset 0 count 10000 size 26214400 endOffset 4 max time lag 0ms effective offset 0 2025-06-03T10:29:07.518105Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037889, Partition: 0, State: StateIdle] read cookie 2 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 4 2025-06-03T10:29:07.518138Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037889, Partition: 0, State: StateIdle] Reading cookie 2. All data is from uncompacted head. 
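Note: the heartbeat records above (Kind: CdcHeartbeat, "emit heartbeat v6000/0") are what a changefeed with resolved timestamps produces for the Cdc::InitialScanAndResolvedTimestamps test. A sketch of the kind of changefeed such a test exercises, assuming the publicly documented YQL changefeed options rather than the test's actual setup code (which is not shown in this log), would be:

    -- Hedged sketch: option names follow the public YDB changefeed syntax;
    -- the mode and interval here are illustrative, not taken from the test.
    ALTER TABLE `/Root/Table` ADD CHANGEFEED `Stream` WITH (
        MODE = 'KEYS_ONLY',
        FORMAT = 'JSON',
        INITIAL_SCAN = TRUE,
        RESOLVED_TIMESTAMPS = Interval('PT1S')
    );

This matches the paths seen in the log: the changefeed 'Stream' on table '/Root/Table' is backed by the 'streamImpl' topic that the PERSQUEUE tablet writes heartbeats into.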
2025-06-03T10:29:07.518148Z node 27 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:29:07.518314Z node 27 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'streamImpl' partition: 0 messageNo: 0 requestId: cookie: 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::Plan2SvgBad [GOOD] Test command err: Data has built Merge = 0.04491924534 Data has merged 2025-06-03T10:28:57.346180Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668180704037875:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:57.346208Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:57.401592Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668180704037854:2079] 1748946537346030 != 1748946537346033 2025-06-03T10:28:57.404249Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32121, node 1 2025-06-03T10:28:57.418810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:57.418830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:57.418833Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:57.418888Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11750 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:57.477589Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:57.477624Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:57.478545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:57.478919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:57.488096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.488991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:57.843170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668180704038552:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.843179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668180704038560:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.843194Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:57.844088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-06-03T10:28:57.845994Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668180704038566:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:28:57.928865Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668180704038617:2345] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:58.336780Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668184071609742:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:58.336815Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:58.354204Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:58.354539Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668184071609720:2079] 1748946538336563 != 1748946538336566 TServer::EnableGrpc on GrpcPort 25698, node 2 2025-06-03T10:28:58.363122Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:58.363136Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:58.363138Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:58.363189Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:28:58.441823Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:58.441859Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:58.442262Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.443412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:58.443637Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:58.448974Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:28:58.449979Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:58.803053Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668184071610423:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.803093Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668184071610415:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.803111Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:58.803930Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-06-03T10:28:58.805902Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668184071610429:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:28:58.889874Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668184071610480:2344] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:59.221622Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668187927129750:20 ... TPoolCreatorActor] ActorId: [5:7511668202064172525:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:29:02.104354Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511668202064172576:2345] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:02.111852Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:02.169495Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.169510Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.235440Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.235464Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.297687Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.297712Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.357748Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.357768Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.417763Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.417788Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.481710Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.481734Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.541720Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.541741Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.606262Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.606284Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.665604Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.665633Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.733641Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.733666Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.782493Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.782512Z node 5 :TICKET_PARSER 
INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.861603Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.861624Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:02.941696Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:02.941716Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:03.025734Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:03.025764Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:03.093731Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:03.093755Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:03.153671Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:03.153696Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:03.155343Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.155849Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715680:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.156106Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:29:04.308211Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:04.308244Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:04.378077Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:04.378094Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:04.434151Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:04.434185Z node 5 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:04.519402Z node 5 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost 2025-06-03T10:29:04.519476Z node 5 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [5:7511668210654108209:2570] TxId: 281474976715693. Ctx: { TraceId: 01jwtncsv38cjbmzn7hf8c37gx, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=OWU0MWRiMjQtYzk5NDRlZDktZTZjMmRkODEtMjI4OWNhZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Client lost } 2025-06-03T10:29:04.519533Z node 5 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=5&id=OWU0MWRiMjQtYzk5NDRlZDktZTZjMmRkODEtMjI4OWNhZDM=, ActorId: [5:7511668210654108190:2570], ActorState: ExecuteState, TraceId: 01jwtncsv38cjbmzn7hf8c37gx, Create QueryResponse for error on request, msg: 2025-06-03T10:29:04.519646Z node 5 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [5:7511668210654108215:2575], TxId: 281474976715693, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=5&id=OWU0MWRiMjQtYzk5NDRlZDktZTZjMmRkODEtMjI4OWNhZDM=. TraceId : 01jwtncsv38cjbmzn7hf8c37gx. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [5:7511668210654108209:2570], status: ABORTED, reason: {
: Error: Terminate execution } 2025-06-03T10:29:04.519672Z node 5 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946544562, txId: 281474976715692] shutting down 2025-06-03T10:29:05.164682Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511668213473740260:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:05.164726Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:05.193882Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:05.194200Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511668213473740073:2079] 1748946545162822 != 1748946545162825 TServer::EnableGrpc on GrpcPort 15490, node 6 2025-06-03T10:29:05.205192Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:05.205201Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:05.205203Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:05.205276Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29324 2025-06-03T10:29:05.267693Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:05.267731Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:05.269193Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:05.738062Z node 6 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8C3E2D8D): Could not find correct token validator test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:06.537844Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:06.545449Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:06.549457Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7511668219566104677:2079] 1748946546462272 != 1748946546462275 TServer::EnableGrpc on GrpcPort 14643, node 7 2025-06-03T10:29:06.585750Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:06.585766Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:06.585770Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:06.585832Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:06.597866Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:06.597905Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:06.598360Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16647 2025-06-03T10:29:07.197887Z node 7 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8C3E2D8D): Could not find correct token validator >> KqpOlapAggregations::BlockGenericWithDistinct [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-03T10:29:07.181532Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.181547Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.181552Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.181694Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.183749Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:29:07.183803Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.186575Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.186585Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.186590Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.186679Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.186821Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:29:07.186843Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.187249Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.187254Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.187258Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.193380Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:29:07.193407Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.193413Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.193458Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-03T10:29:07.194189Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.194196Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.194200Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.194664Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-03T10:29:07.194688Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.194693Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.194707Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-03T10:29:07.197828Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:29:07.197838Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:29:07.197843Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.203984Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.204207Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.206479Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:29:07.207102Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.207240Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-03T10:29:07.211806Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-03T10:29:07.212285Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.212299Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:29:07.212305Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:29:07.212309Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-03T10:29:07.212317Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-03T10:29:07.212321Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-03T10:29:07.212326Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-03T10:29:07.212330Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-03T10:29:07.212339Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-03T10:29:07.212343Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-03T10:29:07.212347Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-03T10:29:07.212352Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-03T10:29:07.212356Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-03T10:29:07.212360Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-03T10:29:07.212364Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-03T10:29:07.212368Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-03T10:29:07.212390Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-03T10:29:07.212394Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-03T10:29:07.212398Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-03T10:29:07.212402Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-03T10:29:07.212407Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-03T10:29:07.212411Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-03T10:29:07.212415Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-03T10:29:07.212420Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-03T10:29:07.212424Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-03T10:29:07.212429Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-03T10:29:07.212432Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-03T10:29:07.212437Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-03T10:29:07.212440Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-03T10:29:07.212444Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-03T10:29:07.212448Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-03T10:29:07.212452Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-03T10:29:07.212468Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-03T10:29:07.212472Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-03T10:29:07.212475Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-03T10:29:07.212479Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-03T10:29:07.212486Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-03T10:29:07.212490Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-03T10:29:07.212494Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-03T10:29:07.212498Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-03T10:29:07.212501Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-03T10:29:07.212505Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-03T10:29:07.212508Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-03T10:29:07.212513Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-03T10:29:07.212517Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-03T10:29:07.212521Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-03T10:29:07.212525Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-03T10:29:07.212529Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-03T10:29:07.212533Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-03T10:29:07.212537Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-03T10:29:07.212554Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-03T10:29:07.212708Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-03T10:29:07.212747Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-03T10:29:07.212752Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-03T10:29:07.212757Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-03T10:29:07.212761Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-03T10:29:07.212765Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-03T10:29:07.212769Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-03T10:29:07.212773Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-03T10:29:07.212777Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-03T10:29:07.212782Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-03T10:29:07.212786Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-03T10:29:07.212789Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-03T10:29:07.212793Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-03T10:29:07.212797Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-03T10:29:07.212801Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-03T10:29:07.212804Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-03T10:29:07.212808Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-03T10:29:07.212815Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-03T10:29:07.212819Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-03T10:29:07.212823Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-03T10:29:07.212827Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-03T10:29:07.212831Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-03T10:29:07.212834Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-03T10:29:07.212838Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-03T10:29:07.212842Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-03T10:29:07.212846Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-03T10:29:07.212850Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-03T10:29:07.212854Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-03T10:29:07.212858Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-03T10:29:07.212862Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-03T10:29:07.212866Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-03T10:29:07.212870Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-03T10:29:07.212874Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-03T10:29:07.212882Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-03T10:29:07.212887Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-03T10:29:07.212890Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-03T10:29:07.212894Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-03T10:29:07.212898Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-03T10:29:07.212902Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-03T10:29:07.212905Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-03T10:29:07.212909Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-03T10:29:07.212913Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-03T10:29:07.212917Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-03T10:29:07.212920Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-03T10:29:07.212924Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-03T10:29:07.212929Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-03T10:29:07.212933Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-03T10:29:07.212936Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-03T10:29:07.212940Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-03T10:29:07.212944Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-03T10:29:07.212948Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-03T10:29:07.212955Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-03T10:29:07.213392Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:29:07.213925Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.213931Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.213936Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.214033Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:29:07.214153Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.214185Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.217214Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.371224Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.373451Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:29:07.373499Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.373508Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-03T10:29:07.373546Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:29:07.573991Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-03T10:29:07.677424Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-03T10:29:07.677500Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:29:07.677565Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-03T10:29:07.678148Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.678153Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.678157Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:29:07.682715Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:29:07.682906Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:29:07.682981Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.685378Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:29:07.783701Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:29:07.783783Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:29:07.783802Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:29:07.783812Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-03T10:29:07.783841Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [0, 3). Partition stream id: 1 2025-06-03T10:29:07.783872Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:29:07.783949Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-03T10:29:07.783981Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-03T10:29:07.784023Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> Viewer::SharedDoesntShowExclusiveNodes [GOOD] >> ListObjectsInS3Export::ParametersValidation [GOOD] >> Viewer::ServerlessWithExclusiveNodesCheckTable |64.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |64.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer |64.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/ydb-core-tx-schemeshard-ut_transfer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapAggregations::BlockGenericWithDistinct [GOOD] Test command err: Trying to start YDB, gRPC: 28356, MsgBus: 24437 2025-06-03T10:29:07.163027Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668222361919379:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:07.168577Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d6a/r3tmp/tmpUBLYW9/pdisk_1.dat 2025-06-03T10:29:07.229198Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:07.233421Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668222361919194:2079] 1748946547095002 != 1748946547095005 TServer::EnableGrpc on GrpcPort 28356, node 1 2025-06-03T10:29:07.265551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:07.265567Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:07.265570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:07.265630Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:07.286078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:07.286119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:07.293837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24437 TClient is connected to server localhost:24437 WaitRootIsUp 'Root'... 
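
[editor's note] The ReadSessionImplTest trace above walks the full client lifecycle: reconnect with a read-size budget, "Confirm partition stream create", decompression and "Take Data" batches, "Commit offsets" followed by the "Committed response", and finally "Confirm partition stream destroy" / "Abort session". An approximate consumer skeleton for that lifecycle, written against the newer NYdb::NTopic API (the test itself exercises the older persqueue_public API, so event and method names here are assumptions based on the current SDK):

    #include <ydb/public/sdk/cpp/client/ydb_topic/topic.h>

    void ConsumeLoop(NYdb::TDriver& driver) {
        NYdb::NTopic::TTopicClient client(driver);
        auto session = client.CreateReadSession(
            NYdb::NTopic::TReadSessionSettings()
                .ConsumerName("test-consumer")   // assumed consumer name
                .AppendTopics("TestTopic"));
        while (true) {
            session->WaitEvent().Wait();         // cf. "After sending read request"
            for (auto& ev : session->GetEvents(/*block=*/false)) {
                using namespace NYdb::NTopic;
                if (auto* data = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&ev)) {
                    // "Take Data. Partition N. Read: {batch, offset}" in the trace
                    data->Commit();              // -> "Commit offsets [0, 3)"
                } else if (auto* start = std::get_if<TReadSessionEvent::TStartPartitionSessionEvent>(&ev)) {
                    start->Confirm();            // -> "Confirm partition stream create"
                } else if (auto* stop = std::get_if<TReadSessionEvent::TStopPartitionSessionEvent>(&ev)) {
                    stop->Confirm();             // -> "Confirm partition stream destroy"
                } else if (std::get_if<TSessionClosedEvent>(&ev)) {
                    return;                      // -> "Closing session to cluster"
                }
            }
        }
    }
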
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:07.409369Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:07.421919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:07.435944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:07.458415Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:07.458492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:07.458549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:07.458572Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:07.458598Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:07.458618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:07.458645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:07.458666Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:07.458687Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:07.458708Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:07.458727Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:07.458754Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668222361919882:2315];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:07.469181Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:07.469217Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:07.469275Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:07.469361Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:07.469386Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:07.469408Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:07.469429Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:07.469464Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:07.469488Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:07.469508Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:07.469528Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:07.469549Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668222361919883:2316];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:07.475169Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668222361919881:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:07.475199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668222361919881:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:07.475248Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668222361919881:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:07.475270Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668222361919881:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:07.475299Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668222361919881:2314];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:0 ... 
rewrite=0; 2025-06-03T10:29:07.485144Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:29:07.485149Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:29:07.485273Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:29:07.485277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:29:07.514099Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:07.515377Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:07.516383Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:29:07.517536Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=1448;columns=6; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=1448;columns=6; REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT COUNT(DISTINCT id) FROM `/Root/tableWithNulls` WHERE level = 5 AND Cast(id AS String) = "5"; 2025-06-03T10:29:07.675496Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668222361920169:2436], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:07.675554Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:07.675730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668222361920204:2439], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:07.676749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:29:07.680638Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:29:07.680768Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668222361920206:2440], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:29:07.742760Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668222361920257:2482] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } REQUEST: --!syntax_v1 PRAGMA Kikimr.OptUseFinalizeByKey; SELECT COUNT(DISTINCT id) FROM `/Root/tableWithNulls` WHERE level = 5 AND Cast(id AS String) = "5"; JSON Plan: {"Plan":{"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":5,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":2,"Plans":[{"Tables":["tableWithNulls"],"PlanNodeId":1,"Operators":[{"Inputs":[{"InternalOperatorId":1}],"GroupBy":"item.id","Aggregation":"state","Name":"Aggregate","Phase":"Intermediate"},{"Inputs":[{"InternalOperatorId":2}],"E-Rows":"No estimate","Predicate":"level == 5 AND id == \"5\"","Pushdown":"True","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"},{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id (-∞, +∞)"],"Name":"TableFullScan","Inputs":[],"Path":"\/Root\/tableWithNulls","E-Rows":"No estimate","Table":"tableWithNulls","ReadColumns":["id","level"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":5},"Column":{"Id":7}}},{"Assign":{"Function":{"YqlOperationId":11,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":3},{"Id":7}]},"Column":{"Id":8}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":9}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":8},{"Id":9}]},"Column":{"Id":10}}},{"Assign":{"Function":{"KernelName":"","KernelIdx":2,"FunctionType":2,"Arguments":[{"Id":1}]},"Column":{"Id":11}}},{"Assign":{"Function":{"YqlOperationId":0,"KernelIdx":3,"FunctionType":2,"Arguments":[{"Id":10},{"Id":11}]},"Column":{"Id":12}}},{"Filter":{"Predicate":{"Id":12}}},{"Projection":{"Columns":[{"Id":1}]}}]},"E-Cost":"No estimate"}],"Node Type":"Aggregate-Filter-TableFullScan"}],"Node Type":"HashShuffle","KeyColumns":["id"],"PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Intermediate"},{"Inputs":[{"ExternalPlanNodeId":2}],"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate-Aggregate"}],"Node Type":"UnionAll","PlanNodeType":"Connection"}],"Operators":[{"Inputs":[{"InternalOperatorId":1}],"Name":"Aggregate","Phase":"Final"},{"Inputs":[{"InternalOperatorId":2}],"Name":"Limit","Limit":"1"},{"Inputs":[{"ExternalPlanNodeId":4}],"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate-Limit-Aggregate"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","Stats":{"ResourcePoolId":"default"},"PlanNodeType":"Query"},"meta":{"version":"0.2","type":"query"},"tables":[{"name":"\/Root\/tableWithNulls","reads":[{"columns":["id","level"],"scan_by":["id (-∞, +∞)"],"type":"FullScan"}]}],"SimplifiedPlan":{"PlanNodeId":0,"Plans":[{"PlanNodeId":1,"Plans":[{"PlanNodeId":2,"Plans":[{"PlanNodeId":3,"Plans":[{"PlanNodeId":4,"Plans":[{"PlanNodeId":6,"Plans":[{"PlanNodeId":7,"Plans":[{"PlanNodeId":8,"Plans":[{"PlanNodeId":9,"Plans":[{"PlanNodeId":10,"Plans":[{"PlanNodeId":11,"Operators":[{"Scan":"Parallel","E-Size":"No estimate","ReadRanges":["id (-∞, +∞)"],"Name":"TableFullScan","Path":"\/Root\/tableWithNulls","E-Rows":"No 
estimate","Table":"tableWithNulls","ReadColumns":["id","level"],"SsaProgram":{"Command":[{"Assign":{"Constant":{"Int32":5},"Column":{"Id":7}}},{"Assign":{"Function":{"YqlOperationId":11,"KernelIdx":0,"FunctionType":2,"Arguments":[{"Id":3},{"Id":7}]},"Column":{"Id":8}}},{"Assign":{"Constant":{"Uint8":0},"Column":{"Id":9}}},{"Assign":{"Function":{"YqlOperationId":17,"KernelIdx":1,"FunctionType":2,"Arguments":[{"Id":8},{"Id":9}]},"Column":{"Id":10}}},{"Assign":{"Function":{"KernelName":"","KernelIdx":2,"FunctionType":2,"Arguments":[{"Id":1}]},"Column":{"Id":11}}},{"Assign":{"Function":{"YqlOperationId":0,"KernelIdx":3,"FunctionType":2,"Arguments":[{"Id":10},{"Id":11}]},"Column":{"Id":12}}},{"Filter":{"Predicate":{"Id":12}}},{"Projection":{"Columns":[{"Id":1}]}}]},"E-Cost":"No estimate"}],"Node Type":"TableFullScan"}],"Operators":[{"E-Rows":"No estimate","Predicate":"level == 5 AND id == \"5\"","Pushdown":"True","Name":"Filter","E-Size":"No estimate","E-Cost":"No estimate"}],"Node Type":"Filter"}],"Operators":[{"GroupBy":"item.id","Aggregation":"state","Name":"Aggregate","Phase":"Intermediate"}],"Node Type":"Aggregate"}],"Node Type":"HashShuffle (KeyColumns: [\"id\"])","PlanNodeType":"Connection"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Operators":[{"Name":"Aggregate","Phase":"Intermediate"}],"Node Type":"Aggregate"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Operators":[{"Name":"Limit","Limit":"1"}],"Node Type":"Limit"}],"Operators":[{"Name":"Aggregate","Phase":"Final"}],"Node Type":"Aggregate"}],"Node Type":"ResultSet","PlanNodeType":"ResultSet"}],"Node Type":"Query","OptimizerStats":{"EquiJoinsCount":0,"JoinsCount":0},"PlanNodeType":"Query"}} AST: ( (let $1 (Bool 'false)) (let $2 (DataType 'Int32)) (let $3 '('"id" $2)) (let $4 '('('"_logical_id" '1112) '('"_id" '"dcf48b92-888aad36-635291c8-3ef211bd") '('"_wide_channels" (StructType $3)))) (let $5 (DqPhyStage '() (lambda '() (block '( (let $15 (KqpTable '"/Root/tableWithNulls" '"72057594046644480:2" '"" '1)) (let $16 (KqpBlockReadOlapTableRanges $15 (Void) '('"id" '"level") '() '() (lambda '($18) (block '( (let $19 '('eq '"level" (Int32 '"5"))) (let $20 '('?? 
$19 $1)) (let $21 (KqpOlapApply (lambda '($22) (== (SafeCast $22 (DataType 'String)) (String '"5"))) '((KqpOlapApplyColumnArg (StructType $3 '('"level" (OptionalType $2))) '"id")) '"")) (return (TKqpOlapExtractMembers (KqpOlapFilter $18 (KqpOlapAnd $20 $21)) '('"id"))) ))))) (let $17 (lambda '($23 $24) (BlockAsStruct '('"id" $23)) $24)) (return (FromFlow (WideCombiner (ToFlow (WideFromBlocks (FromFlow (WideMap $16 $17)))) '-1073741824 (lambda '($25) (Member $25 '"id")) (lambda '($26 $27) $26) (lambda '($28 $29 $30) $30) (lambda '($31 $32) $32)))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0))) (let $7 (Uint64 '1)) (let $8 (DataType 'Uint64)) (let $9 '('('"_logical_id" '1817) '('"_id" '"3a3ef507-aa1cc06d-1055d7b8-fdf36465") '('"_wide_channels" (StructType '('_yql_agg_0 (OptionalType $8)))))) (let $10 (DqPhyStage '($6) (lambda '($33) (block '( (let $34 (lambda '($38) $38)) (let $35 (lambda '($39 $40))) (let $36 (WideCombiner (ToFlow $33) '"" $34 $35 $35 $34)) (let $37 (Condense1 (NarrowMap $36 (lambda '($41) (AsStruct '('"id" $41)))) (lambda '($42) $7) (lambda '($43 $44) $1) (lambda '($45 $46) (Inc $46)))) (return (FromFlow (ExpandMap $37 (lambda '($47) (Just $47))))) ))) $9)) (let $11 (DqCnUnionAll (TDqOutput $10 '0))) (let $12 (DqPhyStage '($11) (lambda '($48) (block '( (let $49 (WideCondense1 (ToFlow $48) (lambda '($51) $51) (lambda '($52 $53) $1) (lambda '($54 $55) (IfPresent $54 (lambda '($56) (IfPresent $55 (lambda '($57) (Just (AggrAdd $56 $57))) $54)) $55)))) (let $50 (Condense (NarrowMap (Take $49 $7) (lambda '($58) (AsStruct '('Count0 (Unwrap $58))))) (Nothing (OptionalType (StructType '('Count0 $8)))) (lambda '($59 $60) $1) (lambda '($61 $62) (Just $61)))) (return (FromFlow (Map $50 (lambda '($63) (AsStruct '('"column0" (Coalesce (Member $63 'Count0) (Uint64 '0)))))))) ))) '('('"_logical_id" '2552) '('"_id" '"dd9c090a-9dfdd0b9-a3e65d72-87c4947")))) (let $13 '($5 $10 $12)) (let $14 (DqCnResult (TDqOutput $12 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $13 '($14) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $8))) '0 '0)) '('('"type" '"query")))) ) >> BasicStatistics::Serverless [GOOD] >> TObjectStorageListingTest::Split >> TopicAutoscaling::Simple_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK |64.9%| [TA] $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpImmediateEffects::ConflictingKeyRW1WR2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::Serverless [GOOD] Test command err: 2025-06-03T10:26:26.476080Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
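
[editor's note] The JSON plan and AST just above show the point of KqpOlapAggregations::BlockGenericWithDistinct: the `level == 5 AND CAST(id AS String) == "5"` predicate is compiled into an SsaProgram and pushed down into the column-shard scan (KqpBlockReadOlapTableRanges + KqpOlapFilter), so only matching `id` values reach the COUNT(DISTINCT) aggregation stages. The same plan/AST pair can be fetched outside the test harness with the table client's ExplainDataQuery; a sketch, assuming the standard SDK headers are available:

    #include <ydb/public/sdk/cpp/client/ydb_table/table.h>

    void ExplainDistinctCount(NYdb::NTable::TTableClient& client) {
        client.RetryOperationSync([](NYdb::NTable::TSession session) -> NYdb::TStatus {
            auto result = session.ExplainDataQuery(R"(
                --!syntax_v1
                PRAGMA Kikimr.OptUseFinalizeByKey;
                SELECT COUNT(DISTINCT id) FROM `/Root/tableWithNulls`
                WHERE level = 5 AND CAST(id AS String) = "5";
            )").GetValueSync();
            if (result.IsSuccess()) {
                Cout << result.GetPlan() << Endl; // JSON plan: filter pushed down as SsaProgram
                Cout << result.GetAst() << Endl;  // physical-query AST (KqpOlapFilter etc.)
            }
            return result;
        });
    }
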
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:26.476117Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:26.476125Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c1f/r3tmp/tmpdQou6r/pdisk_1.dat 2025-06-03T10:26:26.576104Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61417, node 1 2025-06-03T10:26:26.684784Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:26.684812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:26.684819Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:26.684881Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:26.685654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:26.764703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:26.764741Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:26.776675Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5242 2025-06-03T10:26:27.129132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:27.974356Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:27.986356Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.986410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.040222Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:28.040830Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.194374Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194494Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194610Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:28.194639Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194673Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194686Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194699Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194718Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.194737Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.346831Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.346865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.358236Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.390923Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:28.403390Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:28.403421Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:28.410969Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:28.411019Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:28.411037Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:28.411042Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:28.411046Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:28.411051Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:28.411055Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:28.411060Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:28.411170Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:28.424359Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.424397Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.425609Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:28.426377Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:28.426474Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:28.428178Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:26:28.431235Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:28.431253Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:28.431261Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:26:28.434414Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:28.435751Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:28.435777Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:28.538523Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:28.615636Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:28.658152Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:29.177998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.610113Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:29.688565Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:26:29.688589Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.688601Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2567:2933], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.689024Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2569:2935] 2025-06-03T10:26:29.689172Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2569:2935], schemeshard id = 72075186224037899 2025-06-03T10:26:30.444166Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2693:3236], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.444211Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.447520Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899
2025-06-03T10:26:30.522527Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2846:3272], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.522578Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:26:30.531017Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToAc ... 5186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 25
2025-06-03T10:29:00.830358Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete
2025-06-03T10:29:00.893645Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 118 ], ReplyToActorId[ [2:7580:5435]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:29:00.893781Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 118 ]
2025-06-03T10:29:00.893798Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 118, ReplyToActorId = [2:7580:5435], StatRequests.size() = 1
2025-06-03T10:29:02.193894Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal
2025-06-03T10:29:02.193937Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals.
2025-06-03T10:29:02.193953Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 3] is data table.
2025-06-03T10:29:02.193961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:29:02.194158Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared
2025-06-03T10:29:02.200547Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id;
2025-06-03T10:29:02.202183Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7616:5461], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:02.202247Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7627:5466], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:02.202296Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:29:02.206523Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897
2025-06-03T10:29:02.239488Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7630:5469], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking }
2025-06-03T10:29:02.339564Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7727:5518]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:29:02.339664Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ]
2025-06-03T10:29:02.339672Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7727:5518], StatRequests.size() = 1
2025-06-03T10:29:02.419315Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7732:5520] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:29:02.432282Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7761:5535]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:29:02.432341Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ]
2025-06-03T10:29:02.432402Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0
2025-06-03T10:29:02.432407Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1
2025-06-03T10:29:02.432443Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2
2025-06-03T10:29:02.432454Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7761:5535], StatRequests.size() = 1
2025-06-03T10:29:02.452790Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2ViNWQ5MzUtYmY4NmM2YWYtZTZkZmZmNTItODY3N2U3YWM=, TxId:
2025-06-03T10:29:02.452823Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2ViNWQ5MzUtYmY4NmM2YWYtZTZkZmZmNTItODY3N2U3YWM=, TxId:
2025-06-03T10:29:02.452991Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute
2025-06-03T10:29:02.464809Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3]
2025-06-03T10:29:02.464841Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply.
2025-06-03T10:29:02.506495Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:29:02.506541Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:29:02.590502Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3030:3093], schemeshard count = 1 2025-06-03T10:29:02.872407Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-03T10:29:02.872447Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 221.000000s, at schemeshard: 72075186224037899 2025-06-03T10:29:02.872546Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-03T10:29:02.884483Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:29:03.842308Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:7822:5572]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:03.842427Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:29:03.842437Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:7822:5572], StatRequests.size() = 1 2025-06-03T10:29:05.176103Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:29:05.187239Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:05.187277Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:05.187290Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-03T10:29:05.187297Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:05.187403Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:05.188107Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:05.192102Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=Yzk1MzYzZGYtMTZmNzc3LTQ3NjRmMjg3LTg4OWI3ZWVj, TxId: 2025-06-03T10:29:05.192139Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=Yzk1MzYzZGYtMTZmNzc3LTQ3NjRmMjg3LTg4OWI3ZWVj, TxId: 2025-06-03T10:29:05.192334Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:05.206484Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:05.206514Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:29:05.250779Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:7890:5613]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:05.250896Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:29:05.250905Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:7890:5613], StatRequests.size() = 1 2025-06-03T10:29:06.799820Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:7936:5638]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:06.799947Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:29:06.799959Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:7936:5638], StatRequests.size() = 1 2025-06-03T10:29:08.157498Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-03T10:29:08.157720Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:29:08.157858Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:08.168684Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:08.168725Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:29:08.239146Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:7974:5659]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:29:08.239256Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ]
2025-06-03T10:29:08.239266Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:7974:5659], StatRequests.size() = 1
>> KqpImmediateEffects::DeleteAfterUpsert
>> KqpEffects::InsertAbort_Literal_Duplicates+UseSink
>> KqpWrite::UpsertNullKey
>> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink
>> TObjectStorageListingTest::Split [GOOD]
>> TObjectStorageListingTest::SuffixColumns
>> DataShardReadIteratorSysTables::ShouldRead [GOOD]
>> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid
|64.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest
>> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit
|64.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots
>> DataShardReadIterator::ShouldReadKeyCellVec [GOOD]
|64.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots
>> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD]
>> DataShardReadIterator::ShouldReadKeyArrow
>> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey+EvWrite [GOOD]
>> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite
>> TObjectStorageListingTest::SuffixColumns [GOOD]
|64.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_change_exchange/test-results/unittest/{meta.json ... results_accumulator.log}
|64.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_filestore_reboots/ydb-core-tx-schemeshard-ut_filestore_reboots
>> Viewer::TabletMerging [GOOD]
>> Viewer::StorageGroupOutputWithoutFilterNoDepends
>> DataShardReadIterator::ShouldRangeReadReverseLeftInclusive [GOOD]
>> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive
>> TopicAutoscaling::PartitionSplit_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK
|64.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest
>> SplitPathTests::WithoutDatabaseShouldSuccess [GOOD]
>> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD]
>> KqpImmediateEffects::DeleteAfterUpsert [GOOD]
>> KqpImmediateEffects::DeleteAfterInsert
|64.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest
>> KqpEffects::InsertAbort_Literal_Duplicates+UseSink [GOOD]
>> KqpEffects::InsertAbort_Literal_Duplicates-UseSink
>> KqpImmediateEffects::ConflictingKeyRW1WR2 [GOOD]
>> KqpImmediateEffects::ConflictingKeyRW1RWR2
>> KqpInplaceUpdate::Negative_SingleRowListFromRange+UseSink [GOOD]
>> KqpInplaceUpdate::Negative_BatchUpdate-UseSink
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/client/ut/unittest >> TObjectStorageListingTest::SuffixColumns [GOOD]
Test command err: 2025-06-03T10:29:09.109397Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668234371482627:2065];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:29:09.109821Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002f3b/r3tmp/tmpgH76mO/pdisk_1.dat
2025-06-03T10:29:09.199726Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668234371482603:2079] 1748946549108808 != 1748946549108811
2025-06-03T10:29:09.200752Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 5787, node 1
2025-06-03T10:29:09.217674Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:29:09.217689Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:29:09.217692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:29:09.217754Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:29041
WaitRootIsUp 'dc-1'...
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:29:09.256014Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.256041Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.257251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:29:09.258133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.270189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946549336 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) waiting... 
TClient::Ls request: /dc-1/Dir/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946549336 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "Hash" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Path" ... (TRUNCATED) 2025-06-03T10:29:09.859051Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668233236279266:2202];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002f3b/r3tmp/tmp1s0Yt1/pdisk_1.dat 2025-06-03T10:29:09.878420Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:09.905107Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:09.905359Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668233236279101:2079] 1748946549857297 != 1748946549857300 TServer::EnableGrpc on GrpcPort 14221, node 2 2025-06-03T10:29:09.933566Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:09.933582Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:09.933584Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:09.933649Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1590 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:29:09.974473Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.974514Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.981121Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:29:09.986415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.993637Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.004163Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:29:10.298215Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553163, Sender [2:7511668237531247730:2483], Recipient [2:7511668237531247093:2315]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\002\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3\010\000\000\000B\000\000\000\000\000\000\000" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 6 MaxKeys: 10 2025-06-03T10:29:10.298232Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3145: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-03T10:29:10.298280Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot to Thrill.mp3") (type:4, value:"B\0\0\0\0\0\0\0")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-03T10:29:10.298364Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 77, String : ) 2025-06-03T10:29:10.298374Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 88, String : ) 2025-06-03T10:29:10.298381Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Shoot to Thrill.mp3" -> (Utf8 : Music/AC DC/Shoot to Thrill.mp3, Uint64 : 666, String : ) 2025-06-03T10:29:10.298386Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, String : ) 2025-06-03T10:29:10.298391Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, String : ) 2025-06-03T10:29:10.298415Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 5 common prefixes: 0 2025-06-03T10:29:10.302567Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553163, Sender [2:7511668237531247734:2484], Recipient [2:7511668237531247093:2315]: NKikimrTxDataShard.TEvObjectStorageListingRequest TableId: 3 SerializedKeyPrefix: "\002\000\010\000\000\0002\000\000\000\000\000\000\000\010\000\000\000Bucket50" PathColumnPrefix: "Music/AC DC/" PathColumnDelimiter: "/" SerializedStartAfterKeySuffix: "\001\000\037\000\000\000Music/AC DC/Shoot to Thrill.mp3" ColumnsToReturn: 3 ColumnsToReturn: 4 ColumnsToReturn: 5 MaxKeys: 10 2025-06-03T10:29:10.302582Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3145: StateWork, processing event TEvDataShard::TEvObjectStorageListingRequest 2025-06-03T10:29:10.302639Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:152: 72075186224037888 S3 Listing: start at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC/Shoot 
to Thrill.mp3")), end at key ((type:4, value:"2\0\0\0\0\0\0\0") (type:4608, value:"Bucket50") (type:4608, value:"Music/AC DC0") (type:0)) restarted: 0 last path: "" contents: 0 common prefixes: 0 2025-06-03T10:29:10.302712Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 1, Uint64 : 10) 2025-06-03T10:29:10.302736Z node 2 :TX_DATASHARD TRACE: datashard__object_storage_listing.cpp:240: 72075186224037888 S3 Listing: "Music/AC DC/Thunderstruck.mp3" -> (Utf8 : Music/AC DC/Thunderstruck.mp3, Uint64 : 66, Uint64 : 10) 2025-06-03T10:29:10.302765Z node 2 :TX_DATASHARD DEBUG: datashard__object_storage_listing.cpp:374: 72075186224037888 S3 Listing: finished status: 0 description: "" contents: 2 common prefixes: 0 >> KqpWrite::UpsertNullKey [GOOD] >> KqpWrite::ProjectReplace-UseSink >> BasicStatistics::ServerlessGlobalIndex [GOOD] >> DataShardReadIteratorSysTables::ShouldNotReadUserTableUsingLocalTid [GOOD] >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion |64.9%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::ServerlessWithExclusiveNodesCheckTable [GOOD] Test command err: 2025-06-03T10:28:57.705052Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668180021078369:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:57.705092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:28:57.783427Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668180021078349:2079] 1748946537704898 != 1748946537704901 2025-06-03T10:28:57.786201Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11738, node 1 2025-06-03T10:28:57.800292Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:57.800308Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:57.800311Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:57.800368Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6637 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:57.832055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:57.839279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:28:57.839754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:57.850357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:57.850395Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:57.851425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:58.323408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668184316046340:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:58.323413Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668184316046348:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:58.323436Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:58.324252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480
2025-06-03T10:28:58.326375Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668184316046354:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking }
2025-06-03T10:28:58.384941Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668184316046405:2345] txid# 281474976710662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:28:58.692456Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668186435663374:2062];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:28:58.692475Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # SectorMap:test-client[:2000]
2025-06-03T10:28:58.709099Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:28:58.709407Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668186435663354:2079] 1748946538692321 != 1748946538692324
TServer::EnableGrpc on GrpcPort 32511, node 2
2025-06-03T10:28:58.721524Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:28:58.721539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:28:58.721541Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:28:58.721609Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:3615
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:28:58.797036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:28:58.797075Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:28:58.797892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
2025-06-03T10:28:58.798028Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
waiting...
2025-06-03T10:28:58.803263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480
2025-06-03T10:28:58.803949Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:28:59.171315Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668190730631347:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:59.171342Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668190730631358:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:59.171350Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:28:59.172364Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480
2025-06-03T10:28:59.175867Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668190730631361:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking }
2025-06-03T10:28:59.235409Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668190730631412:2345] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:28:59.565674Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668188583468119:2139];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:28:59.565726Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error ... ion.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480
2025-06-03T10:29:01.311988Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715661, at schemeshard: 72057594046644480
2025-06-03T10:29:01.312084Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668198392385864:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking }
2025-06-03T10:29:01.407484Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668198392385915:2345] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:29:02.700985Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:467:2426], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.701103Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.701113Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:02.851783Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.003685Z node 5 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:03.010068Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:03.054216Z node 5 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 12277, node 5 TClient is connected to server localhost:9950 2025-06-03T10:29:03.088019Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:03.088043Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:03.088049Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:03.088166Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":15,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100100111","FieldsRequired":"0000000000000000000000000100101","Problems":["no-database-board-info"],"Nodes":[{"NodeId":6,"Database":"/Root/shared","Disconnected":true,"CpuUsage":"nan","SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[183.97802734375,127.8095703125,60.40087890625],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-pyvh3niaay.auto.internal","Version":".ad1e251","Location":{"DataCenter":"2","Module":"2","Rack":"2","Unit":"2"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-03T10:29:04.692362Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:542:2427], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:04.692460Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:04.692497Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:04.809995Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:04.913994Z node 7 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:04.919850Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:04.971455Z node 7 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 17519, node 7 TClient is connected to server localhost:32236 2025-06-03T10:29:05.011156Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:05.011178Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:05.011184Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:05.011336Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":15,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100000111","FieldsRequired":"0000000000000000000000000000101","Nodes":[{"NodeId":9,"Database":"/Root/serverless","Disconnected":true,"CpuUsage":"nan","SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[183.97802734375,127.8095703125,60.40087890625],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-pyvh3niaay.auto.internal","Version":".ad1e251","Location":{"DataCenter":"3","Module":"3","Rack":"3","Unit":"3"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-03T10:29:07.028144Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:542:2427], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:07.028251Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:07.028318Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:07.207000Z node 10 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:07.348940Z node 10 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:07.364129Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:07.508425Z node 10 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 7133, node 10 TClient is connected to server localhost:7491 2025-06-03T10:29:07.600067Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:07.600090Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:07.600096Z node 10 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:07.600278Z node 10 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":15,"TotalNodes":"1","FoundNodes":"1","FieldsAvailable":"0000000010000110111111100000111","FieldsRequired":"0000000000000000000000000000101","Nodes":[{"NodeId":11,"Database":"/Root/shared","Disconnected":true,"CpuUsage":"nan","SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[177.8134765625,127.4638671875,60.6513671875],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-pyvh3niaay.auto.internal","Version":".ad1e251","Location":{"DataCenter":"2","Module":"2","Rack":"2","Unit":"2"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}}]} 2025-06-03T10:29:09.575343Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:553:2361], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:09.575445Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:09.575481Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:09.710212Z node 13 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:09.835597Z node 13 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:09.874618Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:09.957888Z node 13 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 12356, node 13 TClient is connected to server localhost:29793 2025-06-03T10:29:09.996453Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:09.996476Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:09.996481Z node 13 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:09.996656Z node 13 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration json result: {"Version":15,"TotalNodes":"2","FoundNodes":"2","FieldsAvailable":"0000000010100110111111100100111","FieldsRequired":"0000000000000000000000000100101","Nodes":[{"NodeId":15,"Disconnected":true,"CpuUsage":"nan","SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[177.8134765625,127.4638671875,60.6513671875],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-pyvh3niaay.auto.internal","Version":".ad1e251","Location":{"DataCenter":"3","Module":"3","Rack":"3","Unit":"3"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64}},{"NodeId":16,"Disconnected":true,"CpuUsage":"nan","SystemState":{"StartTime":"0","ChangeTime":"1","LoadAverage":[177.8134765625,127.4638671875,60.6513671875],"NumberOfCpus":64,"SystemState":"Green","Host":"ghrun-pyvh3niaay.auto.internal","Version":".ad1e251","Location":{"DataCenter":"4","Module":"4","Rack":"4","Unit":"4"},"CoresUsed":0,"CoresTotal":0,"RealNumberOfCpus":64},"Tablets":[{"Type":"DataShard","State":"Green","Count":1}]}]} >> SplitPathTests::WithDatabaseShouldFail [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::ServerlessGlobalIndex [GOOD] Test command err: 2025-06-03T10:26:28.562913Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:28.562948Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:28.562956Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bd6/r3tmp/tmp31K2Oc/pdisk_1.dat 2025-06-03T10:26:28.657346Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65266, node 1 2025-06-03T10:26:28.758346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:28.758366Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:28.758370Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:28.758413Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:28.758884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:28.835036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.835067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.846687Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18282 2025-06-03T10:26:29.191616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.991872Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:30.002779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.002828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.056820Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:30.057473Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.223520Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.223699Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.223852Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:30.223892Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.223939Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.223964Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.223981Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.224000Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.224025Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.378391Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.378444Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.390218Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.430930Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:30.447999Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:30.448031Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:30.456975Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:30.457032Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:30.457059Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:30.457065Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:30.457072Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:30.457079Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:30.457086Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:30.457093Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:30.457222Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:30.471448Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.471481Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:30.473079Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:30.474053Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:30.474223Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:30.476339Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:26:30.480607Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:30.480632Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:30.480644Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:26:30.485373Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:30.487459Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:30.487502Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:30.599071Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:30.675471Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:30.729100Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:31.241005Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:31.680585Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:31.786066Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:26:31.786090Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:31.786102Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2569:2935], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:31.786770Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2571:2937] 2025-06-03T10:26:31.786820Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2571:2937], schemeshard id = 72075186224037899 2025-06-03T10:26:32.522505Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2689:3231], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:32.522563Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:32.527209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72075186224037899 2025-06-03T10:26:32.644713Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2922:3277], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:32.644761Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:32.652213Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToA ... AND local_path_id = $local_path_id; 2025-06-03T10:29:04.745058Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7835:5567], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.745091Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7845:5572], DatabaseId: /Root/Shared, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.745120Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Shared, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:04.749014Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720658:2, at schemeshard: 72075186224037897 2025-06-03T10:29:04.765929Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7849:5575], DatabaseId: /Root/Shared, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720658 completed, doublechecking } 2025-06-03T10:29:04.866698Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [2:7944:5623]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.866825Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-03T10:29:04.866843Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [2:7944:5623], StatRequests.size() = 1 2025-06-03T10:29:04.954472Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7949:5625] txid# 281474976720659, issues: { message: "Check failed: path: \'/Root/Shared/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:04.981450Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [2:7978:5640]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:04.981844Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-03T10:29:04.981985Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224037894] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0 2025-06-03T10:29:04.981996Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:29:04.982036Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:04.982062Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [2:7978:5640], StatRequests.size() = 1 2025-06-03T10:29:05.009628Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NzExMjFmMzEtMzgwYWNkMmUtNmNiOGQzNDItMTJmMWIyYjU=, TxId: 2025-06-03T10:29:05.009666Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NzExMjFmMzEtMzgwYWNkMmUtNmNiOGQzNDItMTJmMWIyYjU=, TxId: 2025-06-03T10:29:05.009865Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:05.023457Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 3] 2025-06-03T10:29:05.023490Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:29:05.086410Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224037894] EvFastPropagateCheck 2025-06-03T10:29:05.086455Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224037894] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:29:05.154088Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:3197:3159], schemeshard count = 1 2025-06-03T10:29:05.416269Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037899 2025-06-03T10:29:05.416310Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 181.000000s, at schemeshard: 72075186224037899 2025-06-03T10:29:05.416401Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 50 2025-06-03T10:29:05.434088Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:29:06.484698Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [2:8041:5679]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:06.484836Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ] 2025-06-03T10:29:06.484845Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [2:8041:5679], StatRequests.size() = 1 2025-06-03T10:29:07.925763Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:29:07.925865Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:07.925872Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:07.925884Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 4] is data table. 2025-06-03T10:29:07.925891Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-03T10:29:07.926010Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:07.926889Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:07.931580Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTFhZGFkYWMtZDhhNGI2ZWYtNDA2NDdiMWEtMjdlMTMwYTI=, TxId: 2025-06-03T10:29:07.931615Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTFhZGFkYWMtZDhhNGI2ZWYtNDA2NDdiMWEtMjdlMTMwYTI=, TxId: 2025-06-03T10:29:07.931812Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:07.945765Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 4] 2025-06-03T10:29:07.945796Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:29:08.001216Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [2:8109:5720]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:08.001372Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:29:08.001382Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [2:8109:5720], StatRequests.size() = 1 2025-06-03T10:29:09.448908Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [2:8157:5746]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:09.449060Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:29:09.449072Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [2:8157:5746], StatRequests.size() = 1 2025-06-03T10:29:10.825788Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-03T10:29:10.825932Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:10.825940Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:10.825951Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-03T10:29:10.825957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:10.826087Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:10.826767Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:29:10.832951Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:10.833795Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:10.840652Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=M2ZlYzA0ZjktOTlmYjZjMjQtMjcxYTdjMzMtNmVhOTlkMWI=, TxId: 2025-06-03T10:29:10.840692Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=M2ZlYzA0ZjktOTlmYjZjMjQtMjcxYTdjMzMtNmVhOTlkMWI=, TxId: 2025-06-03T10:29:10.840913Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:10.870302Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:10.870337Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:29:10.962774Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [2:8219:5782]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:10.962896Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-03T10:29:10.962906Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [2:8219:5782], StatRequests.size() = 1 >> OperationMapping::IndexBuildSuccess [GOOD] >> DataShardReadIterator::ShouldReadKeyArrow [GOOD] >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKey-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite |64.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |64.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots |65.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/tx-schemeshard-ut_vector_index_build_reboots >> DataShardReadIterator::ShouldRangeReadReverseLeftNonInclusive [GOOD] >> DataShardReadIterator::ShouldNotReadAfterCancel >> KqpImmediateEffects::DeleteAfterInsert [GOOD] >> KqpEffects::InsertAbort_Literal_Duplicates-UseSink [GOOD] >> KqpEffects::InsertAbort_Literal_Conflict-UseSink |65.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> SplitPathTests::WithDatabaseShouldFail [GOOD] >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] >> BsControllerTest::SelfHealBlock4Plus2 >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] |65.0%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/grpc_services/ut/unittest >> OperationMapping::IndexBuildSuccess [GOOD] >> KqpWrite::ProjectReplace-UseSink [GOOD] >> BsControllerTest::DecommitRejected >> TopicAutoscaling::ControlPlane_CDC [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Disable ------- [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::DeleteAfterInsert [GOOD] Test command err: Trying to start YDB, gRPC: 15612, MsgBus: 13850 2025-06-03T10:29:09.634878Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668233333533387:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:09.634906Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00116c/r3tmp/tmpuWwCiw/pdisk_1.dat 2025-06-03T10:29:09.733656Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:09.736086Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.736132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.739970Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 15612, node 1 2025-06-03T10:29:09.755675Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:09.755696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:09.755699Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:09.755751Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13850 TClient is connected to server localhost:13850 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:09.918929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.934483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:29:09.980483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.044701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.070794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.265857Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237628502278:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.265887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.327260Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.347539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.363213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.379042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.394002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.415357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.473395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.492809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237628502934:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.492839Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.492938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237628502939:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.493874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:10.497805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:29:10.497882Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668237628502941:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:10.554680Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668237628502994:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:10.749199Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 25202, MsgBus: 26361 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00116c/r3tmp/tmpiG7h0N/pdisk_1.dat 2025-06-03T10:29:11.300809Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 25202, node 2 2025-06-03T10:29:11.309930Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:11.313258Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:11.313279Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:11.313282Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:11.313384Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26361 2025-06-03T10:29:11.368171Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:11.368194Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:11.369371Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26361 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:29:11.403883Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.410288Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:11.420608Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.446408Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.481146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.499412Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.897486Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668242530755184:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:11.897530Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:11.907897Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:11.919403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:11.927831Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:11.941864Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.003203Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.031403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.055543Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.077499Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668246825723137:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.077522Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.077681Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668246825723142:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.078719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:12.082712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:29:12.082873Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668246825723144:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:12.148293Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668246825723195:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:12.374273Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/effects/unittest >> KqpImmediateEffects::ConflictingKeyRW1RWR2 [GOOD] Test command err: Trying to start YDB, gRPC: 28046, MsgBus: 3241 2025-06-03T10:29:09.471280Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668231756573108:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:09.471596Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001182/r3tmp/tmpq64OVB/pdisk_1.dat 2025-06-03T10:29:09.560712Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28046, node 1 2025-06-03T10:29:09.575937Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.575984Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.580955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:09.593590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:09.593614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:09.593618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:09.593675Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3241 TClient is connected to server localhost:3241 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:09.722567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.736394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:29:09.754374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.859263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.917129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:09.960794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.090036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668236051541998:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.090079Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.180131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.196834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.282819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.302509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.320717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.347841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.361811Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.390890Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668236051542655:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.390934Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.391042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668236051542660:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.392218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:10.395492Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668236051542662:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:29:10.487636Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668236051542722:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:10.717275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 25871, MsgBus: 23143 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001182/r3tmp/tmp9cr5H0/pdisk_1.dat 2025-06-03T10:29:11.575009Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:11.589674Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:11.590521Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668239238963914:2079] 1748946551544391 != 1748946551544394 TServer::EnableGrpc on GrpcPort 25871, node 2 2025-06-03T10:29:11.610010Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:11.610027Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:11.610031Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:11.610085Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23143 2025-06-03T10:29:11.669111Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:11.669139Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:11.669856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23143 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:29:11.738534Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.742052Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:11.762497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.803270Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.887028Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.934719Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:12.201819Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668243533932839:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.201854Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.217133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.239135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.267022Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.282959Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.296431Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.318782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.333971Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.354023Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668243533933492:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.354069Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.354136Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668243533933497:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.355059Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:12.361480Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668243533933499:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:12.437072Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668243533933550:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:12.637886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.779033Z node 2 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because it cannot acquire locks;tx_id=6; 2025-06-03T10:29:12.781200Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 6 at tablet 72075186224037922 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-03T10:29:12.781268Z node 2 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 6 at tablet 72075186224037922 Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-03T10:29:12.781397Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [2:7511668243533934077:2510], Table: `/Root/TestImmediateEffects` ([72057594046644480:17:1]), SessionActorId: [2:7511668243533933824:2510]Got LOCKS BROKEN for table `/Root/TestImmediateEffects`. ShardID=72075186224037922, Sink=[2:7511668243533934077:2510].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 }
2025-06-03T10:29:12.781562Z node 2 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [2:7511668243533934044:2510], SessionActorId: [2:7511668243533933824:2510], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[2:7511668243533933824:2510]. isRollback=0
2025-06-03T10:29:12.781669Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=2&id=NzkwMjVmOGUtNGE4YWUwYzMtYTgyMTY1YTUtOWRkMTE1ZjU=, ActorId: [2:7511668243533933824:2510], ActorState: ExecuteState, TraceId: 01jwtnd1xp871q69astxj0kx1z, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [2:7511668243533934071:2510] from: [2:7511668243533934044:2510]
2025-06-03T10:29:12.781692Z node 2 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [2:7511668243533934071:2510] TxId: 281474976715677. Ctx: { TraceId: 01jwtnd1xp871q69astxj0kx1z, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzkwMjVmOGUtNGE4YWUwYzMtYTgyMTY1YTUtOWRkMTE1ZjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TestImmediateEffects`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-03T10:29:12.781805Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=NzkwMjVmOGUtNGE4YWUwYzMtYTgyMTY1YTUtOWRkMTE1ZjU=, ActorId: [2:7511668243533933824:2510], ActorState: ExecuteState, TraceId: 01jwtnd1xp871q69astxj0kx1z, Create QueryResponse for error on request, msg: >> CommitOffset::Commit_WithWrongSession_ToParent [GOOD] >> CommitOffset::Commit_WithoutSession_ParentNotFinished |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] >> DataShardReadIteratorSysTables::ShouldForbidSchemaVersion [GOOD] >> DataShardReadIteratorSysTables::ShouldNotAllowArrow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/effects/unittest >> KqpInplaceUpdate::Negative_BatchUpdate-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 2421, MsgBus: 10267 2025-06-03T10:29:09.875644Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668233517789851:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:09.881355Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001148/r3tmp/tmpyUxT5S/pdisk_1.dat 2025-06-03T10:29:10.027996Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668233517789694:2079] 1748946549871022 != 1748946549871025 2025-06-03T10:29:10.035075Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2421, node 1 2025-06-03T10:29:10.081586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:10.081601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:10.081605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:10.081662Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:10.095591Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:10.095623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:10.098078Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10267 TClient is connected to server localhost:10267 WaitRootIsUp 'Root'... 
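Editor's note: the STATUS_LOCKS_BROKEN → "Transaction locks invalidated" → ABORTED chain above is YDB's optimistic locking at work: the datashard rejects a commit whose read locks were invalidated by a concurrent write (issue code 2001), and the session surfaces ABORTED so the client can rerun the whole transaction. A minimal retry sketch using the C++ SDK's built-in retry loop, which re-runs the operation on retryable statuses such as ABORTED; only the table name `/Root/TestImmediateEffects` comes from the log, the query text and Key/Value columns are assumptions for illustration:

```cpp
// Sketch: retrying a transaction that can fail with EStatus::ABORTED
// ("Transaction locks invalidated") under optimistic locking.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

TStatus UpsertWithRetry(TTableClient& client) {
    return client.RetryOperationSync([](TSession session) -> TStatus {
        auto tx = TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx();
        // If a concurrent writer invalidates the locks taken by this query,
        // the commit fails with ABORTED and the lambda is re-run.
        return session.ExecuteDataQuery(
            "UPSERT INTO TestImmediateEffects (Key, Value) VALUES (1u, \"a\");",
            tx).GetValueSync();
    });
}
```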
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:10.294552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.301869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:29:10.309867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.398303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.458320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:10.474824Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.556787Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237812758620:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.556840Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.676757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.693879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.755305Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.770183Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.789521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.802615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.816325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.846386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237812759274:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.846422Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.846983Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237812759279:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.847955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:10.852487Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668237812759281:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:29:10.914568Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668237812759341:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:11.207896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 26994, MsgBus: 9309 2025-06-03T10:29:11.688671Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668240581329696:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:11.688692Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001148/r3tmp/tmpWCjTPo/pdisk_1.dat TServer::EnableGrpc on GrpcPort 26994, node 2 2025-06-03T10:29:11.715518Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:11.725539Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:11.725559Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:11.725561Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:11.725627Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9309 2025-06-03T10:29:11.789049Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:11.789083Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:9309 2025-06-03T10:29:11.790186Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:11.796828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.806595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:11.874504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:11.912001Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.929544Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:12.209719Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244876298587:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.209815Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.226640Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.247303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.274869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.291886Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.310015Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.323416Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.336888Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.359250Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244876299239:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.359296Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.359581Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244876299244:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.360502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:12.363619Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668244876299246:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:12.445188Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668244876299297:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:12.689271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> Viewer::StorageGroupOutputWithoutFilterNoDepends [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/effects/unittest >> KqpWrite::ProjectReplace-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 5895, MsgBus: 11292 2025-06-03T10:29:09.833479Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668233108031374:2077];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:09.833850Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00114a/r3tmp/tmpmCwWVa/pdisk_1.dat 2025-06-03T10:29:10.035047Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668233108031325:2079] 1748946549829632 != 1748946549829635 2025-06-03T10:29:10.042401Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5895, node 1 2025-06-03T10:29:10.101565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:10.101579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:10.101582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:10.101641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11292 2025-06-03T10:29:10.176194Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:10.176231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:10.176944Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:11292 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:10.238368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.282353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:10.296414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.379299Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:10.416955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.436740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.631666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237403000279:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.631702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.692147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.701720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.712766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.724805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.740951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.802541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.816650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.842224Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237403000943:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.842251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.842404Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668237403000948:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.843322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:10.846417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:29:10.846498Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668237403000950:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:10.938679Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668237403001001:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5999, MsgBus: 14658 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00114a/r3tmp/tmplfdWZu/pdisk_1.dat 2025-06-03T10:29:11.793468Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:11.833505Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:11.837416Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668240685039545:2079] 1748946551760417 != 1748946551760420 TServer::EnableGrpc on GrpcPort 5999, node 2 2025-06-03T10:29:11.853965Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:11.853978Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:11.853980Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:11.854033Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14658 2025-06-03T10:29:11.885642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:11.885684Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:11.889721Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14658 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:29:11.986508Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:11.997715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:12.014447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:12.077482Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:12.119180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.140191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:12.397763Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244980008483:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.397798Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.405391Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.416065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.473507Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.484068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.503666Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.524824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.536789Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:12.552106Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244980009139:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.552142Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.552190Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668244980009144:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:12.553134Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:12.558054Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668244980009146:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:12.623973Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668244980009197:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::DecommitRejected [GOOD] Test command err: 2025-06-03T10:29:13.553383Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-03T10:29:13.553402Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-03T10:29:13.553427Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-03T10:29:13.553431Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-03T10:29:13.553438Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-03T10:29:13.553442Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-03T10:29:13.553449Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-03T10:29:13.553453Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-03T10:29:13.553459Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-03T10:29:13.553463Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-03T10:29:13.553470Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-03T10:29:13.553474Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-03T10:29:13.553481Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-03T10:29:13.553485Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-03T10:29:13.553491Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-03T10:29:13.553495Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-03T10:29:13.553502Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-03T10:29:13.553507Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-03T10:29:13.553515Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-03T10:29:13.553519Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-03T10:29:13.553535Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-03T10:29:13.553539Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-03T10:29:13.553545Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-03T10:29:13.553549Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-03T10:29:13.553555Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-03T10:29:13.553559Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-03T10:29:13.553566Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-03T10:29:13.553569Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-03T10:29:13.553576Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-03T10:29:13.553582Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-03T10:29:13.555934Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:508:32] Status# ERROR ClientId# [1:508:32] ServerId# [0:0:0] PipeClient# [1:508:32] 2025-06-03T10:29:13.556071Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:509:20] Status# ERROR ClientId# [2:509:20] ServerId# [0:0:0] PipeClient# [2:509:20] 2025-06-03T10:29:13.556080Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:510:20] Status# ERROR ClientId# [3:510:20] ServerId# [0:0:0] PipeClient# [3:510:20] 
2025-06-03T10:29:13.556087Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:511:20] Status# ERROR ClientId# [4:511:20] ServerId# [0:0:0] PipeClient# [4:511:20] 2025-06-03T10:29:13.556094Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:512:20] Status# ERROR ClientId# [5:512:20] ServerId# [0:0:0] PipeClient# [5:512:20] 2025-06-03T10:29:13.556101Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:513:20] Status# ERROR ClientId# [6:513:20] ServerId# [0:0:0] PipeClient# [6:513:20] 2025-06-03T10:29:13.556108Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:514:20] Status# ERROR ClientId# [7:514:20] ServerId# [0:0:0] PipeClient# [7:514:20] 2025-06-03T10:29:13.556114Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:515:20] Status# ERROR ClientId# [8:515:20] ServerId# [0:0:0] PipeClient# [8:515:20] 2025-06-03T10:29:13.556121Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:516:20] Status# ERROR ClientId# [9:516:20] ServerId# [0:0:0] PipeClient# [9:516:20] 2025-06-03T10:29:13.556128Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:517:20] Status# ERROR ClientId# [10:517:20] ServerId# [0:0:0] PipeClient# [10:517:20] 2025-06-03T10:29:13.556137Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:518:20] Status# ERROR ClientId# [11:518:20] ServerId# [0:0:0] PipeClient# [11:518:20] 2025-06-03T10:29:13.556144Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:519:20] Status# ERROR ClientId# [12:519:20] ServerId# [0:0:0] PipeClient# [12:519:20] 2025-06-03T10:29:13.556152Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:520:20] Status# ERROR ClientId# [13:520:20] ServerId# [0:0:0] PipeClient# [13:520:20] 2025-06-03T10:29:13.556158Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:521:20] Status# ERROR ClientId# [14:521:20] ServerId# [0:0:0] PipeClient# [14:521:20] 2025-06-03T10:29:13.556164Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:522:20] Status# ERROR ClientId# [15:522:20] ServerId# [0:0:0] PipeClient# [15:522:20] 2025-06-03T10:29:13.564457Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] Connect 2025-06-03T10:29:13.564492Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] Connect 2025-06-03T10:29:13.564503Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] Connect 2025-06-03T10:29:13.564513Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] Connect 2025-06-03T10:29:13.564522Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] Connect 2025-06-03T10:29:13.564532Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] Connect 2025-06-03T10:29:13.564542Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] Connect 2025-06-03T10:29:13.564551Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] Connect 2025-06-03T10:29:13.564562Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] Connect 2025-06-03T10:29:13.564583Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] Connect 2025-06-03T10:29:13.564592Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] Connect 2025-06-03T10:29:13.564602Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] Connect 2025-06-03T10:29:13.564611Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] Connect 2025-06-03T10:29:13.564619Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] Connect 2025-06-03T10:29:13.564628Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] Connect 2025-06-03T10:29:13.565129Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:581:60] Status# OK ClientId# [1:581:60] ServerId# [1:610:61] PipeClient# [1:581:60] 2025-06-03T10:29:13.565142Z 1 00h00m00.100000s :BS_NODE DEBUG: [1] State 
switched from 0 to 1 2025-06-03T10:29:13.565827Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:582:21] Status# OK ClientId# [2:582:21] ServerId# [1:611:62] PipeClient# [2:582:21] 2025-06-03T10:29:13.565839Z 2 00h00m00.100000s :BS_NODE DEBUG: [2] State switched from 0 to 1 2025-06-03T10:29:13.565847Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:583:21] Status# OK ClientId# [3:583:21] ServerId# [1:612:63] PipeClient# [3:583:21] 2025-06-03T10:29:13.565851Z 3 00h00m00.100000s :BS_NODE DEBUG: [3] State switched from 0 to 1 2025-06-03T10:29:13.565857Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:584:21] Status# OK ClientId# [4:584:21] ServerId# [1:613:64] PipeClient# [4:584:21] 2025-06-03T10:29:13.565862Z 4 00h00m00.100000s :BS_NODE DEBUG: [4] State switched from 0 to 1 2025-06-03T10:29:13.565868Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:585:21] Status# OK ClientId# [5:585:21] ServerId# [1:614:65] PipeClient# [5:585:21] 2025-06-03T10:29:13.565872Z 5 00h00m00.100000s :BS_NODE DEBUG: [5] State switched from 0 to 1 2025-06-03T10:29:13.565879Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:586:21] Status# OK ClientId# [6:586:21] ServerId# [1:615:66] PipeClient# [6:586:21] 2025-06-03T10:29:13.565884Z 6 00h00m00.100000s :BS_NODE DEBUG: [6] State switched from 0 to 1 2025-06-03T10:29:13.565890Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:587:21] Status# OK ClientId# [7:587:21] ServerId# [1:616:67] PipeClient# [7:587:21] 2025-06-03T10:29:13.565896Z 7 00h00m00.100000s :BS_NODE DEBUG: [7] State switched from 0 to 1 2025-06-03T10:29:13.565905Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:588:21] Status# OK ClientId# [8:588:21] ServerId# [1:617:68] PipeClient# [8:588:21] 2025-06-03T10:29:13.565910Z 8 00h00m00.100000s :BS_NODE DEBUG: [8] State switched from 0 to 1 2025-06-03T10:29:13.565916Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:589:21] Status# OK ClientId# [9:589:21] ServerId# [1:618:69] PipeClient# [9:589:21] 2025-06-03T10:29:13.565921Z 9 00h00m00.100000s :BS_NODE DEBUG: [9] State switched from 0 to 1 2025-06-03T10:29:13.565927Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:590:21] Status# OK ClientId# [10:590:21] ServerId# [1:619:70] PipeClient# [10:590:21] 2025-06-03T10:29:13.565932Z 10 00h00m00.100000s :BS_NODE DEBUG: [10] State switched from 0 to 1 2025-06-03T10:29:13.565938Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:591:21] Status# OK ClientId# [11:591:21] ServerId# [1:620:71] PipeClient# [11:591:21] 2025-06-03T10:29:13.565942Z 11 00h00m00.100000s :BS_NODE DEBUG: [11] State switched from 0 to 1 2025-06-03T10:29:13.565949Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:592:21] Status# OK ClientId# [12:592:21] ServerId# [1:621:72] PipeClient# [12:592:21] 2025-06-03T10:29:13.565953Z 12 00h00m00.100000s :BS_NODE DEBUG: [12] State switched from 0 to 1 2025-06-03T10:29:13.565961Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:593:21] Status# OK ClientId# [13:593:21] ServerId# [1:622:73] PipeClient# [13:593:21] 2025-06-03T10:29:13.565965Z 13 00h00m00.100000s :BS_NODE DEBUG: [13] State switched from 0 to 1 2025-06-03T10:29:13.565972Z 14 00h00m00.100000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:594:21] Status# OK ClientId# [14:594:21] ServerId# [1:623:74] PipeClient# [14:594:21] 2025-06-03T10:29:13.565976Z 14 00h00m00.100000s :BS_NODE 
DEBUG: [14] State switched from 0 to 1 2025-06-03T10:29:13.565983Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:595:21] Status# OK ClientId# [15:595:21] ServerId# [1:624:75] PipeClient# [15:595:21] 2025-06-03T10:29:13.565987Z 15 00h00m00.100000s :BS_NODE DEBUG: [15] State switched from 0 to 1 2025-06-03T10:29:13.566495Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:13.566510Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-03T10:29:13.570139Z 1 00h00m00.100512s :BS_NODE DEBUG: [1] VDiskId# [80000000:1:0:0:0] status changed to INIT_PENDING 2025-06-03T10:29:13.570544Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-03T10:29:13.570559Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-03T10:29:13.570584Z 2 00h00m00.100512s :BS_NODE DEBUG: [2] VDiskId# [80000000:1:0:1:0] status changed to INIT_PENDING 2025-06-03T10:29:13.570603Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-03T10:29:13.570610Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-03T10:29:13.570618Z 3 00h00m00.100512s :BS_NODE DEBUG: [3] VDiskId# [80000000:1:0:2:0] status changed to INIT_PENDING 2025-06-03T10:29:13.570634Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-03T10:29:13.570641Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-03T10:29:13.570650Z 4 00h00m00.100512s :BS_NODE DEBUG: [4] VDiskId# [80000000:1:1:0:0] status changed to INIT_PENDING 2025-06-03T10:29:13.570666Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-03T10:29:13.570673Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] PDiskId# 1000 VSlotId# 1000 created 2025-06-03T10:29:13.570680Z 5 00h00m00.100512s :BS_NODE DEBUG: [5] VDiskId# [80000000:1:1:1:0] status changed to INIT_PENDING 2025-06-03T1 ... 
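The records above show each VSlot being created and immediately set to INIT_PENDING; the elided portion of the log and the records below move the same VDisks through REPLICATING to READY. A minimal sketch of that observed progression — the enum values and transition check are assumptions inferred from the log text, not YDB's actual types:

```cpp
#include <cassert>
#include <iostream>

// Hypothetical reconstruction of the VDisk status progression seen in this
// log: INIT_PENDING -> REPLICATING -> READY. Names mirror the log text only.
enum class EVDiskStatus { InitPending, Replicating, Ready };

// True when the transition matches the order observed in the records above.
bool IsLegalTransition(EVDiskStatus from, EVDiskStatus to) {
    return (from == EVDiskStatus::InitPending && to == EVDiskStatus::Replicating) ||
           (from == EVDiskStatus::Replicating && to == EVDiskStatus::Ready);
}

int main() {
    // Replay the transitions logged for a freshly created VSlot.
    assert(IsLegalTransition(EVDiskStatus::InitPending, EVDiskStatus::Replicating));
    assert(IsLegalTransition(EVDiskStatus::Replicating, EVDiskStatus::Ready));
    assert(!IsLegalTransition(EVDiskStatus::Ready, EVDiskStatus::InitPending));
    std::cout << "transitions consistent with the log\n";
}
```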
[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648185Z 1 00h01m05.639512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to REPLICATING 2025-06-03T10:29:13.648219Z 1 00h01m05.639512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648246Z 14 00h01m06.170512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to REPLICATING 2025-06-03T10:29:13.648298Z 1 00h01m06.170512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648338Z 1 00h01m10.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648397Z 12 00h01m10.570512s :BS_NODE DEBUG: [12] VDiskId# [80000001:1:0:2:0] status changed to READY 2025-06-03T10:29:13.648455Z 1 00h01m10.570512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648483Z 11 00h01m12.290512s :BS_NODE DEBUG: [11] VDiskId# [80000001:1:0:1:0] status changed to READY 2025-06-03T10:29:13.648526Z 1 00h01m12.290512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648636Z 1 00h01m18.088512s :BS_NODE DEBUG: [1] VDiskId# [80000001:1:2:0:0] status changed to READY 2025-06-03T10:29:13.648673Z 1 00h01m18.088512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648709Z 1 00h01m20.000000s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] 
Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648723Z 14 00h01m20.210536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] status changed to READY 2025-06-03T10:29:13.648781Z 1 00h01m20.210536s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.648921Z 8 00h01m20.211048s :BS_NODE DEBUG: [8] NodeServiceSetUpdate 2025-06-03T10:29:13.648931Z 8 00h01m20.211048s :BS_NODE DEBUG: [8] VDiskId# [80000000:2:2:1:0] destroyed 2025-06-03T10:29:13.649029Z 1 00h01m25.570512s :BS_SELFHEAL INFO: {BSSH11@self_heal.cpp:709} group can't be reassigned right now [{[80000000:3:0:0:0] Ready},{[80000000:3:0:1:0] Ready},{[80000000:3:0:2:0] Ready},{[80000000:3:1:0:0] Ready},{[80000000:3:1:1:0] Ready},{[80000000:3:1:2:0] Ready},{[80000000:3:2:0:0] NotReady},{[80000000:3:2:1:0] NotReady},{[80000000:3:2:2:0] Ready RequiresReassignment Decommitted}] GroupId# 2147483648 2025-06-03T10:29:13.649058Z 13 00h01m26.128024s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] status changed to READY 2025-06-03T10:29:13.649131Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483648 2025-06-03T10:29:13.649281Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649290Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:0:0] DiskIsOk# true 2025-06-03T10:29:13.649373Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649378Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:1:0] DiskIsOk# true 2025-06-03T10:29:13.649386Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649392Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:0:2:0] DiskIsOk# true 2025-06-03T10:29:13.649398Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649403Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:0:0] DiskIsOk# true 2025-06-03T10:29:13.649409Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649414Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:1:0] DiskIsOk# true 
2025-06-03T10:29:13.649420Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649425Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:1:2:0] DiskIsOk# true 2025-06-03T10:29:13.649432Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649437Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:0:0] DiskIsOk# true 2025-06-03T10:29:13.649444Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483648 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:13.649453Z 1 00h01m26.128024s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483648 VDiskId# [80000000:3:2:1:0] DiskIsOk# true 2025-06-03T10:29:13.649953Z 1 00h01m26.128536s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:13.649969Z 1 00h01m26.128536s :BS_NODE DEBUG: [1] VDiskId# [80000000:3:0:0:0] -> [80000000:4:0:0:0] 2025-06-03T10:29:13.650087Z 1 00h01m26.128536s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483648 Items# [80000000:3:2:2:0]: 9:1000:1000 -> 15:1000:1001 ConfigTxSeqNo# 23 2025-06-03T10:29:13.650094Z 1 00h01m26.128536s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483648 Success# true 2025-06-03T10:29:13.650117Z 7 00h01m26.128536s :BS_NODE DEBUG: [7] NodeServiceSetUpdate 2025-06-03T10:29:13.650124Z 7 00h01m26.128536s :BS_NODE DEBUG: [7] VDiskId# [80000000:1:2:0:0] destroyed 2025-06-03T10:29:13.650140Z 2 00h01m26.128536s :BS_NODE DEBUG: [2] NodeServiceSetUpdate 2025-06-03T10:29:13.650147Z 2 00h01m26.128536s :BS_NODE DEBUG: [2] VDiskId# [80000000:3:0:1:0] -> [80000000:4:0:1:0] 2025-06-03T10:29:13.650160Z 3 00h01m26.128536s :BS_NODE DEBUG: [3] NodeServiceSetUpdate 2025-06-03T10:29:13.650167Z 3 00h01m26.128536s :BS_NODE DEBUG: [3] VDiskId# [80000000:3:0:2:0] -> [80000000:4:0:2:0] 2025-06-03T10:29:13.650181Z 4 00h01m26.128536s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-03T10:29:13.650188Z 4 00h01m26.128536s :BS_NODE DEBUG: [4] VDiskId# [80000000:3:1:0:0] -> [80000000:4:1:0:0] 2025-06-03T10:29:13.650201Z 5 00h01m26.128536s :BS_NODE DEBUG: [5] NodeServiceSetUpdate 2025-06-03T10:29:13.650208Z 5 00h01m26.128536s :BS_NODE DEBUG: [5] VDiskId# [80000000:3:1:1:0] -> [80000000:4:1:1:0] 2025-06-03T10:29:13.650221Z 6 00h01m26.128536s :BS_NODE DEBUG: [6] NodeServiceSetUpdate 2025-06-03T10:29:13.650228Z 6 00h01m26.128536s :BS_NODE DEBUG: [6] VDiskId# [80000000:3:1:2:0] -> [80000000:4:1:2:0] 2025-06-03T10:29:13.650237Z 9 00h01m26.128536s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-03T10:29:13.650249Z 13 00h01m26.128536s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-03T10:29:13.650257Z 13 00h01m26.128536s :BS_NODE DEBUG: [13] VDiskId# [80000000:3:2:0:0] -> [80000000:4:2:0:0] 2025-06-03T10:29:13.650270Z 14 00h01m26.128536s :BS_NODE DEBUG: [14] NodeServiceSetUpdate 2025-06-03T10:29:13.650276Z 14 00h01m26.128536s :BS_NODE DEBUG: [14] VDiskId# [80000000:3:2:1:0] -> [80000000:4:2:1:0] 2025-06-03T10:29:13.650288Z 15 00h01m26.128536s :BS_NODE DEBUG: [15] NodeServiceSetUpdate 2025-06-03T10:29:13.650295Z 15 00h01m26.128536s :BS_NODE 
DEBUG: [15] VDiskId# [80000000:4:2:2:0] PDiskId# 1000 VSlotId# 1001 created 2025-06-03T10:29:13.650305Z 15 00h01m26.128536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to INIT_PENDING 2025-06-03T10:29:13.650509Z 10 00h01m26.868512s :BS_NODE DEBUG: [10] VDiskId# [80000001:1:0:0:0] status changed to READY 2025-06-03T10:29:13.650601Z 2 00h01m27.985512s :BS_NODE DEBUG: [2] VDiskId# [80000001:1:2:1:0] status changed to READY 2025-06-03T10:29:13.650683Z 15 00h01m28.421536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to REPLICATING 2025-06-03T10:29:13.650905Z 3 00h01m30.313512s :BS_NODE DEBUG: [3] VDiskId# [80000001:1:2:2:0] status changed to READY 2025-06-03T10:29:13.651006Z 15 00h01m31.498512s :BS_NODE DEBUG: [15] VDiskId# [80000001:1:1:2:0] status changed to READY 2025-06-03T10:29:13.651073Z 13 00h01m32.221512s :BS_NODE DEBUG: [13] VDiskId# [80000001:1:1:0:0] status changed to READY 2025-06-03T10:29:13.651151Z 14 00h01m32.845512s :BS_NODE DEBUG: [14] VDiskId# [80000001:1:1:1:0] status changed to READY 2025-06-03T10:29:13.651324Z 15 00h01m40.531536s :BS_NODE DEBUG: [15] VDiskId# [80000000:4:2:2:0] status changed to READY 2025-06-03T10:29:13.651490Z 9 00h01m40.532048s :BS_NODE DEBUG: [9] NodeServiceSetUpdate 2025-06-03T10:29:13.651500Z 9 00h01m40.532048s :BS_NODE DEBUG: [9] VDiskId# [80000000:3:2:2:0] destroyed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/backup_ut/unittest >> ListObjectsInS3Export::ParametersValidation [GOOD] Test command err: 2025-06-03T10:27:36.000542Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667828178706494:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:27:36.000605Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0025ff/r3tmp/tmpSRfxtw/pdisk_1.dat TServer::EnableGrpc on GrpcPort 22311, node 1 TClient is connected to server localhost:7672 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:27:36.394760Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
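Taken together, the BS_SELFHEAL records above trace one full self-heal cycle: "group can't be reassigned right now" repeats while any replica other than the decommitted slot is NotReady; once the last two report READY, the Reassigner polls every VDisk (DiskIsOk# true for all eight survivors), moves slot 9:1000:1000 to 15:1000:1001, bumps the group generation from 3 to 4, and destroys the old replica. A hedged sketch of the gating rule only — illustrative types, not the real self_heal.cpp logic:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Illustrative model of the rule implied by the repeated
// "group can't be reassigned right now" records: the decommitted replica
// may only be moved once every OTHER replica in the group reports Ready.
struct TVDiskInfo {
    std::string Id;                    // e.g. "[80000000:3:2:2:0]"
    bool Ready = false;                // "Ready" vs "NotReady" in the log
    bool RequiresReassignment = false; // the slot self-heal wants to move
};

bool CanReassign(const std::vector<TVDiskInfo>& group) {
    for (const auto& d : group) {
        if (!d.RequiresReassignment && !d.Ready) {
            return false; // logged as "group can't be reassigned right now"
        }
    }
    return true;
}

int main() {
    // State as first logged: six Ready, two NotReady, one decommitted slot.
    std::vector<TVDiskInfo> group = {
        {"[80000000:3:0:0:0]", true,  false}, {"[80000000:3:0:1:0]", true,  false},
        {"[80000000:3:0:2:0]", true,  false}, {"[80000000:3:1:0:0]", true,  false},
        {"[80000000:3:1:1:0]", true,  false}, {"[80000000:3:1:2:0]", true,  false},
        {"[80000000:3:2:0:0]", false, false}, {"[80000000:3:2:1:0]", false, false},
        {"[80000000:3:2:2:0]", true,  true},
    };
    std::cout << CanReassign(group) << "\n"; // 0: reassignment blocked
    group[6].Ready = group[7].Ready = true;  // both become READY at ~00h01m26
    std::cout << CanReassign(group) << "\n"; // 1: Reassigner may start
}
```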
2025-06-03T10:27:36.448018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:27:36.448049Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:27:36.454421Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:27:36.457045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:27:36.457057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:27:36.457059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:27:36.457111Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:27:36.459342Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 273285138, Sender [1:7511667828178706349:2068], Recipient [1:7511667832473674097:2251]: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "PostgreSQL" } } ItemKinds: 26 ItemKinds: 34 ItemKinds: 52 ItemKinds: 54 ItemKinds: 73 Local: true } 2025-06-03T10:27:36.459366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "ObjectStorage" AvailableExternalDataSources: "PostgreSQL" } 2025-06-03T10:27:36.459369Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:36.459480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage, ObjectStorage, PostgreSQL 2025-06-03T10:27:36.459491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:36.459501Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:7663: Send TEvConfigNotificationResponse: SubscriptionId: 0 ConfigId { } 2025-06-03T10:27:36.715776Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667832473674619:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.715864Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.716086Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667832473674655:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:27:36.716185Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511667832473673922:2140] Handle TEvProposeTransaction 2025-06-03T10:27:36.716193Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511667832473673922:2140] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:27:36.716205Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511667832473673922:2140] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511667832473674658:2609] 2025-06-03T10:27:36.732965Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511667832473674658:2609] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/Root" 2025-06-03T10:27:36.733015Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511667832473674658:2609] txid# 281474976715658 Bootstrap, UserSID: metadata@system CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:27:36.733019Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511667832473674658:2609] txid# 281474976715658 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 1 2025-06-03T10:27:36.733837Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:7511667832473674658:2609] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:27:36.733860Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511667832473674658:2609] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:27:36.733900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511667832473674658:2609] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:27:36.733944Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511667832473674658:2609] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:27:36.733957Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511667832473674658:2609] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-03T10:27:36.734005Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511667832473674658:2609] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:27:36.734025Z node 1 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511667832473674683:2615], Recipient [1:7511667832473674097:2251]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:36.734030Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:27:36.734032Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:27:36.734041Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511667832473674658:2609], Recipient [1:7511667832473674097:2251]: {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-03T10:27:36.734043Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:27:36.735557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: ".metadata/workload_manager/pools/default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } TxId: 281474976715658 TabletId: 72057594046644480 Owner: "metadata@system" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:27:36.735696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:27:36.735726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: .metadata, child id: [OwnerId: 72057594046644480, LocalPathId: 2], at schemeshard: 72057594046644480 2025-06-03T10:27:36.735738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 2] was 0 2025-06-03T10:27:36.735755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:27:36.735760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, o ... 
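Up to the elision above, the recurring pattern is the lazy creation of the default resource pool: WorkloadService first gets NOT_FOUND for /Root/.metadata/workload_manager/pools/default, proposes ESchemeOpCreateResourcePool, and a racing doublecheck later sees "path exist, request accepts it" — benign, because create-if-missing treats an existing path as success. A toy model of that idempotent create, not schemeshard code:

```cpp
#include <iostream>
#include <mutex>
#include <set>
#include <string>

// Illustrative-only model of the lazy "create default resource pool" dance
// in the log: the fetch fails with NOT_FOUND, creation is proposed, and a
// concurrent doublecheck that loses the race gets "path exist" -- which the
// caller treats as success rather than as an error.
class TSchemeShardModel {
    std::mutex M;
    std::set<std::string> Paths;
public:
    // Returns "ok" on creation, "path exist" if someone got there first.
    std::string CreatePath(const std::string& path) {
        std::lock_guard<std::mutex> g(M);
        return Paths.insert(path).second ? "ok" : "path exist";
    }
};

int main() {
    TSchemeShardModel ss;
    const std::string pool = "/Root/.metadata/workload_manager/pools/default";
    std::cout << ss.CreatePath(pool) << "\n"; // first proposer wins: ok
    std::cout << ss.CreatePath(pool) << "\n"; // doublecheck retry: path exist (benign)
}
```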
, Sender [55:7511668227878755786:2384], Recipient [55:7511668223583786760:2200]: NKikimrExport.TEvGetExportRequest Request { Id: 281474976715664 } DatabaseName: "/Root" 2025-06-03T10:29:08.609767Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4984: StateWork, processing event TEvExport::TEvGetExportRequest 2025-06-03T10:29:08.609999Z node 55 :TX_PROXY DEBUG: rpc_get_operation.cpp:209: [GetExport] [55:7511668227878755786:2384] [0] Handle TEvExport::TEvGetExportResponse: record# Entry { Id: 281474976715664 Status: SUCCESS Progress: PROGRESS_DONE ExportToS3Settings { endpoint: "localhost:15076" scheme: HTTP bucket: "test_bucket" items { source_path: "dir1/Table1" destination_prefix: "Prefix/dir1/Table1" } items { source_path: "Table0" destination_prefix: "Prefix/Table0" } items { source_path: "dir1/dir2/Table2" destination_prefix: "Prefix/dir1/dir2/Table2" } source_path: "/Root" destination_prefix: "Prefix//" } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1748946548 } end_time { seconds: 1748946548 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1748946548 } end_time { seconds: 1748946548 } } ItemsProgress { parts_total: 1 parts_completed: 1 start_time { seconds: 1748946548 } end_time { seconds: 1748946548 } } StartTime { seconds: 1748946548 } EndTime { seconds: 1748946548 } } 2025-06-03T10:29:08.611316Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [55:7511668227878755790:2385] Resolve database: name# /Root 2025-06-03T10:29:08.611431Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [55:7511668227878755789:3490], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.611449Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.611453Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:29:08.611516Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [55:7511668227878755790:2385] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:29:08.611527Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [55:7511668227878755790:2385] Send request: schemeShardId# 72057594046644480 2025-06-03T10:29:08.611733Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [55:7511668227878755793:3492], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.611739Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.611742Z node 55 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:29:08.611867Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 275251210, Sender [55:7511668227878755790:2385], Recipient [55:7511668223583786760:2200]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:15076" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" } PageSize: 0 PageToken: "" 2025-06-03T10:29:08.611873Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4999: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-03T10:29:08.611916Z node 55 :IMPORT INFO: schemeshard_import_getters.cpp:1308: Reply: self# [55:7511668227878755794:3493], status# 400010, error# Empty S3 prefix specified 2025-06-03T10:29:08.612004Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [55:7511668227878755790:2385] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Empty S3 prefix specified" } 2025-06-03T10:29:08.612844Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [55:7511668227878755795:2386] Resolve database: name# /Root 2025-06-03T10:29:08.612939Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [55:7511668227878755793:3492], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.612947Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.612950Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:29:08.613043Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [55:7511668227878755795:2386] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:29:08.613050Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [55:7511668227878755795:2386] Send request: schemeShardId# 72057594046644480 2025-06-03T10:29:08.613164Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [55:7511668227878755798:3495], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.613170Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.613173Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:29:08.613207Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 275251210, Sender 
[55:7511668227878755795:2386], Recipient [55:7511668223583786760:2200]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: "localhost:15076" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: -42 PageToken: "" 2025-06-03T10:29:08.613210Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4999: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-03T10:29:08.613245Z node 55 :IMPORT INFO: schemeshard_import_getters.cpp:1308: Reply: self# [55:7511668227878755799:3496], status# 400010, error# Page size should be greater than or equal to 0 2025-06-03T10:29:08.613361Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [55:7511668227878755795:2386] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Page size should be greater than or equal to 0" } 2025-06-03T10:29:08.613784Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [55:7511668227878755798:3495], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.613800Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.613803Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:29:08.614658Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:57: [ListObjectsInS3Export] [55:7511668227878755800:2387] Resolve database: name# /Root 2025-06-03T10:29:08.614759Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:73: [ListObjectsInS3Export] [55:7511668227878755800:2387] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: request# { ErrorCount: 0 DatabaseName: /Root DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root TableId: [72057594046644480:1:0] RequestType: ByPath Operation: OpPath RedirectRequired: true ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:29:08.614766Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:134: [ListObjectsInS3Export] [55:7511668227878755800:2387] Send request: schemeShardId# 72057594046644480 2025-06-03T10:29:08.614859Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [55:7511668227878755803:3498], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.614865Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.614868Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:29:08.614899Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 275251210, Sender [55:7511668227878755800:2387], Recipient [55:7511668223583786760:2200]: NKikimrImport.TEvListObjectsInS3ExportRequest OperationParams { } Settings { endpoint: 
"localhost:15076" scheme: HTTP bucket: "test_bucket" access_key: "test_key" secret_key: "test_secret" prefix: "Prefix" } PageSize: 42 PageToken: "incorrect page token" 2025-06-03T10:29:08.614901Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4999: StateWork, processing event TEvImport::TEvListObjectsInS3ExportRequest 2025-06-03T10:29:08.614936Z node 55 :IMPORT INFO: schemeshard_import_getters.cpp:1308: Reply: self# [55:7511668227878755804:3499], status# 400010, error# Failed to parse page token 2025-06-03T10:29:08.615016Z node 55 :TX_PROXY DEBUG: rpc_list_objects_in_s3_export.cpp:156: [ListObjectsInS3Export] [55:7511668227878755800:2387] Handle TListObjectsInS3ExportRPC::TEvListObjectsInS3ExportResponse: record# Status: BAD_REQUEST Issues { message: "Failed to parse page token" } 2025-06-03T10:29:08.615965Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [55:7511668227878755803:3498], Recipient [55:7511668223583786760:2200]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.615977Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:08.615979Z node 55 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 >> TopicAutoscaling::PartitionMerge_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ControlPlane_CreateAlterDescribe >> BsControllerTest::TestLocalBrokenRelocation |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> SelfHealActorTest::NoMoreThanOneReplicating [GOOD] >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] >> Viewer::JsonStorageListingV2 [GOOD] >> Viewer::JsonStorageListingV2GroupIdFilter >> TTransferTests::Create_Disabled >> DataShardReadIterator::ShouldNotReadAfterCancel [GOOD] >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/effects/unittest >> KqpEffects::InsertAbort_Literal_Conflict-UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 25042, MsgBus: 23947 2025-06-03T10:29:09.611677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668231275576508:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:09.612020Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001176/r3tmp/tmp9lCgdQ/pdisk_1.dat 2025-06-03T10:29:09.723860Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:09.724491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.724510Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.725772Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 25042, node 1 2025-06-03T10:29:09.764836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:09.764852Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:09.764854Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:09.764905Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23947 TClient is connected to server localhost:23947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:29:09.918611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:09.923871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:09.930724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.014370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.099465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.147354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:10.334026Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668235570545401:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.334061Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.415920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.431545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.444312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.456229Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.466669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.481322Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.503499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:10.522701Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668235570546054:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.522725Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.522730Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668235570546059:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:10.523608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:10.527787Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668235570546061:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:10.628704Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668235570546121:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:10.984072Z node 1 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-03T10:29:10.986176Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037888 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:29:10.986238Z node 1 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037888 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:29:10.986350Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [1:7511668235570546406:2508], Table: `/Root/TwoShard` ([72057594046644480:2:1]), SessionActorId: [1:7511668235570546384:2508]Got CONSTRAINT VIOLATION for table `/Root/TwoShard`. ShardID=72075186224037888, Sink=[1:7511668235570546406:2508].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:29:10.986475Z node 1 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [1:7511668235570546399:2508], SessionActorId: [1:7511668235570546384:2508], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/TwoShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[1:7511668235570546384:2508]. isRollback=0 2025-06-03T10:29:10.986555Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=1&id=NTljNTM4ZDMtMTVjYTc2NTYtZGY4ZGM1YzAtODllMmZkMzM=, ActorId: [1:7511668235570546384:2508], ActorState: ExecuteState, TraceId: 01jwtnd0515xk4hre53zexhgbt, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [1:7511668235570546400:2508] from: [1:7511668235570546399:2508] 2025-06-03T10:29:10.986576Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [1:7511668235570546400:2508] TxId: 281474976715672. Ctx: { TraceId: 01jwtnd0515xk4hre53zexhgbt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTljNTM4ZDMtMTVjYTc2NTYtZGY4ZGM1YzAtODllMmZkMzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/TwoShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:29:10.986634Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NTljNTM4ZDMtMTVjYTc2NTYtZGY4ZGM1YzAtODllMmZkMzM=, ActorId: [1:7511668235570546384:2508], ActorState: ExecuteState, TraceId: 01jwtnd0515xk4hre53zexhgbt, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 2457, MsgBus: 9198 2025-06-03T10:29:11.451548Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668242581399768:2208];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/bui ... ResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:12.651645Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7511668246876369492:2512], TxId: 281474976715673, task: 1. Ctx: { SessionId : ydb://session/3?node_id=2&id=ZTExZjhiNjEtMjYwZmZjOTItNmZiNjY0MDMtNmZlYmU5NDI=. CustomerSuppliedId : . TraceId : 01jwtnd1r55362k3xzr64282yz. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
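Node 1's run above exercises the expected failure path of this test: the datashard rejects the second insert of an existing key with STATUS_CONSTRAINT_VIOLATION ("Conflict with existing key.", issue code 2012), the write actor escalates it to PRECONDITION_FAILED, and the session returns an error QueryResponse. A minimal sketch of the key-conflict check, with hypothetical types rather than datashard code:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>

// Toy stand-in for the uniqueness check behind the log's
// "Conflict with existing key." (issue code 2012).
struct TWriteResult {
    bool Ok = true;
    std::optional<std::string> Issue;
};

class TShard {
    std::map<int, std::string> Rows; // primary key -> value
public:
    TWriteResult Insert(int key, std::string value) {
        if (Rows.count(key)) {
            return {false, "Conflict with existing key., code: 2012"};
        }
        Rows.emplace(key, std::move(value));
        return {};
    }
};

int main() {
    TShard shard;
    shard.Insert(1, "a");
    auto r = shard.Insert(1, "b"); // same key again, as the test does
    std::cout << (r.Ok ? std::string("ok")
                       : "STATUS_CONSTRAINT_VIOLATION: " + *r.Issue) << "\n";
}
```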
: Error: Duplicated keys found., code: 2012 }. 2025-06-03T10:29:12.651868Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [2:7511668246876369493:2513], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=ZTExZjhiNjEtMjYwZmZjOTItNmZiNjY0MDMtNmZlYmU5NDI=. TraceId : 01jwtnd1r55362k3xzr64282yz. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [2:7511668246876369489:2499], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:29:12.651946Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=ZTExZjhiNjEtMjYwZmZjOTItNmZiNjY0MDMtNmZlYmU5NDI=, ActorId: [2:7511668246876369458:2499], ActorState: ExecuteState, TraceId: 01jwtnd1r55362k3xzr64282yz, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 3730, MsgBus: 20624 2025-06-03T10:29:13.018318Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668250058611080:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:13.018354Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001176/r3tmp/tmponMos4/pdisk_1.dat 2025-06-03T10:29:13.039228Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3730, node 3 2025-06-03T10:29:13.052941Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:13.052964Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:13.052966Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:13.053039Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20624 TClient is connected to server localhost:20624 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:13.123173Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:13.123214Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:13.124015Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.124098Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
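Note how node 1 and node 2 surface the same logical violation through different channels — the datashard's STATUS_CONSTRAINT_VIOLATION versus the compute actor's KIKIMR_CONSTRAINT_VIOLATION ("Duplicated keys found.") — yet both reach the session as PRECONDITION_FAILED. A hypothetical mapping for illustration only:

```cpp
#include <iostream>
#include <string>

// The log shows two internal error channels collapsing into one public
// status: the datashard's constraint violation ("Conflict with existing
// key.") and the compute actor's ("Duplicated keys found.") both reach the
// client as PRECONDITION_FAILED. Names here are assumptions, not YDB enums.
enum class EInternalError { DataShardConstraintViolation, ComputeConstraintViolation };

std::string ToPublicStatus(EInternalError e) {
    switch (e) {
        case EInternalError::DataShardConstraintViolation:
        case EInternalError::ComputeConstraintViolation:
            return "PRECONDITION_FAILED";
    }
    return "INTERNAL_ERROR"; // unreachable; keeps compilers happy
}

int main() {
    std::cout << ToPublicStatus(EInternalError::DataShardConstraintViolation) << "\n";
    std::cout << ToPublicStatus(EInternalError::ComputeConstraintViolation) << "\n";
}
```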
2025-06-03T10:29:13.125390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:13.128773Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:13.142504Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:13.168549Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:13.190859Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:13.434875Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668250058612664:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:13.434907Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:13.456178Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.474422Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.495112Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.515040Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.531304Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.551985Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.570228Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:13.637762Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668250058613318:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:13.637808Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:13.637820Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668250058613324:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:13.638739Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:13.645987Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:29:13.646131Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668250058613326:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:13.722585Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668250058613377:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:13.995883Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7511668250058613634:2511], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jwtnd33028v0ft0028t32xfa. SessionId : ydb://session/3?node_id=3&id=MzViYTQ1MmItODFjYjg1MzktZTVmNDQ1NzEtMThkY2RiOTI=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: PRECONDITION_FAILED KIKIMR_CONSTRAINT_VIOLATION: {
: Error: Conflict with existing key., code: 2012 }. 2025-06-03T10:29:13.996010Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7511668250058613636:2512], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jwtnd33028v0ft0028t32xfa. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=MzViYTQ1MmItODFjYjg1MzktZTVmNDQ1NzEtMThkY2RiOTI=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [3:7511668250058613631:2498], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:29:13.996100Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=MzViYTQ1MmItODFjYjg1MzktZTVmNDQ1NzEtMThkY2RiOTI=, ActorId: [3:7511668250058613600:2498], ActorState: ExecuteState, TraceId: 01jwtnd33028v0ft0028t32xfa, Create QueryResponse for error on request, msg: >> TTransferTests::Create >> TFileStoreWithReboots::CreateWithIntermediateDirs >> DataShardReadIterator::ShouldReadKeyOnlyValueColumn [GOOD] >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite >> TTransferTests::Create_Disabled [GOOD] >> TTransferTests::CreateWithoutCredentials >> TFileStoreWithReboots::AlterAssignDrop >> TFileStoreWithReboots::CreateAlter >> DataShardReadIteratorSysTables::ShouldNotAllowArrow [GOOD] >> ReadIteratorExternalBlobs::ExtBlobs >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] >> TTransferTests::Create [GOOD] >> TTransferTests::CreateSequential >> BsControllerTest::TestLocalBrokenRelocation [GOOD] >> TFileStoreWithReboots::Create >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression >> Balancing::Balancing_ManyTopics_TopicApi [GOOD] >> Balancing::Balancing_ManyTopics_PQv1 >> TTransferTests::CreateWithoutCredentials [GOOD] >> TTransferTests::CreateWrongConfig |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::TestLocalBrokenRelocation [GOOD] Test command err: 2025-06-03T10:29:14.470963Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-03T10:29:14.470992Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-03T10:29:14.471020Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-03T10:29:14.471025Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-03T10:29:14.471032Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-03T10:29:14.471037Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-03T10:29:14.471046Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-03T10:29:14.471052Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-03T10:29:14.471061Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-03T10:29:14.471065Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-03T10:29:14.471071Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-03T10:29:14.471078Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-03T10:29:14.471085Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-03T10:29:14.471090Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-03T10:29:14.471104Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-03T10:29:14.471108Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-03T10:29:14.471115Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-03T10:29:14.471119Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-03T10:29:14.471127Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-03T10:29:14.471132Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-03T10:29:14.471138Z 11 00h00m00.000000s :BS_NODE 
DEBUG: [11] Bootstrap 2025-06-03T10:29:14.471143Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-03T10:29:14.471151Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-03T10:29:14.471156Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-03T10:29:14.471163Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-03T10:29:14.471168Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-03T10:29:14.471175Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-03T10:29:14.471180Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-03T10:29:14.471187Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-03T10:29:14.471192Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-03T10:29:14.471198Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-03T10:29:14.471203Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-03T10:29:14.471210Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-03T10:29:14.471215Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-03T10:29:14.471229Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-03T10:29:14.471234Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-03T10:29:14.471241Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-03T10:29:14.471245Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-03T10:29:14.471253Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-03T10:29:14.471258Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-03T10:29:14.471266Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-03T10:29:14.471271Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-03T10:29:14.471278Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-03T10:29:14.471283Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-03T10:29:14.471290Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-03T10:29:14.471294Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-03T10:29:14.471302Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-03T10:29:14.471306Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-03T10:29:14.471313Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-03T10:29:14.471317Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-03T10:29:14.471325Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-03T10:29:14.471329Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-03T10:29:14.471336Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-03T10:29:14.471343Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-03T10:29:14.471353Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-03T10:29:14.471358Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-03T10:29:14.471381Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-03T10:29:14.471385Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-03T10:29:14.471397Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-03T10:29:14.471402Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-03T10:29:14.471408Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-03T10:29:14.471412Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-03T10:29:14.471419Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-03T10:29:14.471424Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-03T10:29:14.471430Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Bootstrap 2025-06-03T10:29:14.471434Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] Connect 
2025-06-03T10:29:14.471442Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Bootstrap 2025-06-03T10:29:14.471447Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] Connect 2025-06-03T10:29:14.471454Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Bootstrap 2025-06-03T10:29:14.471458Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] Connect 2025-06-03T10:29:14.471466Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Bootstrap 2025-06-03T10:29:14.471472Z 36 00h00m00.000000s :BS_NODE DEBUG: [36] Connect 2025-06-03T10:29:14.476042Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2713:53] Status# ERROR ClientId# [1:2713:53] ServerId# [0:0:0] PipeClient# [1:2713:53] 2025-06-03T10:29:14.476437Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2714:41] Status# ERROR ClientId# [2:2714:41] ServerId# [0:0:0] PipeClient# [2:2714:41] 2025-06-03T10:29:14.476449Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2715:41] Status# ERROR ClientId# [3:2715:41] ServerId# [0:0:0] PipeClient# [3:2715:41] 2025-06-03T10:29:14.476459Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] ClientConnected Sender# [4:2716:41] Status# ERROR ClientId# [4:2716:41] ServerId# [0:0:0] PipeClient# [4:2716:41] 2025-06-03T10:29:14.476467Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2717:41] Status# ERROR ClientId# [5:2717:41] ServerId# [0:0:0] PipeClient# [5:2717:41] 2025-06-03T10:29:14.476476Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2718:41] Status# ERROR ClientId# [6:2718:41] ServerId# [0:0:0] PipeClient# [6:2718:41] 2025-06-03T10:29:14.476485Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2719:41] Status# ERROR ClientId# [7:2719:41] ServerId# [0:0:0] PipeClient# [7:2719:41] 2025-06-03T10:29:14.476494Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2720:41] Status# ERROR ClientId# [8:2720:41] ServerId# [0:0:0] PipeClient# [8:2720:41] 2025-06-03T10:29:14.476502Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2721:41] Status# ERROR ClientId# [9:2721:41] ServerId# [0:0:0] PipeClient# [9:2721:41] 2025-06-03T10:29:14.476511Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2722:41] Status# ERROR ClientId# [10:2722:41] ServerId# [0:0:0] PipeClient# [10:2722:41] 2025-06-03T10:29:14.476520Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2723:41] Status# ERROR ClientId# [11:2723:41] ServerId# [0:0:0] PipeClient# [11:2723:41] 2025-06-03T10:29:14.476528Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2724:41] Status# ERROR ClientId# [12:2724:41] ServerId# [0:0:0] PipeClient# [12:2724:41] 2025-06-03T10:29:14.476536Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2725:41] Status# ERROR ClientId# [13:2725:41] ServerId# [0:0:0] PipeClient# [13:2725:41] 2025-06-03T10:29:14.476545Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2726:41] Status# ERROR ClientId# [14:2726:41] ServerId# [0:0:0] PipeClient# [14:2726:41] 2025-06-03T10:29:14.476557Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2727:41] Status# ERROR ClientId# [15:2727:41] ServerId# [0:0:0] PipeClient# [15:2727:41] 2025-06-03T10:29:14.476566Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2728:41] Status# ERROR ClientId# [16:2728:41] ServerId# [0:0:0] PipeClient# [16:2728:41] 2025-06-03T10:29:14.476574Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2729:41] Status# ERROR ClientId# [17:2729:41] ServerId# 
[0:0:0] PipeClient# [17:2729:41] 2025-06-03T10:29:14.476582Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2730:41] Status# ERROR ClientId# [18:2730:41] ServerId# [0:0:0] PipeClient# [18:2730:41] 2025-06-03T10:29:14.476591Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2731:41] Status# ERROR ClientId# [19:2731:41] ServerId# [0:0:0] PipeClient# [19:2731:41] 2025-06-03T10:29:14.476600Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2732:41] Status# ERROR ClientId# [20:2732:41] ServerId# [0:0:0] PipeClient# [20:2732:41] 2025-06-03T10:29:14.476609Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2733:41] Status# ERROR ClientId# [21:2733:41] ServerId# [0:0:0] PipeClient# [21:2733:41] 2025-06-03T10:29:14.476617Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2734:41] Status# ERROR ClientId# [22:2734:41] ServerId# [0:0:0] PipeClient# [22:2734:41] 2025-06-03T10:29:14.476625Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2735:41] Status# ERROR ClientId# [23:2735:41] ServerId# [0:0:0] PipeClient# [23:2735:41] 2025-06-03T10:29:14.476634Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2736:41] Status# ERROR ClientId# [24:2736:41] ServerId# [0:0:0] PipeClient# [24:2736:41] 2025-06-03T10:29:14.476642Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2737:41] Status# ERROR ClientId# [25:2737:41] ServerId# [0:0:0] PipeClient# [25:2737:41] 2025-06-03T10:29:14.476651Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2738:41] Status# ERROR ClientId# [26:2738:41] ServerId# [0:0:0] PipeClient# [26:2738:41] 2025-06-03T10:29:14.476659Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2739:41] Status# ERROR ClientId# [27:2739:41] ServerId# [0:0:0] PipeClient# [27:2739:41] 2025-06-03T10:29:14.476668Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2740:41] Status# ERROR ClientId# [28:2740:41] ServerId# [0:0:0] PipeClient# [28:2740:41] 2025-06-03T10:29:14.476677Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2741:41] Status# ERROR ClientId# [29:2741:41] ServerId# [0:0:0] PipeClient# [29:2741:41] 2025-06-03T10:29:14.476686Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2742:41] Status# ERROR ClientId# [30:2742:41] ServerId# [0:0:0] PipeClient# [30:2742:41] 2025-06-03T10:29:14.476695Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2743:41] Status# ERROR ClientId# [31:2743:41] ServerId# [0:0:0] PipeClient# [31:2743:41] 2025-06-03T10:29:14.476703Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2744:41] Status# ERROR ClientId# [32:2744:41] ServerId# [0:0:0] PipeClient# [32:2744:41] 2025-06-03T10:29:14.476711Z 33 00h00m00.000000s :BS_NODE DEBUG: [33] ClientConnected Sender# [33:2745:41] Status# ERROR ClientId# [33:2745:41] ServerId# [0:0:0] PipeClient# [33:2745:41] 2025-06-03T10:29:14.476719Z 34 00h00m00.000000s :BS_NODE DEBUG: [34] ClientConnected Sender# [34:2746:41] Status# ERROR ClientId# [34:2746:41] ServerId# [0:0:0] PipeClient# [34:2746:41] 2025-06-03T10:29:14.476728Z 35 00h00m00.000000s :BS_NODE DEBUG: [35] ClientConnected Sender# [35:2747:41] Status# ERROR ClientId# [35:2747:41 ... 
25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000001:2:2:2:0] -> [80000001:3:2:2:0] 2025-06-03T10:29:15.571802Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000021:2:2:2:0] -> [80000021:3:2:2:0] 2025-06-03T10:29:15.571808Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000031:2:2:2:0] -> [80000031:3:2:2:0] 2025-06-03T10:29:15.571814Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000051:2:2:2:0] -> [80000051:3:2:2:0] 2025-06-03T10:29:15.571820Z 28 01h25m00.102560s :BS_NODE DEBUG: [28] VDiskId# [80000061:2:2:2:0] -> [80000061:3:2:2:0] 2025-06-03T10:29:15.571903Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-03T10:29:15.571916Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000010:2:1:0:0] -> [80000010:3:1:0:0] 2025-06-03T10:29:15.571923Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000040:2:1:0:0] -> [80000040:3:1:0:0] 2025-06-03T10:29:15.571930Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000070:2:1:0:0] -> [80000070:3:1:0:0] 2025-06-03T10:29:15.571936Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000001:2:1:1:0] -> [80000001:3:1:1:0] 2025-06-03T10:29:15.571943Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000021:2:1:1:0] -> [80000021:3:1:1:0] 2025-06-03T10:29:15.571948Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000031:2:1:1:0] -> [80000031:3:1:1:0] 2025-06-03T10:29:15.571954Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000051:2:1:1:0] -> [80000051:3:1:1:0] 2025-06-03T10:29:15.571959Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000061:2:1:1:0] -> [80000061:3:1:1:0] 2025-06-03T10:29:15.571966Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000002:1:1:2:0] -> [80000002:2:1:2:0] 2025-06-03T10:29:15.571971Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000012:1:1:2:0] -> [80000012:2:1:2:0] 2025-06-03T10:29:15.571977Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000022:1:1:2:0] -> [80000022:2:1:2:0] 2025-06-03T10:29:15.571983Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000032:1:1:2:0] -> [80000032:2:1:2:0] 2025-06-03T10:29:15.571989Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000042:1:1:2:0] -> [80000042:2:1:2:0] 2025-06-03T10:29:15.571995Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000052:1:1:2:0] -> [80000052:2:1:2:0] 2025-06-03T10:29:15.572001Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000062:1:1:2:0] -> [80000062:2:1:2:0] 2025-06-03T10:29:15.572007Z 13 01h25m00.102560s :BS_NODE DEBUG: [13] VDiskId# [80000072:1:1:2:0] -> [80000072:2:1:2:0] 2025-06-03T10:29:15.572087Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-03T10:29:15.572096Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000010:2:2:2:0] -> [80000010:3:2:2:0] 2025-06-03T10:29:15.572103Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000040:2:2:2:0] -> [80000040:3:2:2:0] 2025-06-03T10:29:15.572108Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000070:2:2:2:0] -> [80000070:3:2:2:0] 2025-06-03T10:29:15.572114Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000002:1:2:0:0] -> [80000002:2:2:0:0] 2025-06-03T10:29:15.572120Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000012:1:2:0:0] -> [80000012:2:2:0:0] 2025-06-03T10:29:15.572126Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000022:1:2:0:0] -> [80000022:2:2:0:0] 2025-06-03T10:29:15.572132Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000032:1:2:0:0] -> [80000032:2:2:0:0] 2025-06-03T10:29:15.572138Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] 
VDiskId# [80000042:1:2:0:0] -> [80000042:2:2:0:0] 2025-06-03T10:29:15.572144Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000052:1:2:0:0] -> [80000052:2:2:0:0] 2025-06-03T10:29:15.572151Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000062:1:2:0:0] -> [80000062:2:2:0:0] 2025-06-03T10:29:15.572156Z 31 01h25m00.102560s :BS_NODE DEBUG: [31] VDiskId# [80000072:1:2:0:0] -> [80000072:2:2:0:0] 2025-06-03T10:29:15.572208Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] NodeServiceSetUpdate 2025-06-03T10:29:15.572217Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000010:2:1:1:0] -> [80000010:3:1:1:0] 2025-06-03T10:29:15.572221Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000040:2:1:1:0] -> [80000040:3:1:1:0] 2025-06-03T10:29:15.572225Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000070:2:1:1:0] -> [80000070:3:1:1:0] 2025-06-03T10:29:15.572229Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000001:2:1:2:0] -> [80000001:3:1:2:0] 2025-06-03T10:29:15.572233Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000021:2:1:2:0] -> [80000021:3:1:2:0] 2025-06-03T10:29:15.572237Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000031:2:1:2:0] -> [80000031:3:1:2:0] 2025-06-03T10:29:15.572243Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000051:2:1:2:0] -> [80000051:3:1:2:0] 2025-06-03T10:29:15.572247Z 16 01h25m00.102560s :BS_NODE DEBUG: [16] VDiskId# [80000061:2:1:2:0] -> [80000061:3:1:2:0] 2025-06-03T10:29:15.572932Z 4 01h25m01.549560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573036Z 5 01h25m01.677560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573094Z 2 01h25m01.759560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573153Z 7 01h25m01.945560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to REPLICATING 2025-06-03T10:29:15.573223Z 10 01h25m01.986560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.573548Z 4 01h25m02.392560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573663Z 5 01h25m02.437560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573760Z 8 01h25m03.221560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to REPLICATING 2025-06-03T10:29:15.573843Z 2 01h25m03.280560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573909Z 4 01h25m03.560560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.573972Z 7 01h25m03.891560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to REPLICATING 2025-06-03T10:29:15.574031Z 7 01h25m04.049560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to REPLICATING 2025-06-03T10:29:15.574093Z 7 01h25m04.612560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to REPLICATING 2025-06-03T10:29:15.574161Z 10 01h25m04.754560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.574230Z 4 01h25m04.839560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to REPLICATING 2025-06-03T10:29:15.575545Z 10 01h25m05.050560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.576365Z 5 01h25m11.527560s :BS_NODE DEBUG: [5] VDiskId# [80000052:2:0:2:0] 
status changed to READY 2025-06-03T10:29:15.578434Z 1 01h25m11.528072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.578455Z 1 01h25m11.528072s :BS_NODE DEBUG: [1] VDiskId# [80000052:1:0:2:0] destroyed 2025-06-03T10:29:15.578499Z 7 01h25m11.677560s :BS_NODE DEBUG: [7] VDiskId# [80000031:3:0:1:0] status changed to READY 2025-06-03T10:29:15.580395Z 1 01h25m11.678072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.580418Z 1 01h25m11.678072s :BS_NODE DEBUG: [1] VDiskId# [80000031:2:0:1:0] destroyed 2025-06-03T10:29:15.580485Z 2 01h25m14.393560s :BS_NODE DEBUG: [2] VDiskId# [80000042:2:0:2:0] status changed to READY 2025-06-03T10:29:15.582399Z 1 01h25m14.394072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.582422Z 1 01h25m14.394072s :BS_NODE DEBUG: [1] VDiskId# [80000042:1:0:2:0] destroyed 2025-06-03T10:29:15.583289Z 7 01h25m19.966560s :BS_NODE DEBUG: [7] VDiskId# [80000051:3:0:1:0] status changed to READY 2025-06-03T10:29:15.585128Z 1 01h25m19.967072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.585148Z 1 01h25m19.967072s :BS_NODE DEBUG: [1] VDiskId# [80000051:2:0:1:0] destroyed 2025-06-03T10:29:15.585945Z 10 01h25m22.251560s :BS_NODE DEBUG: [10] VDiskId# [80000010:3:0:0:0] status changed to READY 2025-06-03T10:29:15.587906Z 1 01h25m22.252072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.587928Z 1 01h25m22.252072s :BS_NODE DEBUG: [1] VDiskId# [80000010:2:0:0:0] destroyed 2025-06-03T10:29:15.587981Z 10 01h25m23.021560s :BS_NODE DEBUG: [10] VDiskId# [80000040:3:0:0:0] status changed to READY 2025-06-03T10:29:15.589806Z 1 01h25m23.022072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.589835Z 1 01h25m23.022072s :BS_NODE DEBUG: [1] VDiskId# [80000040:2:0:0:0] destroyed 2025-06-03T10:29:15.589888Z 7 01h25m24.048560s :BS_NODE DEBUG: [7] VDiskId# [80000001:3:0:1:0] status changed to READY 2025-06-03T10:29:15.591632Z 1 01h25m24.049072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.591654Z 1 01h25m24.049072s :BS_NODE DEBUG: [1] VDiskId# [80000001:2:0:1:0] destroyed 2025-06-03T10:29:15.591694Z 8 01h25m24.298560s :BS_NODE DEBUG: [8] VDiskId# [80000061:3:0:1:0] status changed to READY 2025-06-03T10:29:15.593366Z 1 01h25m24.299072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.593387Z 1 01h25m24.299072s :BS_NODE DEBUG: [1] VDiskId# [80000061:2:0:1:0] destroyed 2025-06-03T10:29:15.593450Z 4 01h25m24.488560s :BS_NODE DEBUG: [4] VDiskId# [80000012:2:0:2:0] status changed to READY 2025-06-03T10:29:15.595260Z 1 01h25m24.489072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.595283Z 1 01h25m24.489072s :BS_NODE DEBUG: [1] VDiskId# [80000012:1:0:2:0] destroyed 2025-06-03T10:29:15.596126Z 10 01h25m25.070560s :BS_NODE DEBUG: [10] VDiskId# [80000070:3:0:0:0] status changed to READY 2025-06-03T10:29:15.598251Z 1 01h25m25.071072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.598278Z 1 01h25m25.071072s :BS_NODE DEBUG: [1] VDiskId# [80000070:2:0:0:0] destroyed 2025-06-03T10:29:15.598326Z 2 01h25m25.657560s :BS_NODE DEBUG: [2] VDiskId# [80000062:2:0:2:0] status changed to READY 2025-06-03T10:29:15.599937Z 1 01h25m25.658072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.599953Z 1 01h25m25.658072s :BS_NODE DEBUG: [1] VDiskId# [80000062:1:0:2:0] destroyed 2025-06-03T10:29:15.600091Z 4 01h25m28.369560s :BS_NODE DEBUG: [4] VDiskId# [80000032:2:0:2:0] status changed to READY 2025-06-03T10:29:15.602018Z 1 01h25m28.370072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 
2025-06-03T10:29:15.602041Z 1 01h25m28.370072s :BS_NODE DEBUG: [1] VDiskId# [80000032:1:0:2:0] destroyed 2025-06-03T10:29:15.602985Z 7 01h25m30.104560s :BS_NODE DEBUG: [7] VDiskId# [80000021:3:0:1:0] status changed to READY 2025-06-03T10:29:15.604702Z 1 01h25m30.105072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.604720Z 1 01h25m30.105072s :BS_NODE DEBUG: [1] VDiskId# [80000021:2:0:1:0] destroyed 2025-06-03T10:29:15.604757Z 4 01h25m30.579560s :BS_NODE DEBUG: [4] VDiskId# [80000002:2:0:2:0] status changed to READY 2025-06-03T10:29:15.606451Z 1 01h25m30.580072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.606468Z 1 01h25m30.580072s :BS_NODE DEBUG: [1] VDiskId# [80000002:1:0:2:0] destroyed 2025-06-03T10:29:15.607551Z 5 01h25m35.403560s :BS_NODE DEBUG: [5] VDiskId# [80000072:2:0:2:0] status changed to READY 2025-06-03T10:29:15.609474Z 1 01h25m35.404072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.609493Z 1 01h25m35.404072s :BS_NODE DEBUG: [1] VDiskId# [80000072:1:0:2:0] destroyed 2025-06-03T10:29:15.609535Z 4 01h25m35.891560s :BS_NODE DEBUG: [4] VDiskId# [80000022:2:0:2:0] status changed to READY 2025-06-03T10:29:15.611123Z 1 01h25m35.892072s :BS_NODE DEBUG: [1] NodeServiceSetUpdate 2025-06-03T10:29:15.611143Z 1 01h25m35.892072s :BS_NODE DEBUG: [1] VDiskId# [80000022:1:0:2:0] destroyed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut_selfheal/unittest >> BsControllerTest::SelfHealBlock4Plus2 [GOOD] Test command err: 2025-06-03T10:29:13.265493Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Bootstrap 2025-06-03T10:29:13.265520Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] Connect 2025-06-03T10:29:13.265546Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Bootstrap 2025-06-03T10:29:13.265551Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] Connect 2025-06-03T10:29:13.265556Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Bootstrap 2025-06-03T10:29:13.265560Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] Connect 2025-06-03T10:29:13.265567Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Bootstrap 2025-06-03T10:29:13.265571Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] Connect 2025-06-03T10:29:13.265578Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Bootstrap 2025-06-03T10:29:13.265582Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] Connect 2025-06-03T10:29:13.265587Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Bootstrap 2025-06-03T10:29:13.265592Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] Connect 2025-06-03T10:29:13.265599Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Bootstrap 2025-06-03T10:29:13.265611Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] Connect 2025-06-03T10:29:13.265617Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Bootstrap 2025-06-03T10:29:13.265622Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] Connect 2025-06-03T10:29:13.265628Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Bootstrap 2025-06-03T10:29:13.265632Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] Connect 2025-06-03T10:29:13.265640Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Bootstrap 2025-06-03T10:29:13.265644Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] Connect 2025-06-03T10:29:13.265650Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Bootstrap 2025-06-03T10:29:13.265654Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] Connect 2025-06-03T10:29:13.265660Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Bootstrap 2025-06-03T10:29:13.265664Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] Connect 2025-06-03T10:29:13.265670Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Bootstrap 2025-06-03T10:29:13.265674Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] Connect 2025-06-03T10:29:13.265681Z 14 
00h00m00.000000s :BS_NODE DEBUG: [14] Bootstrap 2025-06-03T10:29:13.265685Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] Connect 2025-06-03T10:29:13.265691Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Bootstrap 2025-06-03T10:29:13.265696Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] Connect 2025-06-03T10:29:13.265702Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Bootstrap 2025-06-03T10:29:13.265706Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] Connect 2025-06-03T10:29:13.265713Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Bootstrap 2025-06-03T10:29:13.265717Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] Connect 2025-06-03T10:29:13.265732Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Bootstrap 2025-06-03T10:29:13.265737Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] Connect 2025-06-03T10:29:13.265743Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Bootstrap 2025-06-03T10:29:13.265747Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] Connect 2025-06-03T10:29:13.265752Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Bootstrap 2025-06-03T10:29:13.265757Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] Connect 2025-06-03T10:29:13.265764Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Bootstrap 2025-06-03T10:29:13.265768Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] Connect 2025-06-03T10:29:13.265774Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Bootstrap 2025-06-03T10:29:13.265779Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] Connect 2025-06-03T10:29:13.265785Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Bootstrap 2025-06-03T10:29:13.265789Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] Connect 2025-06-03T10:29:13.265803Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Bootstrap 2025-06-03T10:29:13.265808Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] Connect 2025-06-03T10:29:13.265814Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Bootstrap 2025-06-03T10:29:13.265818Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] Connect 2025-06-03T10:29:13.265824Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Bootstrap 2025-06-03T10:29:13.265829Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] Connect 2025-06-03T10:29:13.265836Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Bootstrap 2025-06-03T10:29:13.265842Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] Connect 2025-06-03T10:29:13.265851Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Bootstrap 2025-06-03T10:29:13.265855Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] Connect 2025-06-03T10:29:13.265866Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Bootstrap 2025-06-03T10:29:13.265869Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] Connect 2025-06-03T10:29:13.265875Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Bootstrap 2025-06-03T10:29:13.265880Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] Connect 2025-06-03T10:29:13.265885Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Bootstrap 2025-06-03T10:29:13.265889Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] Connect 2025-06-03T10:29:13.265895Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Bootstrap 2025-06-03T10:29:13.265900Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] Connect 2025-06-03T10:29:13.269135Z 1 00h00m00.000000s :BS_NODE DEBUG: [1] ClientConnected Sender# [1:2157:49] Status# ERROR ClientId# [1:2157:49] ServerId# [0:0:0] PipeClient# [1:2157:49] 2025-06-03T10:29:13.269470Z 2 00h00m00.000000s :BS_NODE DEBUG: [2] ClientConnected Sender# [2:2158:37] Status# ERROR ClientId# [2:2158:37] ServerId# [0:0:0] PipeClient# [2:2158:37] 2025-06-03T10:29:13.269482Z 3 00h00m00.000000s :BS_NODE DEBUG: [3] ClientConnected Sender# [3:2159:37] Status# ERROR ClientId# [3:2159:37] ServerId# [0:0:0] PipeClient# [3:2159:37] 2025-06-03T10:29:13.269490Z 4 00h00m00.000000s :BS_NODE DEBUG: [4] 
ClientConnected Sender# [4:2160:37] Status# ERROR ClientId# [4:2160:37] ServerId# [0:0:0] PipeClient# [4:2160:37] 2025-06-03T10:29:13.269497Z 5 00h00m00.000000s :BS_NODE DEBUG: [5] ClientConnected Sender# [5:2161:37] Status# ERROR ClientId# [5:2161:37] ServerId# [0:0:0] PipeClient# [5:2161:37] 2025-06-03T10:29:13.269504Z 6 00h00m00.000000s :BS_NODE DEBUG: [6] ClientConnected Sender# [6:2162:37] Status# ERROR ClientId# [6:2162:37] ServerId# [0:0:0] PipeClient# [6:2162:37] 2025-06-03T10:29:13.269512Z 7 00h00m00.000000s :BS_NODE DEBUG: [7] ClientConnected Sender# [7:2163:37] Status# ERROR ClientId# [7:2163:37] ServerId# [0:0:0] PipeClient# [7:2163:37] 2025-06-03T10:29:13.269519Z 8 00h00m00.000000s :BS_NODE DEBUG: [8] ClientConnected Sender# [8:2164:37] Status# ERROR ClientId# [8:2164:37] ServerId# [0:0:0] PipeClient# [8:2164:37] 2025-06-03T10:29:13.269526Z 9 00h00m00.000000s :BS_NODE DEBUG: [9] ClientConnected Sender# [9:2165:37] Status# ERROR ClientId# [9:2165:37] ServerId# [0:0:0] PipeClient# [9:2165:37] 2025-06-03T10:29:13.269533Z 10 00h00m00.000000s :BS_NODE DEBUG: [10] ClientConnected Sender# [10:2166:37] Status# ERROR ClientId# [10:2166:37] ServerId# [0:0:0] PipeClient# [10:2166:37] 2025-06-03T10:29:13.269540Z 11 00h00m00.000000s :BS_NODE DEBUG: [11] ClientConnected Sender# [11:2167:37] Status# ERROR ClientId# [11:2167:37] ServerId# [0:0:0] PipeClient# [11:2167:37] 2025-06-03T10:29:13.269547Z 12 00h00m00.000000s :BS_NODE DEBUG: [12] ClientConnected Sender# [12:2168:37] Status# ERROR ClientId# [12:2168:37] ServerId# [0:0:0] PipeClient# [12:2168:37] 2025-06-03T10:29:13.269554Z 13 00h00m00.000000s :BS_NODE DEBUG: [13] ClientConnected Sender# [13:2169:37] Status# ERROR ClientId# [13:2169:37] ServerId# [0:0:0] PipeClient# [13:2169:37] 2025-06-03T10:29:13.269561Z 14 00h00m00.000000s :BS_NODE DEBUG: [14] ClientConnected Sender# [14:2170:37] Status# ERROR ClientId# [14:2170:37] ServerId# [0:0:0] PipeClient# [14:2170:37] 2025-06-03T10:29:13.269569Z 15 00h00m00.000000s :BS_NODE DEBUG: [15] ClientConnected Sender# [15:2171:37] Status# ERROR ClientId# [15:2171:37] ServerId# [0:0:0] PipeClient# [15:2171:37] 2025-06-03T10:29:13.269577Z 16 00h00m00.000000s :BS_NODE DEBUG: [16] ClientConnected Sender# [16:2172:37] Status# ERROR ClientId# [16:2172:37] ServerId# [0:0:0] PipeClient# [16:2172:37] 2025-06-03T10:29:13.269583Z 17 00h00m00.000000s :BS_NODE DEBUG: [17] ClientConnected Sender# [17:2173:37] Status# ERROR ClientId# [17:2173:37] ServerId# [0:0:0] PipeClient# [17:2173:37] 2025-06-03T10:29:13.269590Z 18 00h00m00.000000s :BS_NODE DEBUG: [18] ClientConnected Sender# [18:2174:37] Status# ERROR ClientId# [18:2174:37] ServerId# [0:0:0] PipeClient# [18:2174:37] 2025-06-03T10:29:13.269601Z 19 00h00m00.000000s :BS_NODE DEBUG: [19] ClientConnected Sender# [19:2175:37] Status# ERROR ClientId# [19:2175:37] ServerId# [0:0:0] PipeClient# [19:2175:37] 2025-06-03T10:29:13.269608Z 20 00h00m00.000000s :BS_NODE DEBUG: [20] ClientConnected Sender# [20:2176:37] Status# ERROR ClientId# [20:2176:37] ServerId# [0:0:0] PipeClient# [20:2176:37] 2025-06-03T10:29:13.269616Z 21 00h00m00.000000s :BS_NODE DEBUG: [21] ClientConnected Sender# [21:2177:37] Status# ERROR ClientId# [21:2177:37] ServerId# [0:0:0] PipeClient# [21:2177:37] 2025-06-03T10:29:13.269623Z 22 00h00m00.000000s :BS_NODE DEBUG: [22] ClientConnected Sender# [22:2178:37] Status# ERROR ClientId# [22:2178:37] ServerId# [0:0:0] PipeClient# [22:2178:37] 2025-06-03T10:29:13.269631Z 23 00h00m00.000000s :BS_NODE DEBUG: [23] ClientConnected Sender# [23:2179:37] Status# 
ERROR ClientId# [23:2179:37] ServerId# [0:0:0] PipeClient# [23:2179:37] 2025-06-03T10:29:13.269638Z 24 00h00m00.000000s :BS_NODE DEBUG: [24] ClientConnected Sender# [24:2180:37] Status# ERROR ClientId# [24:2180:37] ServerId# [0:0:0] PipeClient# [24:2180:37] 2025-06-03T10:29:13.269644Z 25 00h00m00.000000s :BS_NODE DEBUG: [25] ClientConnected Sender# [25:2181:37] Status# ERROR ClientId# [25:2181:37] ServerId# [0:0:0] PipeClient# [25:2181:37] 2025-06-03T10:29:13.269652Z 26 00h00m00.000000s :BS_NODE DEBUG: [26] ClientConnected Sender# [26:2182:37] Status# ERROR ClientId# [26:2182:37] ServerId# [0:0:0] PipeClient# [26:2182:37] 2025-06-03T10:29:13.269659Z 27 00h00m00.000000s :BS_NODE DEBUG: [27] ClientConnected Sender# [27:2183:37] Status# ERROR ClientId# [27:2183:37] ServerId# [0:0:0] PipeClient# [27:2183:37] 2025-06-03T10:29:13.269666Z 28 00h00m00.000000s :BS_NODE DEBUG: [28] ClientConnected Sender# [28:2184:37] Status# ERROR ClientId# [28:2184:37] ServerId# [0:0:0] PipeClient# [28:2184:37] 2025-06-03T10:29:13.269673Z 29 00h00m00.000000s :BS_NODE DEBUG: [29] ClientConnected Sender# [29:2185:37] Status# ERROR ClientId# [29:2185:37] ServerId# [0:0:0] PipeClient# [29:2185:37] 2025-06-03T10:29:13.269680Z 30 00h00m00.000000s :BS_NODE DEBUG: [30] ClientConnected Sender# [30:2186:37] Status# ERROR ClientId# [30:2186:37] ServerId# [0:0:0] PipeClient# [30:2186:37] 2025-06-03T10:29:13.269687Z 31 00h00m00.000000s :BS_NODE DEBUG: [31] ClientConnected Sender# [31:2187:37] Status# ERROR ClientId# [31:2187:37] ServerId# [0:0:0] PipeClient# [31:2187:37] 2025-06-03T10:29:13.269694Z 32 00h00m00.000000s :BS_NODE DEBUG: [32] ClientConnected Sender# [32:2188:37] Status# ERROR ClientId# [32:2188:37] ServerId# [0:0:0] PipeClient# [32:2188:37] 2025-06-03T10:29:13.303843Z 1 00h00m00.002560s :BS_NODE DEBUG: [1] CheckState from [1:2257:73] expected 1 current 0 2025-06-03T10:29:13.303878Z 2 00h00m00.002560s :BS_NODE DEBUG: [2] CheckState from [2:2258:38] expected 1 current 0 2025-06-03T10:29:13.303886Z 3 00h00m00.002560s :BS_NODE DEBUG: [3] CheckState from [3:2259:38] expected 1 current 0 2025-06-03T10:29:13.303894Z 4 00h00m00.002560s :BS_NODE DEBUG: [4] CheckState from [4:2260:38] expected 1 current 0 2025-06-03T10:29:13.303900Z 5 00h00m00.002560s :BS_NODE DEBUG: [5] CheckState from [5:2261:38] expected 1 current 0 2025-06-03T10:29:13.303907Z 6 00h00m00.002560s :BS_NODE DEBUG: [6] CheckState from [6:2262:38] expected 1 current 0 2025-06-03T10:29:13.303914Z 7 00h00m00.002560s :BS_NODE DEBUG: [7] CheckState from [7:2263:38] expected 1 current 0 2025-06-03T10:29:13.303921Z 8 00h00m00.002560s :BS_NODE DEBUG: [8] CheckState from [8:2264:38] expected 1 current 0 2025-06-03T10:29:13.303928Z 9 00h00m00.002560s :BS_NODE DEBUG: [9] CheckState from [9:2265:38] expected 1 current 0 2025-06-03T10:29:13.303935Z 10 00h00m00.002560s :BS_NODE DEBUG: [10] CheckState from [10:2266 ... 
:29:15.609227Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483651 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.609232Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483651 VDiskId# [80000003:4:0:6:0] DiskIsOk# true 2025-06-03T10:29:15.609238Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483651 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.609243Z 1 05h15m00.120992s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483651 VDiskId# [80000003:4:0:7:0] DiskIsOk# true 2025-06-03T10:29:15.611716Z 1 05h15m00.121504s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483651 Items# [80000003:4:0:3:0]: 17:1000:1008 -> 27:1001:1017 ConfigTxSeqNo# 489 2025-06-03T10:29:15.611732Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483651 Success# true 2025-06-03T10:29:15.611763Z 17 05h15m00.121504s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.611785Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] NodeServiceSetUpdate 2025-06-03T10:29:15.611798Z 4 05h15m00.121504s :BS_NODE DEBUG: [4] VDiskId# [80000003:4:0:2:0] -> [80000003:5:0:2:0] 2025-06-03T10:29:15.611814Z 25 05h15m00.121504s :BS_NODE DEBUG: [25] NodeServiceSetUpdate 2025-06-03T10:29:15.611823Z 25 05h15m00.121504s :BS_NODE DEBUG: [25] VDiskId# [80000003:4:0:0:0] -> [80000003:5:0:0:0] 2025-06-03T10:29:15.611839Z 26 05h15m00.121504s :BS_NODE DEBUG: [26] NodeServiceSetUpdate 2025-06-03T10:29:15.611848Z 26 05h15m00.121504s :BS_NODE DEBUG: [26] VDiskId# [80000003:4:0:1:0] -> [80000003:5:0:1:0] 2025-06-03T10:29:15.611864Z 27 05h15m00.121504s :BS_NODE DEBUG: [27] NodeServiceSetUpdate 2025-06-03T10:29:15.611872Z 27 05h15m00.121504s :BS_NODE DEBUG: [27] VDiskId# [80000003:5:0:3:0] PDiskId# 1001 VSlotId# 1017 created 2025-06-03T10:29:15.611894Z 27 05h15m00.121504s :BS_NODE DEBUG: [27] VDiskId# [80000003:5:0:3:0] status changed to INIT_PENDING 2025-06-03T10:29:15.611910Z 28 05h15m00.121504s :BS_NODE DEBUG: [28] NodeServiceSetUpdate 2025-06-03T10:29:15.611918Z 28 05h15m00.121504s :BS_NODE DEBUG: [28] VDiskId# [80000003:4:0:5:0] -> [80000003:5:0:5:0] 2025-06-03T10:29:15.611933Z 29 05h15m00.121504s :BS_NODE DEBUG: [29] NodeServiceSetUpdate 2025-06-03T10:29:15.611941Z 29 05h15m00.121504s :BS_NODE DEBUG: [29] VDiskId# [80000003:4:0:4:0] -> [80000003:5:0:4:0] 2025-06-03T10:29:15.611955Z 31 05h15m00.121504s :BS_NODE DEBUG: [31] NodeServiceSetUpdate 2025-06-03T10:29:15.611964Z 31 05h15m00.121504s :BS_NODE DEBUG: [31] VDiskId# [80000003:4:0:6:0] -> [80000003:5:0:6:0] 2025-06-03T10:29:15.611980Z 32 05h15m00.121504s :BS_NODE DEBUG: [32] NodeServiceSetUpdate 2025-06-03T10:29:15.611991Z 32 05h15m00.121504s :BS_NODE DEBUG: [32] VDiskId# [80000003:4:0:7:0] -> [80000003:5:0:7:0] 2025-06-03T10:29:15.612068Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH01@self_heal.cpp:71} Reassigner starting GroupId# 2147483706 2025-06-03T10:29:15.612289Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612298Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:1:0] DiskIsOk# true 2025-06-03T10:29:15.612305Z 1 05h15m00.121504s 
:BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612314Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:2:0] DiskIsOk# true 2025-06-03T10:29:15.612321Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612326Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:3:0] DiskIsOk# true 2025-06-03T10:29:15.612332Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612338Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:4:0] DiskIsOk# true 2025-06-03T10:29:15.612344Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612349Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:5:0] DiskIsOk# true 2025-06-03T10:29:15.612356Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612361Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:6:0] DiskIsOk# true 2025-06-03T10:29:15.612367Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH03@self_heal.cpp:111} Reassigner TEvVStatusResult GroupId# 2147483706 Status# OK JoinedGroup# true Replicated# true 2025-06-03T10:29:15.612372Z 1 05h15m00.121504s :BS_SELFHEAL DEBUG: {BSSH02@self_heal.cpp:96} Reassigner ProcessVDiskReply GroupId# 2147483706 VDiskId# [8000003a:3:0:7:0] DiskIsOk# true 2025-06-03T10:29:15.614985Z 1 05h15m00.122016s :BS_SELFHEAL INFO: {BSSH09@self_heal.cpp:207} Reassigner succeeded GroupId# 2147483706 Items# [8000003a:3:0:0:0]: 17:1000:1007 -> 13:1000:1017 ConfigTxSeqNo# 490 2025-06-03T10:29:15.615008Z 1 05h15m00.122016s :BS_SELFHEAL DEBUG: {BSSH08@self_heal.cpp:218} Reassigner finished GroupId# 2147483706 Success# true 2025-06-03T10:29:15.615047Z 17 05h15m00.122016s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.615073Z 18 05h15m00.122016s :BS_NODE DEBUG: [18] NodeServiceSetUpdate 2025-06-03T10:29:15.615087Z 18 05h15m00.122016s :BS_NODE DEBUG: [18] VDiskId# [8000003a:3:0:1:0] -> [8000003a:4:0:1:0] 2025-06-03T10:29:15.615106Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] NodeServiceSetUpdate 2025-06-03T10:29:15.615117Z 19 05h15m00.122016s :BS_NODE DEBUG: [19] VDiskId# [8000003a:3:0:2:0] -> [8000003a:4:0:2:0] 2025-06-03T10:29:15.615135Z 20 05h15m00.122016s :BS_NODE DEBUG: [20] NodeServiceSetUpdate 2025-06-03T10:29:15.615145Z 20 05h15m00.122016s :BS_NODE DEBUG: [20] VDiskId# [8000003a:3:0:3:0] -> [8000003a:4:0:3:0] 2025-06-03T10:29:15.615162Z 21 05h15m00.122016s :BS_NODE DEBUG: [21] NodeServiceSetUpdate 2025-06-03T10:29:15.615171Z 21 05h15m00.122016s :BS_NODE DEBUG: [21] VDiskId# [8000003a:3:0:4:0] -> [8000003a:4:0:4:0] 2025-06-03T10:29:15.615188Z 24 05h15m00.122016s :BS_NODE DEBUG: [24] 
NodeServiceSetUpdate 2025-06-03T10:29:15.615198Z 24 05h15m00.122016s :BS_NODE DEBUG: [24] VDiskId# [8000003a:3:0:7:0] -> [8000003a:4:0:7:0] 2025-06-03T10:29:15.615216Z 28 05h15m00.122016s :BS_NODE DEBUG: [28] NodeServiceSetUpdate 2025-06-03T10:29:15.615226Z 28 05h15m00.122016s :BS_NODE DEBUG: [28] VDiskId# [8000003a:3:0:5:0] -> [8000003a:4:0:5:0] 2025-06-03T10:29:15.615244Z 30 05h15m00.122016s :BS_NODE DEBUG: [30] NodeServiceSetUpdate 2025-06-03T10:29:15.615253Z 30 05h15m00.122016s :BS_NODE DEBUG: [30] VDiskId# [8000003a:3:0:6:0] -> [8000003a:4:0:6:0] 2025-06-03T10:29:15.615274Z 13 05h15m00.122016s :BS_NODE DEBUG: [13] NodeServiceSetUpdate 2025-06-03T10:29:15.615287Z 13 05h15m00.122016s :BS_NODE DEBUG: [13] VDiskId# [8000003a:4:0:0:0] PDiskId# 1000 VSlotId# 1017 created 2025-06-03T10:29:15.615310Z 13 05h15m00.122016s :BS_NODE DEBUG: [13] VDiskId# [8000003a:4:0:0:0] status changed to INIT_PENDING 2025-06-03T10:29:15.615624Z 25 05h15m01.176456s :BS_NODE DEBUG: [25] VDiskId# [80000032:4:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.615767Z 28 05h15m01.441432s :BS_NODE DEBUG: [28] VDiskId# [8000001a:5:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.615983Z 13 05h15m02.641016s :BS_NODE DEBUG: [13] VDiskId# [8000003a:4:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.616063Z 28 05h15m02.674968s :BS_NODE DEBUG: [28] VDiskId# [80000022:5:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.616223Z 28 05h15m03.043944s :BS_NODE DEBUG: [28] VDiskId# [8000000a:5:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.616386Z 22 05h15m04.617480s :BS_NODE DEBUG: [22] VDiskId# [80000012:4:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.616465Z 28 05h15m04.652992s :BS_NODE DEBUG: [28] VDiskId# [80000002:5:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.616622Z 28 05h15m04.968920s :BS_NODE DEBUG: [28] VDiskId# [8000002a:5:0:0:0] status changed to REPLICATING 2025-06-03T10:29:15.617355Z 27 05h15m05.167504s :BS_NODE DEBUG: [27] VDiskId# [80000003:5:0:3:0] status changed to REPLICATING 2025-06-03T10:29:15.617940Z 25 05h15m15.765456s :BS_NODE DEBUG: [25] VDiskId# [80000032:4:0:0:0] status changed to READY 2025-06-03T10:29:15.619367Z 17 05h15m15.765968s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.619390Z 17 05h15m15.765968s :BS_NODE DEBUG: [17] VDiskId# [80000032:3:0:0:0] destroyed 2025-06-03T10:29:15.619454Z 13 05h15m16.419016s :BS_NODE DEBUG: [13] VDiskId# [8000003a:4:0:0:0] status changed to READY 2025-06-03T10:29:15.620745Z 17 05h15m16.419528s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.620766Z 17 05h15m16.419528s :BS_NODE DEBUG: [17] VDiskId# [8000003a:3:0:0:0] destroyed 2025-06-03T10:29:15.620819Z 28 05h15m17.613432s :BS_NODE DEBUG: [28] VDiskId# [8000001a:5:0:0:0] status changed to READY 2025-06-03T10:29:15.622235Z 17 05h15m17.613944s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.622255Z 17 05h15m17.613944s :BS_NODE DEBUG: [17] VDiskId# [8000001a:4:0:0:0] destroyed 2025-06-03T10:29:15.622940Z 22 05h15m30.989480s :BS_NODE DEBUG: [22] VDiskId# [80000012:4:0:0:0] status changed to READY 2025-06-03T10:29:15.624227Z 17 05h15m30.989992s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.624247Z 17 05h15m30.989992s :BS_NODE DEBUG: [17] VDiskId# [80000012:3:0:0:0] destroyed 2025-06-03T10:29:15.624373Z 28 05h15m32.411968s :BS_NODE DEBUG: [28] VDiskId# [80000022:5:0:0:0] status changed to READY 2025-06-03T10:29:15.625801Z 17 05h15m32.412480s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 
2025-06-03T10:29:15.625822Z 17 05h15m32.412480s :BS_NODE DEBUG: [17] VDiskId# [80000022:4:0:0:0] destroyed 2025-06-03T10:29:15.625934Z 28 05h15m32.706944s :BS_NODE DEBUG: [28] VDiskId# [8000000a:5:0:0:0] status changed to READY 2025-06-03T10:29:15.627219Z 17 05h15m32.707456s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.627240Z 17 05h15m32.707456s :BS_NODE DEBUG: [17] VDiskId# [8000000a:4:0:0:0] destroyed 2025-06-03T10:29:15.627653Z 27 05h15m35.420504s :BS_NODE DEBUG: [27] VDiskId# [80000003:5:0:3:0] status changed to READY 2025-06-03T10:29:15.628917Z 17 05h15m35.421016s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.628943Z 17 05h15m35.421016s :BS_NODE DEBUG: [17] VDiskId# [80000003:4:0:3:0] destroyed 2025-06-03T10:29:15.629014Z 28 05h15m37.209992s :BS_NODE DEBUG: [28] VDiskId# [80000002:5:0:0:0] status changed to READY 2025-06-03T10:29:15.630395Z 17 05h15m37.210504s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.630416Z 17 05h15m37.210504s :BS_NODE DEBUG: [17] VDiskId# [80000002:4:0:0:0] destroyed 2025-06-03T10:29:15.630480Z 28 05h15m39.487920s :BS_NODE DEBUG: [28] VDiskId# [8000002a:5:0:0:0] status changed to READY 2025-06-03T10:29:15.631744Z 17 05h15m39.488432s :BS_NODE DEBUG: [17] NodeServiceSetUpdate 2025-06-03T10:29:15.631761Z 17 05h15m39.488432s :BS_NODE DEBUG: [17] VDiskId# [8000002a:4:0:0:0] destroyed >> TTransferTests::CreateSequential [GOOD] >> TTransferTests::CreateInParallel |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTransferTests::CreateWrongConfig [GOOD] >> TTransferTests::CreateWrongBatchSize >> DataShardReadIterator::ShouldLimitReadRangeChunk1Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnVDiskSpaceStatus [GOOD] >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTransferTests::CreateInParallel [GOOD] >> TTransferTests::CreateDropRecreate >> TTransferTests::CreateWrongBatchSize [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsSmall >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx2 [GOOD] >> DataShardReadIterator::ShouldReadKeyValueColumnAndSomeKeyColumn [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> VectorIndexBuildTestReboots::BaseCase[PipeResets] |65.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTransferTests::CreateWrongFlushIntervalIsSmall [GOOD] >> TTransferTests::CreateWrongFlushIntervalIsBig ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx2 [GOOD] Test command err: iteration# 2 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 8 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 14 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 20 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 26 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 32 BlobsWritten# 2041 
>> TTransferTests::CreateDropRecreate [GOOD] >> TTransferTests::ConsistencyLevel >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRange-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] >> CommitOffset::DistributedTxCommit [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst >> VectorIndexBuildTestReboots::BaseCase[TabletReboots] >> TTransferTests::ConsistencyLevel [GOOD] >> TTransferTests::Alter >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 |65.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |65.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut |65.1%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/federated_topic/ut/ydb-public-sdk-cpp-src-client-federated_topic-ut >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit98 [GOOD] >>
DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 >> TopicAutoscaling::PartitionMerge_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::CreateWrongFlushIntervalIsBig [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:15.013252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:15.013285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.013316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:15.013323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:15.013338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:15.013342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:15.013354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.013370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:15.013560Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:15.013644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:15.030804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:15.030833Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:15.035411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:15.035559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:15.035595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:15.038013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:15.038080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-03T10:29:15.038209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.038261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:15.038946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.039005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:15.039311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.039325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.039339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:15.039349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.039356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:15.039379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.040811Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.063137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:15.063218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.063281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:15.063336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:15.063347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.065103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-03T10:29:15.065137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:15.065189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.065200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:15.065206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:15.065211Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:15.065992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.066008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:15.066015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:15.066404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.066417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.066424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.066431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:15.067180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:15.067619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:15.067666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:15.067862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.067892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:15.067899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.067966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:15.067974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.068008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:15.068021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:15.068486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.068496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.068541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... nResult Origin: 72075186233409546 TxId: 101 2025-06-03T10:29:18.685536Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4925: StateWork, processing event TEvColumnShard::TEvNotifyTxCompletionResult 2025-06-03T10:29:18.685551Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-03T10:29:18.685563Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:18.685608Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409546 TxId: 101 2025-06-03T10:29:18.685647Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:29:18.686423Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.686438Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:18.686447Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 101:0 2025-06-03T10:29:18.686495Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [6:133:2155], Recipient [6:133:2155]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:29:18.686503Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, 
processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:29:18.686517Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.686526Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:29:18.686547Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:18.686554Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:29:18.686560Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:29:18.686568Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:29:18.686572Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:29:18.686578Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-03T10:29:18.686600Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [6:344:2320] message: TxId: 101 2025-06-03T10:29:18.686609Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:29:18.686616Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:29:18.686623Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:29:18.686668Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:18.687282Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:18.687311Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:344:2320] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 101 at schemeshard: 72057594046678944 2025-06-03T10:29:18.687359Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:29:18.687367Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [6:345:2321] 2025-06-03T10:29:18.687411Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [6:347:2323], Recipient [6:133:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:18.687419Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:18.687425Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 101 TestModificationResults wait txId: 102 2025-06-03T10:29:18.687594Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [6:391:2360], Recipient [6:133:2155]: {TEvModifySchemeTransaction txid# 102 TabletId# 72057594046678944} 2025-06-03T10:29:18.687600Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:29:18.688508Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateTransfer Replication { Name: "Transfer" Config { TransferSpecific { Target { SrcPath: "/MyRoot1/Table" DstPath: "/MyRoot/Table" } Batching { FlushIntervalMilliSeconds: 86400001 } } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:18.688575Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_replication.cpp:348: [72057594046678944] TCreateReplication Propose: opId# 102:0, path# /MyRoot/Transfer 2025-06-03T10:29:18.688594Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, at schemeshard: 72057594046678944 2025-06-03T10:29:18.688666Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:18.689288Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Flush interval must be less than or equal to 24 hours" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:18.689361Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Flush interval must be less than or equal to 24 hours, operation: CREATE TRANSFER, path: /MyRoot/Transfer 2025-06-03T10:29:18.689368Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:29:18.689447Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:29:18.689454Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:29:18.689514Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [6:397:2366], Recipient [6:133:2155]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:18.689520Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:18.689523Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:29:18.689540Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [6:344:2320], Recipient [6:133:2155]: NKikimrScheme.TEvNotifyTxCompletion TxId: 102 2025-06-03T10:29:18.689544Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:29:18.689555Z node 6 :FLAT_TX_SCHEMESHARD WARN: 
schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:29:18.689575Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:18.689579Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [6:395:2364] 2025-06-03T10:29:18.689596Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [6:397:2366], Recipient [6:133:2155]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:18.689599Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:18.689601Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-03T10:29:18.689647Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [6:398:2367], Recipient [6:133:2155]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:29:18.689650Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:29:18.689658Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Transfer" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:18.689686Z node 6 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Transfer" took 26us result status StatusPathDoesNotExist 2025-06-03T10:29:18.689715Z node 6 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Transfer\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Transfer" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TTransferTests::Alter [GOOD] >> Viewer::TenantInfo5kkTablets [GOOD] >> Viewer::UseTransactionWhenExecuteDataActionQuery |65.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |65.1%| [LD] {RESULT} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |65.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/workload_service/ut/ydb-core-kqp-workload_service-ut |65.1%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx4 [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeys [GOOD] >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TopicAutoscaling::ControlPlane_CDC_Disable [GOOD] >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition |65.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |65.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut |65.1%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut/ydb-core-keyvalue-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_transfer/unittest >> TTransferTests::Alter [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:15.252920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:15.252947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.252952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:15.252959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:15.252991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:15.252995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:15.253006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.253021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:15.253132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:15.253201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:15.270194Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:15.270224Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:15.275076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: 
TxInitSchema.Complete 2025-06-03T10:29:15.275222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:15.275261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:15.277430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:15.277496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:15.277635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.277689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:15.278390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.278446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:15.278758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.278772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.278784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:15.278793Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.278799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:15.278821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.280513Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.307155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:15.307247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.307317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:15.307368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose 
status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:15.307381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.308495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.308532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:15.308593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.308605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:15.308613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:15.308620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:15.309213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.309227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:15.309234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:15.309660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.309674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.309682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.309690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:15.310500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:15.310977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:15.311022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: 
advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:15.311220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.311252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:15.311261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.311335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:15.311343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.311387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:15.311401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:15.311882Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.311894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.311942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
lterReplication TConfigureParts opId# 104:0 HandleReply NKikimrReplication.TEvAlterReplicationResult OperationId { TxId: 104 PartId: 0 } Origin: 72075186233409547 Status: SUCCESS 2025-06-03T10:29:19.415668Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 3 -> 128 2025-06-03T10:29:19.415694Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:19.415704Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:693: Ack tablet strongly msg opId: 104:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:3 2025-06-03T10:29:19.416276Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.416300Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:19.416308Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-03T10:29:19.416354Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [6:133:2155], Recipient [6:133:2155]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:29:19.416362Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:29:19.416374Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.416384Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:189: [72057594046678944] TAlterReplication TPropose opId# 104:0 ProgressState 2025-06-03T10:29:19.416393Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:19.416405Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 104 ready parts: 1/1 2025-06-03T10:29:19.416452Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 104 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:19.417019Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:19.417038Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 msg type: 269090816 2025-06-03T10:29:19.417076Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 104, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-03T10:29:19.417170Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269287424, 
Sender [6:129:2153], Recipient [6:259:2249] 2025-06-03T10:29:19.417178Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4902: StateWork, processing event TEvTxProcessing::TEvPlanStep 2025-06-03T10:29:19.417197Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:19.417225Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 129 RawX2: 25769805929 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:19.417238Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_replication.cpp:203: [72057594046678944] TAlterReplication TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-03T10:29:19.417315Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 128 -> 240 2025-06-03T10:29:19.417371Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:19.417399Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:19.417421Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:693: Ack tablet strongly msg opId: 104:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:104 2025-06-03T10:29:19.417997Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:19.418012Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:384: Ack coordinator stepId#5000005 first txId#104 countTxs#1 2025-06-03T10:29:19.418021Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:354: Ack mediator stepId#5000005 2025-06-03T10:29:19.418027Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-03T10:29:19.418076Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [6:133:2155], Recipient [6:133:2155]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:29:19.418084Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:29:19.418112Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:19.418120Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:19.418222Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:19.418232Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [6:208:2209], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-03T10:29:19.418396Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.418411Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:29:19.418427Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:19.418434Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:29:19.418440Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:29:19.418447Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:29:19.418452Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:29:19.418459Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-03T10:29:19.418467Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:29:19.418475Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:29:19.418481Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:29:19.418536Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:29:19.418546Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 1, subscribers: 0 2025-06-03T10:29:19.418552Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 4 2025-06-03T10:29:19.418776Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274137603, Sender [6:208:2209], Recipient [6:133:2155]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 4 } 2025-06-03T10:29:19.418790Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-03T10:29:19.418812Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:29:19.418829Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 4 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:29:19.418836Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:29:19.418843Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at 
schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 4 2025-06-03T10:29:19.418849Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:19.418873Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-03T10:29:19.418879Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:29:19.420007Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:19.420090Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:29:19.420098Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 >> Viewer::JsonStorageListingV1GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV1NodeIdFilter |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_ftol/unittest >> TBsProxyFaultToleranceTest::CheckGetHardenedErasureBlock42Count6Idx4 [GOOD] Test command err: iteration# 4 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 10 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 16 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 22 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 28 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 34 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 40 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 46 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 52 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 58 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 64 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 70 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 76 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 82 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 88 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 94 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 100 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 106 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 112 
BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 [iteration# 118 through iteration# 1960: the same record repeats every 6 iterations with identical counters — BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218; the captured log itself elides part of this run between iteration# 688 and iteration# 1366] iteration# 1966 BlobsWritten# 2041
blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1972 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1978 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1984 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1990 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 1996 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2002 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2008 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2014 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2020 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2026 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2032 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 iteration# 2038 BlobsWritten# 2041 blobsWrittenFull# 157 blobsWrittenAlmostFull# 666 blobsUnwritten# 1218 |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TopicAutoscaling::ControlPlane_CreateAlterDescribe [GOOD] >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadRangeInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite |65.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |65.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup |65.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_backup/ydb-core-tx-schemeshard-ut_backup >> Viewer::UseTransactionWhenExecuteDataActionQuery [GOOD] >> ViewerTopicDataTests::TopicDataTest |65.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit99 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 |65.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... 
results_accumulator.log} |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> Viewer::StorageGroupOutputWithSpaceCheckDependsOnUsage [GOOD] >> Viewer::SimpleFeatureFlags |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::CreateDrop >> TFileStoreWithReboots::CreateAlterChannels >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> DataShardReadIterator::ShouldReadMultipleKeysOneByOne [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix1 |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TopicAutoscaling::ReadingAfterSplitTest_AutoscaleAwareSDK_AutoCommit [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 |65.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move |65.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move >> Viewer::SimpleFeatureFlags [GOOD] |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest |65.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> BasicUsage::WriteSessionNoAvailableDatabase >> BasicUsage::BasicWriteSession >> ViewerTopicDataTests::TopicDataTest [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit100 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 |65.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_transfer/test-results/unittest/{meta.json ... results_accumulator.log} |65.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/ydb-core-tx-schemeshard-ut_move ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::SimpleFeatureFlags [GOOD] Test command err: BASE_PERF = 1.117276547 Build = 5.069175856 Merge = 6.833610602 Destroy = 3.61691661 2025-06-03T10:29:12.038155Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:319:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:12.038232Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:12.038255Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 30019, node 1 TClient is connected to server localhost:32269 json result: {"TotalGroups":"1","FoundGroups":"1","StorageGroups":[{"PoolName":"static","Kind":"","MediaType":"","Erasure":"none","Degraded":"1","Usage":"0.1","Used":"10","Limit":"100","Read":"0","Write":"0","GroupID":0,"ErasureSpecies":"none","VDisks":[{"VDiskId":{"GroupID":0,"GroupGeneration":1,"VDisk":0},"PDisk":{"PDiskId":0,"NodeId":1},"NodeId":1,"VDiskState":"OK","DiskSpace":"Green","AllocatedSize":"10","AvailableSize":"90","Overall":"Green"}],"DiskSpace":"Green","GroupGeneration":1,"VDiskNodeIds":[1],"Overall":"Red"}]} 2025-06-03T10:29:13.430571Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:13.430633Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:13.430652Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 20822, node 2 TClient is connected to server localhost:16804 json result: {"TotalGroups":"1","FoundGroups":"1","StorageGroups":[{"PoolName":"static","Kind":"","MediaType":"","Erasure":"none","Degraded":"1","Usage":"0.9","Used":"90","Limit":"100","Read":"0","Write":"0","GroupID":0,"ErasureSpecies":"none","VDisks":[{"VDiskId":{"GroupID":0,"GroupGeneration":1,"VDisk":0},"PDisk":{"PDiskId":0,"NodeId":2},"NodeId":2,"VDiskState":"OK","DiskSpace":"Red","AllocatedSize":"90","AvailableSize":"10","Overall":"Red"}],"DiskSpace":"Red","GroupGeneration":1,"VDiskNodeIds":[2],"Overall":"Red"}]} 2025-06-03T10:29:14.912356Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:336:2378], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:14.912467Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:14.912557Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 1746, node 3 TClient is connected to server localhost:21911 json result: {"TotalGroups":"1","FoundGroups":"0"} 2025-06-03T10:29:16.773479Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:317:2360], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.773546Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:16.773568Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 7510, node 4 TClient is connected to server localhost:6211 json result: {"TotalGroups":"1","FoundGroups":"1","StorageGroups":[{"PoolName":"static","Kind":"","MediaType":"","Erasure":"none","Degraded":"1","Usage":"0.1","Used":"10","Limit":"100","Read":"0","Write":"0","GroupID":0,"ErasureSpecies":"none","VDisks":[{"VDiskId":{"GroupID":0,"GroupGeneration":1,"VDisk":0},"PDisk":{"PDiskId":0,"NodeId":4},"NodeId":4,"VDiskState":"OK","DiskSpace":"Red","AllocatedSize":"10","AvailableSize":"90","Overall":"Red"}],"DiskSpace":"Red","GroupGeneration":1,"VDiskNodeIds":[4],"Overall":"Red"}]} 2025-06-03T10:29:18.194684Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:339:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:18.194777Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:18.194796Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 14173, node 5 TClient is connected to server localhost:20599 json result: {"TotalGroups":"1","FoundGroups":"0"} 2025-06-03T10:29:19.550267Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:19.550324Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:340:2381], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:19.550339Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 24101, node 6 TClient is connected to server localhost:32633 json result: {"TotalGroups":"1","FoundGroups":"1","StorageGroups":[{"PoolName":"static","Kind":"","MediaType":"","Erasure":"none","Degraded":"1","Usage":"0.8","Used":"80","Limit":"100","Read":"0","Write":"0","GroupID":0,"ErasureSpecies":"none","VDisks":[{"VDiskId":{"GroupID":0,"GroupGeneration":1,"VDisk":0},"PDisk":{"PDiskId":0,"NodeId":6},"NodeId":6,"VDiskState":"OK","DiskSpace":"Green","AllocatedSize":"80","AvailableSize":"20","Overall":"Green"}],"DiskSpace":"Green","GroupGeneration":1,"VDiskNodeIds":[6],"Overall":"Red"}]} 2025-06-03T10:29:21.263306Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:315:2359], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:21.263402Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:21.263414Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 24662, node 7 TClient is connected to server localhost:3866 json result: {"TotalGroups":"1","FoundGroups":"1","StorageGroups":[{"PoolName":"static","Kind":"","MediaType":"","Erasure":"none","Degraded":"1","Usage":"0.9","Used":"90","Limit":"100","Read":"0","Write":"0","GroupID":0,"ErasureSpecies":"none","VDisks":[{"VDiskId":{"GroupID":0,"GroupGeneration":1,"VDisk":0},"PDisk":{"PDiskId":0,"NodeId":7},"NodeId":7,"VDiskState":"OK","DiskSpace":"Green","AllocatedSize":"90","AvailableSize":"10","Overall":"Green"}],"DiskSpace":"Green","GroupGeneration":1,"VDiskNodeIds":[7],"Overall":"Red"}]} 2025-06-03T10:29:21.652972Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7511668286023551835:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:21.652991Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:21.671612Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:21.671918Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7511668286023551814:2079] 1748946561652812 != 1748946561652815 TServer::EnableGrpc on GrpcPort 27245, node 8 2025-06-03T10:29:21.694928Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:21.694945Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:21.694948Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:21.695006Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23672 2025-06-03T10:29:21.759192Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:21.759227Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:21.760297Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite >> KqpWorkloadServiceActors::TestPoolFetcher >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> ViewerTopicDataTests::TopicDataTest [GOOD] 
Test command err: Build = 0.208735339 Merge = 0.8292953658 Destroy = 0.04370261496 Data has built Merge = 0.04820387188 Data has merged 2025-06-03T10:29:02.114736Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:1537:2429], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.115145Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.115335Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:02.115526Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.115550Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:02.115697Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:1540:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.115777Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:1546:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.115892Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:1543:2372], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.116032Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.116093Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:02.116106Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:02.116203Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:02.116276Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:1034:2174], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:02.116452Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:02.116491Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:02.262503Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:02.353837Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:02.360560Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:02.412073Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 21379, node 1 TClient is connected to server localhost:24415 2025-06-03T10:29:02.460667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:02.460691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:02.460696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:02.460782Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration Request timer = 14.36869516 BASE_PERF = 0.991032619 test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:19.754096Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:19.762286Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:19.763356Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511668276313843015:2079] 1748946559676061 != 1748946559676064 TServer::EnableGrpc on GrpcPort 26690, node 6 2025-06-03T10:29:19.801566Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:19.801583Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:19.801586Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:19.801648Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:19.802171Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:19.802192Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:19.809990Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server 
localhost:24934 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:19.878549Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:19.888380Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:19.897821Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:29:19.902401Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterUserAttributes, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:19.904059Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 2025-06-03T10:29:20.521677Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:20.521701Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:20.523820Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668280608811006:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:20.523847Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:20.523991Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668280608811018:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:20.524978Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-06-03T10:29:20.527504Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511668280608811020:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:29:20.586120Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511668280608811071:2347] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:20.647227Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:20.714378Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:437: Ticket parser: got TEvAuthorizeTicket event: test_ydb_token /Root 1 2025-06-03T10:29:20.714395Z node 6 :TICKET_PARSER INFO: viewer_ut.cpp:496: Send TEvAuthorizeTicketResult success 2025-06-03T10:29:20.785712Z node 6 :TICKET_PARSE ... 1.790040Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.790062Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 2 message(s) (11 left), first sequence number is 8 2025-06-03T10:29:21.791890Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 8 sequence_numbers: 9 offsets: 47 offsets: 48 already_written: false already_written: false write_statistics { } 2025-06-03T10:29:21.791908Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 8 2025-06-03T10:29:21.791917Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 9 2025-06-03T10:29:21.796161Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.796195Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (10 left), first sequence number is 10 2025-06-03T10:29:21.797959Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 10 offsets: 49 already_written: false write_statistics { } 2025-06-03T10:29:21.797976Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 10 2025-06-03T10:29:21.800668Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.800689Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (9 left), first sequence number is 11 2025-06-03T10:29:21.802911Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.802932Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (8 left), first sequence number is 12 2025-06-03T10:29:21.805377Z :DEBUG: [] MessageGroupId [producer3] 
SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 11 offsets: 50 already_written: false write_statistics { } 2025-06-03T10:29:21.805390Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 11 2025-06-03T10:29:21.805430Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 12 offsets: 51 already_written: false write_statistics { } 2025-06-03T10:29:21.805433Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 12 2025-06-03T10:29:21.811513Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.811538Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 2 message(s) (6 left), first sequence number is 13 2025-06-03T10:29:21.816319Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.816353Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (5 left), first sequence number is 15 2025-06-03T10:29:21.816713Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 13 sequence_numbers: 14 offsets: 52 offsets: 53 already_written: false already_written: false write_statistics { } 2025-06-03T10:29:21.816727Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 13 2025-06-03T10:29:21.816733Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 14 2025-06-03T10:29:21.818783Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 15 offsets: 54 already_written: false write_statistics { } 2025-06-03T10:29:21.818797Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 15 2025-06-03T10:29:21.822177Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.822203Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (4 left), first sequence number is 16 2025-06-03T10:29:21.823357Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.823380Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (3 left), first sequence number is 17 2025-06-03T10:29:21.824254Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 16 offsets: 55 already_written: false write_statistics { persist_duration_ms: 1 } 2025-06-03T10:29:21.824265Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 16 
2025-06-03T10:29:21.825774Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 17 offsets: 56 already_written: false write_statistics { } 2025-06-03T10:29:21.825789Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 17 2025-06-03T10:29:21.832973Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.832994Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 2 message(s) (1 left), first sequence number is 18 2025-06-03T10:29:21.837601Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 18 sequence_numbers: 19 offsets: 57 offsets: 58 already_written: false already_written: false write_statistics { } 2025-06-03T10:29:21.837636Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 18 2025-06-03T10:29:21.837643Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 19 2025-06-03T10:29:21.838282Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: try to update token 2025-06-03T10:29:21.838299Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Send 1 message(s) (0 left), first sequence number is 20 2025-06-03T10:29:21.839952Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session got write response: sequence_numbers: 20 offsets: 59 already_written: false write_statistics { } 2025-06-03T10:29:21.839965Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: acknoledged message 20 2025-06-03T10:29:21.866570Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session will now close 2025-06-03T10:29:21.866600Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: aborting 2025-06-03T10:29:21.866880Z :INFO: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:29:21.866908Z :DEBUG: [] MessageGroupId [producer3] SessionId [producer3|e08f8929-168f6a99-f023cce2-5f5ae60f_0] Write session: destroy Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 Size: 1398104 2025-06-03T10:29:22.151254Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: try to update token 2025-06-03T10:29:22.151456Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session: Do CDS request 2025-06-03T10:29:22.151464Z :INFO: [] MessageGroupId [producer4] SessionId [] Start write session. 
Will connect to endpoint: localhost:23966 2025-06-03T10:29:22.152635Z :DEBUG: [] MessageGroupId [producer4] SessionId [] Write session: send init request: init_request { topic: "/Root/topic1" message_group_id: "producer4" } 2025-06-03T10:29:22.153780Z :INFO: [] MessageGroupId [producer4] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946562153 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:29:22.153808Z :INFO: [] MessageGroupId [producer4] SessionId [] Write session established. Init response: session_id: "producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0" topic: "topic1" 2025-06-03T10:29:22.154336Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write 1 messages with Id from 1 to 1 2025-06-03T10:29:22.154387Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: close. Timeout = 18446744073709551 ms 2025-06-03T10:29:22.171337Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: try to update token 2025-06-03T10:29:22.171368Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Send 1 message(s) (0 left), first sequence number is 1 2025-06-03T10:29:22.173355Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session got write response: sequence_numbers: 1 offsets: 60 already_written: false write_statistics { } 2025-06-03T10:29:22.173377Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: acknoledged message 1 2025-06-03T10:29:22.254491Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session will now close 2025-06-03T10:29:22.254517Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: aborting 2025-06-03T10:29:22.254779Z :INFO: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:29:22.254810Z :DEBUG: [] MessageGroupId [producer4] SessionId [producer4|ec46241f-73b24b99-b60a315d-60d5e0dd_0] Write session: destroy Size: 4194320 Got response:400: PathErrorUnknown Got response:400: No such partition in topic 2025-06-03T10:29:22.367796Z node 7 :PERSQUEUE ERROR: partition_read.cpp:677: [PQ: 72075186224037889, Partition: 0, State: StateIdle] reading from too big offset - topic topic1 partition 0 client $without_consumer EndOffset 61 offset 10000 Got response:400: Bad offset >> TKeyValueTest::TestRenameWorks >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks >> DataShardReadIterator::ShouldReadKeyPrefix1 [GOOD] >> DataShardReadIterator::ShouldReadKeyPrefix2 >> KqpWorkloadServiceSubscriptions::TestResourcePoolSubscriptionAfterDrop [GOOD] >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables >> KqpWorkloadServiceActors::TestPoolFetcher [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> HttpRequest::ProbeServerless [GOOD] >> KeyValueReadStorage::ReadOk [GOOD] >> 
KeyValueReadStorage::ReadNotWholeBlobOk [GOOD] >> KeyValueReadStorage::ReadOneItemError [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit101 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadOneItemError [GOOD] Test command err: 2025-06-03T10:29:24.583742Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:24.584184Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-03T10:29:24.586065Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:24.586092Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-03T10:29:24.587698Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:24.587729Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV317@keyvalue_storage_read_request.cpp:310} Unexpected EvGetResult. KeyValue# 1 Status# OK Id# [1:2:3:2:0:1:0] ResponseStatus# ERROR Deadline# 586524-01-19T08:01:49.551615Z Now# 1970-01-01T00:00:00.000000Z SentAt# 1970-01-01T00:00:00.000000Z GotAt# 2025-06-03T10:29:24.587645Z ErrorReason# >> Balancing::Balancing_ManyTopics_PQv1 [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> HttpRequest::ProbeServerless [GOOD] Test command err: 2025-06-03T10:26:26.460129Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:26.460156Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:26.460163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c33/r3tmp/tmpwPiyOk/pdisk_1.dat 2025-06-03T10:26:26.560198Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8407, node 1 2025-06-03T10:26:26.668582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:26.668601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:26.668605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:26.668658Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:26.669230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:26.747036Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:26.747084Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:26.759422Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28732 2025-06-03T10:26:27.107582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:27.860956Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:27.869723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.869780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.923827Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:27.924387Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.083384Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083530Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083635Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:28.083664Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083741Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083759Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083771Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083786Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.083802Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.235606Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.235642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.246992Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.283494Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:28.298545Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:28.298575Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:28.305723Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:28.305778Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:28.305797Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:28.305802Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:28.305806Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:28.305811Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:28.305816Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:28.305822Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:28.305989Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:28.319192Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.319220Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.320473Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:28.321264Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:28.321408Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:28.323146Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:26:28.326689Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:28.326709Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:28.326719Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:26:28.330673Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:28.332249Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:28.332285Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:28.436112Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:28.506669Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:28.559629Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:29.067383Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.499716Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:29.579424Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:26:29.579450Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.579461Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:2567:2933], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.579822Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2569:2935] 2025-06-03T10:26:29.580022Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:2569:2935], schemeshard id = 72075186224037899 2025-06-03T10:26:30.347949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2691:3233], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.348009Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:30.353571Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715661:0, at schemeshard: 72075186224037899 2025-06-03T10:26:30.439563Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:30.439680Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:30.439789Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037905;self_id=[2:2841:3070];tablet_id=72075186224037905;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register; ... ete 2025-06-03T10:29:23.871056Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:29:23.893458Z node 2 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:29:23.893564Z node 2 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 2, Round: 3, current Round: 0 2025-06-03T10:29:23.893810Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11587:8598], server id = [2:11592:8603], tablet id = 72075186224037905, status = OK 2025-06-03T10:29:23.893847Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11587:8598], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.894243Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11588:8599], server id = [2:11593:8604], tablet id = 72075186224037906, status = OK 2025-06-03T10:29:23.894261Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11588:8599], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.894315Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11589:8600], server id = [2:11594:8605], tablet id = 72075186224037907, status = OK 2025-06-03T10:29:23.894323Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11589:8600], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.894550Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11590:8601], server id = [2:11595:8606], tablet id = 72075186224037908, status = OK 2025-06-03T10:29:23.894563Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11590:8601], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.894616Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11591:8602], server id = [2:11597:8608], tablet id = 72075186224037909, status = OK 2025-06-03T10:29:23.894624Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11591:8602], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.894812Z node 2 :STATISTICS DEBUG: 
service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037905 2025-06-03T10:29:23.894996Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11587:8598], server id = [2:11592:8603], tablet id = 72075186224037905 2025-06-03T10:29:23.895004Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.895072Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037906 2025-06-03T10:29:23.895159Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037907 2025-06-03T10:29:23.895226Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11588:8599], server id = [2:11593:8604], tablet id = 72075186224037906 2025-06-03T10:29:23.895230Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.895261Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037908 2025-06-03T10:29:23.895335Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11589:8600], server id = [2:11594:8605], tablet id = 72075186224037907 2025-06-03T10:29:23.895339Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.895371Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037909 2025-06-03T10:29:23.895412Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11602:8613], server id = [2:11603:8614], tablet id = 72075186224037910, status = OK 2025-06-03T10:29:23.895425Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11602:8613], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.895576Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11590:8601], server id = [2:11595:8606], tablet id = 72075186224037908 2025-06-03T10:29:23.895582Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.895623Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11604:8615], server id = [2:11606:8617], tablet id = 72075186224037911, status = OK 2025-06-03T10:29:23.895633Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11604:8615], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.895748Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11605:8616], server id = [2:11607:8618], tablet id = 72075186224037912, status = OK 2025-06-03T10:29:23.895760Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11605:8616], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.895890Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11591:8602], server id = [2:11597:8608], tablet id = 72075186224037909 2025-06-03T10:29:23.895897Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.895933Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11608:8619], server id = [2:11609:8620], tablet id = 72075186224037913, status = OK 2025-06-03T10:29:23.895943Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: 
TEvStatisticsRequest send, client id = [2:11608:8619], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.896061Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:11610:8621], server id = [2:11611:8622], tablet id = 72075186224037914, status = OK 2025-06-03T10:29:23.896070Z node 2 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [2:11610:8621], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:23.896192Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037910 2025-06-03T10:29:23.896244Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 2025-06-03T10:29:23.896319Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-03T10:29:23.896368Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11602:8613], server id = [2:11603:8614], tablet id = 72075186224037910 2025-06-03T10:29:23.896371Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.896397Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11604:8615], server id = [2:11606:8617], tablet id = 72075186224037911 2025-06-03T10:29:23.896400Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.896407Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037913 2025-06-03T10:29:23.896448Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11605:8616], server id = [2:11607:8618], tablet id = 72075186224037912 2025-06-03T10:29:23.896451Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.896461Z node 2 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037914 2025-06-03T10:29:23.896468Z node 2 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 2 2025-06-03T10:29:23.896516Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:29:23.896550Z node 2 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:29:23.896595Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11608:8619], server id = [2:11609:8620], tablet id = 72075186224037913 2025-06-03T10:29:23.896598Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.896646Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:23.897355Z node 2 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 2, client id = [2:11610:8621], server id = [2:11611:8622], tablet id = 72075186224037914 2025-06-03T10:29:23.897369Z node 2 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:23.897630Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:29:23.912491Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjFiMmE0ZTAtODUyMDJjN2QtNWMwNzc0NDEtYWEzNTczMDg=, TxId: 2025-06-03T10:29:23.912528Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjFiMmE0ZTAtODUyMDJjN2QtNWMwNzc0NDEtYWEzNTczMDg=, TxId: 2025-06-03T10:29:23.912741Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:23.924688Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete force traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:23.924716Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:50: [72075186224037894] TTxFinishTraversal::Complete. Send TEvAnalyzeResponse, OperationId=5VTĮ܂B, ActorId=[1:6349:4032] 2025-06-03T10:29:23.925063Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:11639:6251]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:23.925114Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:23.925118Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:29:23.925155Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:23.925164Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:29:23.925171Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 2 ] 2025-06-03T10:29:23.927542Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 Answer: '/Root/Database/Table1[Value]=4' >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefix-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite >> DataShardReadIterator::ShouldReadKeyPrefix2 [GOOD] |65.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots |65.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> DataShardReadIterator::ShouldReadKeyPrefix3 |65.2%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/ydb-core-tx-schemeshard-ut_index_build_reboots >> TopicAutoscaling::PartitionSplit_PreferedPartition_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 >> KqpWorkloadServiceActors::TestPoolFetcherAclValidation [GOOD] >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit198 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 >> KqpWorkloadServiceActors::TestPoolFetcherNotExistingPool [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions >> Viewer::JsonStorageListingV2GroupIdFilter [GOOD] >> Viewer::JsonStorageListingV2NodeIdFilter >> TopicAutoscaling::ControlPlane_DisableAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning >> KqpWorkloadServiceTables::TestCreateWorkloadSerivceTables [GOOD] >> KqpWorkloadServiceTables::TestPoolStateFetcherActor >> CommitOffset::Commit_WithoutSession_ParentNotFinished [GOOD] >> CommitOffset::Commit_WithoutSession_ToPastParentPartition >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldCompactBorrowedAfterSplitMerge [GOOD] >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts |65.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest |65.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> KqpWorkloadServiceActors::TestDefaultPoolUsePermissions [GOOD] >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite >> DataShardReadIterator::ShouldReadKeyPrefix3 [GOOD] >> DataShardReadIterator::ShouldReadHeadFromFollower >> DataShardReadIterator::ShouldLimitRead10RangesChunk99Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 >> KqpWorkloadServiceTables::TestPoolStateFetcherActor [GOOD] >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart >> KqpWorkloadServiceActors::TestDefaultPoolAdminPermissions [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit >> TFileStoreWithReboots::AlterAssignDrop [GOOD] >> TBackupTests::BackupUuidColumn[Raw] >> TBackupTests::ShouldSucceedOnLargeData[Raw] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::AlterAssignDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 
2025-06-03T10:29:15.825140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:15.825176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.825186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:15.825191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:15.825207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:15.825212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:15.825224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.825240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:15.825403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:15.825485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:15.841719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:15.841754Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:15.841890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.846341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:15.846385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:15.846428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:15.847678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:15.847729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:15.847868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.847957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:15.848936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.849018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:15.849291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.849322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.849370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:15.849382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.849390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:15.849416Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:15.851113Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.887471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:15.887587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.887688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:15.887757Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:15.887770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.893100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.893160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:15.893238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.893253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:15.893261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:15.893268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:15.894251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.894276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:15.894283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:15.901779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.901821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.901832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.901845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:15.902779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:15.903745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:15.903845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:15.904129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.904176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { 
RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:15.904187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.904294Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... :29:28.431036Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:29:28.431042Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:29:28.431046Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:29:28.431059Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:28.431070Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:28.431078Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-03T10:29:28.431087Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:29:28.431093Z node 42 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:29:28.431098Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:29:28.431125Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:28.431132Z node 42 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1005, publications: 2, subscribers: 0 2025-06-03T10:29:28.431137Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:29:28.431141Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:29:28.431237Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:29:28.431248Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:29:28.431747Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:29:28.431777Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:28.431784Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, 
LocalPathId: 1] 2025-06-03T10:29:28.431834Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:28.431861Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:28.431868Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [42:207:2208], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-03T10:29:28.431875Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [42:207:2208], at schemeshard: 72057594046678944, txId: 1005, path id: 3 FAKE_COORDINATOR: Erasing txId 1005 2025-06-03T10:29:28.432011Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.432025Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.432030Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:29:28.432036Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:29:28.432042Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:28.432118Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:28.432129Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:29:28.432141Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:29:28.432187Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.432196Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.432201Z node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:29:28.432205Z 
node 42 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:29:28.432210Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:28.432219Z node 42 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:29:28.432261Z node 42 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-03T10:29:28.432325Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:28.433106Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.433152Z node 42 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:29:28.433238Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:29:28.433252Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:29:28.433363Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:29:28.433372Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:29:28.433445Z node 42 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:29:28.433465Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:29:28.433470Z node 42 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [42:450:2429] TestWaitNotification: OK eventTxId 1005 2025-06-03T10:29:28.433548Z node 42 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/FS" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:28.433584Z node 42 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/FS" took 46us result status StatusPathDoesNotExist 2025-06-03T10:29:28.433624Z node 42 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/FS\', error: path hasn\'t been resolved, 
nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/FS" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted wait until 72075186233409550 is deleted 2025-06-03T10:29:28.433691Z node 42 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:29:28.433704Z node 42 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:29:28.433712Z node 42 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-03T10:29:28.433721Z node 42 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 2025-06-03T10:29:28.433730Z node 42 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409550 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 Deleted tabletId 72075186233409550 >> TBackupTests::BackupUuidColumn[Raw] [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixLeftBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite >> DataShardReadIterator::ShouldReadHeadFromFollower [GOOD] >> DataShardReadIterator::ShouldReadFromHead >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit900 [GOOD] >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 >> TopicAutoscaling::BalancingAfterSplit_sessionsWithPartition [GOOD] >> TPersQueueMirrorer::ValidStartStream ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:29.024038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:29.024066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:29.024071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:29.024088Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:29.024102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:29.024107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:29.024118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:29.024142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:29.024253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:29.024340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:29.040809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:29.040844Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:29.049986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:29.050173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:29.050229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:29.053706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:29.053798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:29.053953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.054028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:29.054809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:29.054881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:29.055236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:29.055253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:29.055263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:29.055278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 
72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:29.055285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:29.055309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.057332Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:29.083759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:29.083869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.083953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:29.084018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:29.084031Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.085163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.085206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:29.085313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.085328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:29.085336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:29.085344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:29.086132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.086150Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:29.086158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:29.086679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.086693Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.086705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.086714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:29.087647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:29.088288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:29.088340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:29.088574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.088612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:29.088621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.088710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:29.088720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.088762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:29.088775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:29.089390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:29.089403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:29.089466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... schemeshard: 72057594046678944 2025-06-03T10:29:29.204070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:29.204095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:29:29.204128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:29.206600Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:412:2382], attempt# 0 2025-06-03T10:29:29.211388Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:412:2382], sender# [1:411:2381] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:29:29.212678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:29.212697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:29.212801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:29.212808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:3840 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 1331905E-8994-4466-AE81-CC3CFF39ADC5 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-06-03T10:29:29.212953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.212966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:29.213143Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } 2025-06-03T10:29:29.213932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:29.213966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:29.213973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:29.213981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:29.213990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:29.214020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:3840 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7A08FD61-E29E-412C-9DBE-6529A49BE159 amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-03T10:29:29.214701Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-03T10:29:29.214785Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:411:2381] 2025-06-03T10:29:29.214809Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:412:2382], sender# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-03T10:29:29.215213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:3840 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 6F64DB34-790A-42E8-BF5B-2815A4E44DFA amz-sdk-request: attempt=1 content-length: 39 content-md5: GLX1nc5/cKhlAfxBHlykQA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 39 2025-06-03T10:29:29.215776Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 18b5f59dce7f70a86501fc411e5ca440 } 2025-06-03T10:29:29.215793Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:412:2382], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:29.215839Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:29.218084Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:29.218109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:29.218140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:29.218157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:29.218174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.218179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.218185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:29.218194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:29.218244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:29.218817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.218871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.218881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:29.218898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:29.218904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:29.218910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part 
operation is done id#102:0 progress is 1/1 2025-06-03T10:29:29.218914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:29.218920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:29.218940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:29.218950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:29.218956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:29.218962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:29.218998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:29.219550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:29.219562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2368] TestWaitNotification: OK eventTxId 102 >> BasicUsage::BasicWriteSession [GOOD] >> BasicUsage::CloseWriteSessionImmediately >> TFileStoreWithReboots::Create [GOOD] >> BasicUsage::WriteSessionNoAvailableDatabase [GOOD] >> BasicUsage::WriteSessionSwitchDatabases >> TopicAutoscaling::PartitionSplit_ManySession_BeforeAutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::Create [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:16.257359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:16.257386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 
0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:16.257392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:16.257397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:16.257409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:16.257414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:16.257424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:16.257438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:16.257587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:16.257667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:16.275573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:16.275608Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:16.275727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:16.279627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:16.279794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:16.279837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:16.282088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:16.282161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:16.282331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.282396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:16.282991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:16.283050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:16.283353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:16.283366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:16.283382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:16.283391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:16.283397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:16.283441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:16.285280Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:16.312581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:16.312689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.312780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:16.312851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:16.312865Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.314238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.314282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:16.314345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.314359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-03T10:29:16.314367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:16.314375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:16.315171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.315191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:16.315198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:16.315742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.315756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.315763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:16.315773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:16.316633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:16.317362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:16.317418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:16.317695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.317735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:16.317744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:16.317840Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
r: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1001 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:30.459990Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1001:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1001 msg type: 269090816 2025-06-03T10:29:30.460012Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1001, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1001 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1001 at step: 5000003 2025-06-03T10:29:30.460072Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460088Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1001 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 214748366955 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460096Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_fs.cpp:155: TCreateFileStore::TPropose operationId# 1001:0 HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460114Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1001:0 128 -> 240 2025-06-03T10:29:30.460147Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:29:30.460160Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 FAKE_COORDINATOR: Erasing txId 1001 2025-06-03T10:29:30.460573Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460582Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:30.460621Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:30.460643Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460647Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1001, path id: 2 2025-06-03T10:29:30.460651Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1001, path id: 3 2025-06-03T10:29:30.460741Z node 50 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1001:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.460748Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1001:0 ProgressState 2025-06-03T10:29:30.460759Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1001:0 progress is 1/1 2025-06-03T10:29:30.460762Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:29:30.460766Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1001:0 progress is 1/1 2025-06-03T10:29:30.460768Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:29:30.460771Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1001, ready parts: 1/1, is published: false 2025-06-03T10:29:30.460775Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:29:30.460779Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1001:0 2025-06-03T10:29:30.460785Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1001:0 2025-06-03T10:29:30.460808Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:29:30.460812Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1001, publications: 2, subscribers: 0 2025-06-03T10:29:30.460815Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-03T10:29:30.460817Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:29:30.460934Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:29:30.460945Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:29:30.460948Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1001 2025-06-03T10:29:30.460952Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-03T10:29:30.460954Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:29:30.461064Z node 50 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:29:30.461077Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:29:30.461082Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1001 2025-06-03T10:29:30.461086Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:29:30.461091Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:30.461102Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1001, subscribers: 0 2025-06-03T10:29:30.461719Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 2025-06-03T10:29:30.461988Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 TestModificationResult got TxId: 1001, wait until txId: 1001 TestWaitNotification wait txId: 1001 2025-06-03T10:29:30.462053Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1001: send EvNotifyTxCompletion 2025-06-03T10:29:30.462071Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1001 2025-06-03T10:29:30.462137Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1001, at schemeshard: 72057594046678944 2025-06-03T10:29:30.462157Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1001: got EvNotifyTxCompletionResult 2025-06-03T10:29:30.462163Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1001: satisfy waiter [50:378:2357] TestWaitNotification: OK eventTxId 1001 2025-06-03T10:29:30.462239Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/FS_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:30.462281Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/FS_1" took 53us result status StatusSuccess 2025-06-03T10:29:30.462373Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/FS_1" PathDescription { Self { Name: "FS_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeFileStore CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 
ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 FileStoreVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } FileStoreDescription { Name: "FS_1" PathId: 3 IndexTabletId: 72075186233409546 Config { Version: 1 FileSystemId: "FS_1" FolderId: "folder" CloudId: "cloud" BlockSize: 4096 BlocksCount: 4096 ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Version: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite >> DataShardReadIterator::ShouldReadFromHead [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink >> ReadIteratorExternalBlobs::ExtBlobs [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys >> Viewer::JsonStorageListingV1NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV1PDiskIdFilter >> KqpWorkloadServiceTables::TestCleanupOnServiceRestart [GOOD] >> KqpWorkloadServiceTables::TestLeaseExpiration >> DataShardReadIterator::ShouldLimitRead10RangesChunk100Limit1001 [GOOD] >> DataShardReadIterator::ShouldReadFromFollower >> TBackupTests::BackupUuidColumn[Zstd] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages >> CommitOffset::Commit_Flat_WithWrongSession [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast >> TSchemeShardMoveTest::ResetCachedPath >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> ColumnStatistics::CountMinSketchServerlessStatistics [GOOD] Test command err: 2025-06-03T10:26:26.153329Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:607:2415], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:26.153410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:26.153439Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c71/r3tmp/tmp4qfgS1/pdisk_1.dat 2025-06-03T10:26:26.266144Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27878, node 1 2025-06-03T10:26:26.353616Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:26.353639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:26.353644Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:26.354314Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:26.354411Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:26.442853Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:26.442901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:26.454777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16877 2025-06-03T10:26:26.808815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:27.582306Z node 4 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 4 2025-06-03T10:26:27.591513Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.591547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.624890Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-03T10:26:27.625740Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:27.796537Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.796746Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.796925Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:27.796985Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.797052Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.797079Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.797126Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.797150Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.797175Z node 4 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:27.950567Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.950609Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.961789Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:27.998566Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:28.010951Z node 4 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:28.010980Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:28.019761Z node 4 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:28.019925Z node 4 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:28.019944Z node 4 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:28.019948Z node 4 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:28.019952Z node 4 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:28.019956Z node 4 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:28.019961Z node 4 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:28.019966Z node 4 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:28.020077Z node 4 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:28.036645Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.036673Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [4:2026:2601], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.038731Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:2053:2616] 2025-06-03T10:26:28.039966Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:2084:2631] 
2025-06-03T10:26:28.040017Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [4:2084:2631], schemeshard id = 72075186224037897 2025-06-03T10:26:28.043249Z node 4 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared 2025-06-03T10:26:28.045116Z node 4 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:28.045133Z node 4 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:28.045144Z node 4 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared/.metadata/_statistics 2025-06-03T10:26:28.047379Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:28.049018Z node 4 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:28.049049Z node 4 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:28.161159Z node 4 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:28.220016Z node 4 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:28.283005Z node 4 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:28.924334Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:29.713761Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-03T10:26:29.723542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.723572Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.723617Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:29.723632Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:29.766260Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:29.766885Z node 4 :HIVE WARN: hive_impl.cpp:771: HIVE#72075186224037888 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:29.767130Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.767553Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:29.781575Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:29.876369Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7814: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, at schemeshard: 72075186224037899 2025-06-03T10:26:29.876390Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7830: Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult, StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.876400Z node 4 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [4:3072:2942], at schemeshard: 72075186224037899, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037899 2025-06-03T10:26:29.876642Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [4:3073:2943] 2025-06-03T10:26:29.876798Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [4:3073:2943], schemeshard id = 72075186224037899 2025-06-03T10:26:30.857916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:26:31.715909Z node 2 :S ... Z node 4 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:29:28.288368Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:28.288405Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:28.288415Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is column table. 
2025-06-03T10:29:28.288421Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:28.290269Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:29:28.307225Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:29:28.307496Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:29:28.307522Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:29:28.308040Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:29:28.319729Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:29:28.319819Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 2, current Round: 0 2025-06-03T10:29:28.320041Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12355:7579], server id = [4:12356:7580], tablet id = 72075186224037911, status = OK 2025-06-03T10:29:28.320177Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12355:7579], path = { OwnerId: 72075186224037899 LocalId: 2 } 2025-06-03T10:29:28.321386Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037911 2025-06-03T10:29:28.321408Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-03T10:29:28.321462Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:29:28.321488Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:29:28.321596Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:28.322089Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12355:7579], server id = [4:12356:7580], tablet id = 72075186224037911 2025-06-03T10:29:28.322097Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:28.322255Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:29:28.329258Z node 4 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [4:12376:7599]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:28.329394Z node 4 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:29:28.329402Z node 4 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [4:12376:7599], StatRequests.size() = 1 2025-06-03T10:29:28.357426Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=YzUwMzc1Y2MtOGQ5MjdmMTYtNDI4OWIzNTUtYTUyYWY3Nzg=, TxId: 2025-06-03T10:29:28.357455Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=YzUwMzc1Y2MtOGQ5MjdmMTYtNDI4OWIzNTUtYTUyYWY3Nzg=, TxId: 2025-06-03T10:29:28.357663Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:28.369942Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:29:28.369972Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 2025-06-03T10:29:29.009084Z node 4 :STATISTICS DEBUG: service_impl.cpp:252: Event round 2 is different from the current 0 2025-06-03T10:29:29.009115Z node 4 :STATISTICS DEBUG: service_impl.cpp:379: Skip TEvDispatchKeepAlive 2025-06-03T10:29:31.057708Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:29:31.057810Z node 4 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 4 2025-06-03T10:29:31.078966Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:31.079016Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:31.079027Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037905, LocalPathId: 2] is column table. 
2025-06-03T10:29:31.079033Z node 4 :STATISTICS DEBUG: aggregator_impl.cpp:732: [72075186224037894] Start schedule traversal navigate for path [OwnerId: 72075186224037905, LocalPathId: 2] 2025-06-03T10:29:31.080486Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:19: [72075186224037894] TTxNavigate::Execute 2025-06-03T10:29:31.093813Z node 4 :STATISTICS DEBUG: tx_navigate.cpp:72: [72075186224037894] TTxNavigate::Complete 2025-06-03T10:29:31.094118Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:102: [72075186224037894] TTxResolve::Execute 2025-06-03T10:29:31.094145Z node 4 :STATISTICS DEBUG: tx_resolve.cpp:133: [72075186224037894] TTxResolve::Complete 2025-06-03T10:29:31.094460Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:58: [72075186224037894] TTxResponseTabletDistribution::Execute. Node count = 1 2025-06-03T10:29:31.116986Z node 4 :STATISTICS DEBUG: tx_response_tablet_distribution.cpp:92: [72075186224037894] TTxResponseTabletDistribution::Complete 2025-06-03T10:29:31.117099Z node 4 :STATISTICS DEBUG: service_impl.cpp:588: Received TEvAggregateStatistics from node: 4, Round: 3, current Round: 0 2025-06-03T10:29:31.117358Z node 4 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 4, client id = [4:12522:7671], server id = [4:12523:7672], tablet id = 72075186224037912, status = OK 2025-06-03T10:29:31.117393Z node 4 :STATISTICS DEBUG: service_impl.cpp:1055: TEvStatisticsRequest send, client id = [4:12522:7671], path = { OwnerId: 72075186224037905 LocalId: 2 } 2025-06-03T10:29:31.118726Z node 4 :STATISTICS DEBUG: service_impl.cpp:317: Received TEvStatisticsResponse TabletId: 72075186224037912 2025-06-03T10:29:31.118754Z node 4 :STATISTICS DEBUG: service_impl.cpp:502: Send aggregate statistics response to node: 4 2025-06-03T10:29:31.118796Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:27: [72075186224037894] TTxAggregateStatisticsResponse::Execute 2025-06-03T10:29:31.118831Z node 4 :STATISTICS DEBUG: tx_aggr_stat_response.cpp:118: [72075186224037894] TTxAggregateStatisticsResponse::Complete 2025-06-03T10:29:31.118950Z node 4 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. 
Database: /Root/Shared 2025-06-03T10:29:31.119561Z node 4 :STATISTICS DEBUG: service_impl.cpp:1121: EvClientDestroyed, node id = 4, client id = [4:12522:7671], server id = [4:12523:7672], tablet id = 72075186224037912 2025-06-03T10:29:31.119577Z node 4 :STATISTICS DEBUG: service_impl.cpp:1139: Skip EvClientDestroyed 2025-06-03T10:29:31.119889Z node 4 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DECLARE $stat_type AS Uint32; DECLARE $column_tags AS List; DECLARE $data AS List; UPSERT INTO `.metadata/_statistics` (owner_id, local_path_id, stat_type, column_tag, data) VALUES ($owner_id, $local_path_id, $stat_type, $column_tags[0], $data[0]), ($owner_id, $local_path_id, $stat_type, $column_tags[1], $data[1]); 2025-06-03T10:29:31.145055Z node 4 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=MWFiNGY3NTktZWM2N2QyYWMtNzA3OGM0NWEtZTRkOGI3ZmM=, TxId: 2025-06-03T10:29:31.145080Z node 4 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=MWFiNGY3NTktZWM2N2QyYWMtNzA3OGM0NWEtZTRkOGI3ZmM=, TxId: 2025-06-03T10:29:31.145371Z node 4 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:31.145774Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:12546:6216]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:31.145882Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:31.145890Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:29:31.146610Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:31.146630Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 1 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:29:31.146640Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 1 ], PathId[ [OwnerId: 72075186224037899, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:29:31.149473Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 1 2025-06-03T10:29:31.149691Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [1:12546:6216]], StatType[ 2 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:31.149782Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:29:31.149790Z node 1 :STATISTICS DEBUG: service_impl.cpp:812: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] resolve DatabasePath[ [OwnerId: 72057594046644480, LocalPathId: 2] ] 2025-06-03T10:29:31.149879Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:29:31.149889Z node 1 :STATISTICS DEBUG: service_impl.cpp:715: [TStatService::QueryStatistics] RequestId[ 2 ], Database[ Root/Shared ], TablePath[ /Root/Shared/.metadata/_statistics ] 2025-06-03T10:29:31.149896Z node 1 :STATISTICS DEBUG: service_impl.cpp:656: [TStatService::LoadStatistics] QueryId[ 2 ], PathId[ [OwnerId: 
72075186224037905, LocalPathId: 2] ], StatType[ 2 ], ColumnTag[ 1 ] 2025-06-03T10:29:31.150619Z node 1 :STATISTICS DEBUG: service_impl.cpp:1152: TEvLoadStatisticsQueryResponse, request id = 2 >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK >> TSchemeShardMoveTest::ResetCachedPath [GOOD] >> TopicAutoscaling::ControlPlane_PauseAutoPartitioning [GOOD] >> TopicAutoscaling::ControlPlane_CDC_Enable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::BackupUuidColumn[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:31.957121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:31.957143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:31.957148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:31.957162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:31.957173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:31.957176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:31.957183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:31.957197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:31.957279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:31.957365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:31.968112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:31.968139Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:31.971298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:31.971390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:31.971424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as 
Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:31.973144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:31.973192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:31.973286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:31.973371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:31.973959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:31.974004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:31.974275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:31.974285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:31.974292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:31.974302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:31.974306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:31.974321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.975565Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:31.990860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:31.990948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.991010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:31.991055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:31.991065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-03T10:29:31.991887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:31.991913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:31.991977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.991986Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:31.991990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:31.991995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:31.992373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.992382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:31.992386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:31.992716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.992724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:31.992730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:31.992736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:31.993265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:31.993676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:31.993714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:31.993867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions 
count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:31.993887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:31.993894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:31.993952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:31.993958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:31.993986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:31.993995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:31.994357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:31.994364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:31.994407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ard: 72057594046678944 2025-06-03T10:29:32.088923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:32.088945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:29:32.088980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:32.091932Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:412:2382], attempt# 0 2025-06-03T10:29:32.095195Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:412:2382], sender# [1:411:2381] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:29:32.096189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:32.096200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:32.096280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:32.096285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:29:32.096382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.096391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:8732 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: AA781161-4AA4-4273-9135-7F05EE8F3C90 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-06-03T10:29:32.096583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:32.096595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:32.096599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:32.096603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, 
pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:32.096610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:32.096622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:29:32.096761Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:8732 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: D21487DA-7AC4-4B79-90BF-51CDB34D5136 amz-sdk-request: attempt=1 content-length: 357 content-md5: IxJB3qM/y2xlsv8qcwTF7g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 2025-06-03T10:29:32.099100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 357 2025-06-03T10:29:32.099330Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 231241dea33fcb6c65b2ff2a7304c5ee } 2025-06-03T10:29:32.099347Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:411:2381] 2025-06-03T10:29:32.099422Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:412:2382], sender# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:8732 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 8929CF96-34DB-4CC5-AF95-F820EE3E74B3 amz-sdk-request: attempt=1 content-length: 40 content-md5: LXbLDYru8NmFsYXNSXjnpQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 40 2025-06-03T10:29:32.100435Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 2d76cb0d8aeef0d985b185cd4978e7a5 } 2025-06-03T10:29:32.100443Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:412:2382], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:32.100477Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:32.113114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:32.113153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:32.113184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:32.113200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 20 RowsProcessed: 1 } 2025-06-03T10:29:32.113212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.113216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.113220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:32.113226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:32.113279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:32.113961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.114073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.114083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:32.114101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:32.114107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:32.114113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:32.114117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:32.114123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:32.114141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:32.114148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:32.114154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:32.114160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:32.114192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:32.114709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:32.114723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2368] TestWaitNotification: OK eventTxId 102 >> TFileStoreWithReboots::CreateWithIntermediateDirs [GOOD] >> SystemView::PartitionStatsTtlFields [GOOD] >> SystemView::PartitionStatsLocksFields >> TopicAutoscaling::ReadingAfterSplitTest_PQv1 [GOOD] >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyPrefixRightBorder-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite >> DataShardReadIterator::ShouldReadFromHeadWithConflict+UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::ResetCachedPath [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:32.247845Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:32.247869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:32.247875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:32.247881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:32.247898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:32.247902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:32.247912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:32.247926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:32.248032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:32.248103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:32.258126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:32.258149Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:32.261586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:32.261695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:32.261720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:32.263238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:32.263304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:32.263384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.263419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:32.264098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:32.264152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:32.264396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:32.264404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:32.264413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:32.264418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:32.264423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:32.264438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.265600Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, 
primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:32.287014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:32.287121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.287196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:32.287252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:32.287265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.288178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.288207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:32.288266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.288279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:32.288285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:32.288291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:32.288763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.288776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:32.288783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:32.289138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.289152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.289159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, 
operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:32.289167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:32.290046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:32.290530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:32.290577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:32.290788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.290816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:32.290840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:32.290908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:32.290916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:32.290952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:32.290966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:32.291568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:32.291582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:32.291646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
.cpp:418: TAlterTable TPropose operationId# 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:32.636352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 105 ready parts: 1/1 2025-06-03T10:29:32.636391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 105 MinStep: 1 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:32.636832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 105:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:105 msg type: 269090816 2025-06-03T10:29:32.636884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 105, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 105 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 105 at step: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72075186233409549 for txId: 105 at step: 5000004 2025-06-03T10:29:32.637002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.637026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 105 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:32.637035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 105:0 HandleReply TEvOperationPlan, operationId: 105:0, stepId: 5000004, at schemeshard: 72057594046678944 2025-06-03T10:29:32.637161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 105:0 128 -> 129 2025-06-03T10:29:32.637198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-03T10:29:32.638735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:32.638748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 105, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:32.638819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:32.638827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 105, path id: 3 2025-06-03T10:29:32.638932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at 
schemeshard: 72057594046678944 2025-06-03T10:29:32.638943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 105:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:29:32.639106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:29:32.639121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 8 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:29:32.639127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:29:32.639137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 8 2025-06-03T10:29:32.639145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:29:32.639163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 105, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 105 2025-06-03T10:29:32.639618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 297 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-03T10:29:32.639628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-03T10:29:32.639652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 297 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-03T10:29:32.639668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 105 Step: 5000004 OrderId: 105 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 297 } } CommitVersion { Step: 5000004 TxId: 105 } 2025-06-03T10:29:32.639827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-03T10:29:32.639834Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 105, tablet: 72075186233409549, partId: 0 2025-06-03T10:29:32.639848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 105:0, at schemeshard: 72057594046678944, message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-03T10:29:32.639855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:29:32.639864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 105:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 674 RawX2: 4294969906 } Origin: 72075186233409549 State: 2 TxId: 105 Step: 0 Generation: 2 2025-06-03T10:29:32.639876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 105:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:32.639884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.639890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 105:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:29:32.639896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 105:0 129 -> 240 2025-06-03T10:29:32.640555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:29:32.640619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.640672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.640732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-03T10:29:32.640741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 105:0 ProgressState 2025-06-03T10:29:32.640757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-06-03T10:29:32.640762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-03T10:29:32.640767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 1/1 2025-06-03T10:29:32.640772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-03T10:29:32.640777Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 105, ready parts: 1/1, is published: true 2025-06-03T10:29:32.640791Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 105 2025-06-03T10:29:32.640798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 1/1 2025-06-03T10:29:32.640804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-06-03T10:29:32.640809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 105:0 2025-06-03T10:29:32.640838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:32.641255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:29:32.641266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [1:836:2753] TestWaitNotification: OK eventTxId 105 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::CreateWithIntermediateDirs [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:29:15.254685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:15.254722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.254733Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:15.254739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:15.254754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:15.254759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:15.254770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.254787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:15.254912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:15.254989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:15.283850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:15.283889Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:15.284034Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.293943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:15.294001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:15.294043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:15.295921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:15.296003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:15.296243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.296400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:15.298562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.298647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:15.298957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:15.298972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:15.299013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:15.299025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:15.299032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:15.299058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:15.300728Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:15.327525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:15.327638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.327728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:15.327796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:15.327810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.331574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.331626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:15.331695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.331710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:15.331719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:15.331726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:15.332781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.332805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:15.332813Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:15.333346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.333360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:15.333368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.333377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:15.334192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:15.334903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:15.334956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:15.335199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:15.335235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:15.335245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:15.335357Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
etails: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 5], 5 2025-06-03T10:29:32.758014Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 6], 2 2025-06-03T10:29:32.758533Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.758555Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.758560Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 5, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:29:32.758566Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:29:32.758571Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:29:32.758964Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.758984Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.758990Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 4, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:29:32.758995Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 6 2025-06-03T10:29:32.759001Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:32.759109Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759120Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759125Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:29:32.759129Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 
72057594046678944, LocalPathId: 4], version: 6 2025-06-03T10:29:32.759134Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:29:32.759229Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759240Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759247Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:29:32.759251Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-06-03T10:29:32.759255Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:29:32.759308Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759318Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.759322Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:29:32.759326Z node 61 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 2 2025-06-03T10:29:32.759330Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-03T10:29:32.759339Z node 61 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:29:32.759753Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.760268Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.760296Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.760308Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:29:32.760319Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-03T10:29:32.760378Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:29:32.760386Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:29:32.760465Z node 61 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:29:32.760490Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:29:32.760495Z node 61 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [61:393:2372] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:29:32.760582Z node 61 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Valid/x/y/z" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:32.760637Z node 61 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Valid/x/y/z" took 68us result status StatusSuccess 2025-06-03T10:29:32.760726Z node 61 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Valid/x/y/z" PathDescription { Self { Name: "z" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeFileStore CreateFinished: true CreateTxId: 1003 CreateStep: 5000003 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 FileStoreVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } FileStoreDescription { Name: "z" PathId: 6 IndexTabletId: 72075186233409546 Config { Version: 1 FileSystemId: "Valid/x/y/z" FolderId: "folder" CloudId: "cloud" BlockSize: 4096 BlocksCount: 4096 ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Version: 1 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:32.760797Z node 61 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Invalid" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:32.760824Z node 61 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Invalid" took 29us result status StatusPathDoesNotExist 2025-06-03T10:29:32.760847Z node 61 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Invalid\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Invalid" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::MoveMigratedTable >> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad >> TSchemeShardMoveTest::Boot >> DataShardReadIterator::ShouldReadFromFollower [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower >> TSchemeShardMoveTest::Boot [GOOD] >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly >> TSchemeShardMoveTest::TwoTables >> TSchemeShardMoveTest::MoveMigratedTable [GOOD] >> TSchemeShardMoveTest::MoveOldTableWithIndex >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] >> TSchemeShardMoveTest::MoveIndex >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] >> TSchemeShardMoveTest::TwoTables [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveOldTableWithIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:33.745030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:33.745061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:33.745067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:33.745072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:33.745087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-03T10:29:33.745091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:33.745100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:33.745115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:33.745227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:33.745339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:33.760204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:33.760232Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:33.764158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:33.764271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:33.764306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:33.766097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:33.766189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:33.766293Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.766341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:33.766897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:33.766937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:33.767214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:33.767226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:33.767239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:33.767247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:33.767253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-03T10:29:33.767277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.768709Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:33.793233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:33.793378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.793460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:33.793517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:33.793531Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.794571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.794607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:33.794670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.794683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:33.794690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:33.794697Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:33.795244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.795258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:33.795263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:33.795675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.795684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.795691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.795698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:33.796421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:33.796902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:33.796950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:33.797167Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.797193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:33.797213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.797276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:33.797285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.797341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:33.797355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:33.797851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:33.797860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:33.797907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 025-06-03T10:29:34.334617Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:29:34.334627Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:29:34.334645Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.334650Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.334656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:29:34.334664Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:34.334768Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 324 RawX2: 8589936899 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:29:34.334774Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 2 2025-06-03T10:29:34.334787Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:2, at schemeshard: 72057594046678944, message: Source { RawX1: 324 RawX2: 8589936899 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:29:34.334793Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:29:34.334802Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 324 RawX2: 8589936899 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:29:34.334812Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:2, shardIdx: 72057594046678944:2, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.334816Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.334821Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:2, datashard: 72075186233409546, 
at schemeshard: 72057594046678944 2025-06-03T10:29:34.334826Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:2 129 -> 240 2025-06-03T10:29:34.335550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.335688Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336049Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336154Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336167Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:482: TMoveTable TDone, operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336176Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:492: TMoveTable TDone, operationId: 102:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:29:34.336192Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 2/3 2025-06-03T10:29:34.336197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-03T10:29:34.336204Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 2/3 2025-06-03T10:29:34.336208Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 2/3 2025-06-03T10:29:34.336214Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/3, is published: true 2025-06-03T10:29:34.336273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336328Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336335Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:482: TMoveTable TDone, operationId: 102:2 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336341Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:492: TMoveTable TDone, operationId: 102:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-03T10:29:34.336354Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:2 progress is 3/3 2025-06-03T10:29:34.336358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-03T10:29:34.336364Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:2 progress is 3/3 2025-06-03T10:29:34.336368Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-03T10:29:34.336372Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/3, is published: true 2025-06-03T10:29:34.336394Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:380:2347] message: TxId: 102 2025-06-03T10:29:34.336401Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 3/3 2025-06-03T10:29:34.336410Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:34.336416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:34.336449Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-03T10:29:34.336455Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:29:34.336460Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:1 2025-06-03T10:29:34.336465Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:1 2025-06-03T10:29:34.336470Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-03T10:29:34.336475Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:34.336479Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:2 2025-06-03T10:29:34.336483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:2 2025-06-03T10:29:34.336491Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-03T10:29:34.336496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:29:34.336560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:34.336568Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:29:34.336582Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:34.336593Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 
2025-06-03T10:29:34.336599Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:29:34.336605Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:29:34.336611Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:34.337480Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:34.337498Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:480:2440] 2025-06-03T10:29:34.337522Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite >> TSchemeShardMoveTest::MoveIndex [GOOD] >> TSchemeShardMoveTest::MoveIndexDoesNonExisted ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::AsyncIndexWithSyncInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:33.938794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:33.938833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:33.938838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:33.938842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:33.938856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:33.938859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:33.938866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:33.938879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:33.938963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:33.939035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:33.950893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:33.950927Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:33.954712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:33.954823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:33.954853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:33.956637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:33.956687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:33.956770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.956801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:33.957458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:33.957496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:33.957736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:33.957744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:33.957754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:33.957759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:33.957764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:33.957777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.958875Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:33.975887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { 
Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:33.975976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.976046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:33.976099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:33.976110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.977011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.977038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:33.977090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.977101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:33.977108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:33.977114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:33.977607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.977622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:33.977627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:33.978028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.978042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:33.978049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.978056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:33.978763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 
message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:33.979173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:33.979215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:33.979421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:33.979448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:33.979465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.979524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:33.979531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:33.979563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:33.979575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:33.979960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:33.979969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:33.980014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
324 RawX2: 8589936899 } Origin: 72075186233409546 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-03T10:29:34.413182Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:2, shardIdx: 72057594046678944:2, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413185Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413189Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 103:2, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413194Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:2 129 -> 240 2025-06-03T10:29:34.413286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-03T10:29:34.413320Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 103, tablet: 72075186233409547, partId: 0 2025-06-03T10:29:34.413336Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 103:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-03T10:29:34.413341Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:29:34.413351Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 103:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 2 TxId: 103 Step: 0 Generation: 2 2025-06-03T10:29:34.413359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 103:0, shardIdx: 72057594046678944:1, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413363Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413367Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 103:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:29:34.413373Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 129 -> 240 2025-06-03T10:29:34.414390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.414885Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 
72057594046678944 2025-06-03T10:29:34.415185Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415318Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:2, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415328Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:482: TMoveTable TDone, operationId: 103:2 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415337Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:492: TMoveTable TDone, operationId: 103:2 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 4], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 7], at schemeshard: 72057594046678944 2025-06-03T10:29:34.415353Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:2 progress is 2/3 2025-06-03T10:29:34.415358Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-03T10:29:34.415363Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:2 progress is 2/3 2025-06-03T10:29:34.415367Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 2/3 2025-06-03T10:29:34.415372Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 2/3, is published: true 2025-06-03T10:29:34.415450Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415493Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_move_table.cpp:482: TMoveTable TDone, operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415496Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_move_table.cpp:492: TMoveTable TDone, operationId: 103:0 ProgressState, SourcePathId: [OwnerId: 72057594046678944, LocalPathId: 2], TargetPathId: [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:29:34.415502Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 3/3 2025-06-03T10:29:34.415504Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-03T10:29:34.415507Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 3/3 2025-06-03T10:29:34.415510Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-03T10:29:34.415512Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 3/3, is published: true 2025-06-03T10:29:34.415515Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 3/3 2025-06-03T10:29:34.415520Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: 
Operation and all the parts is done, operation id: 103:0 2025-06-03T10:29:34.415527Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:29:34.415549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-03T10:29:34.415553Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:29:34.415556Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:1 2025-06-03T10:29:34.415559Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:1 2025-06-03T10:29:34.415563Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-03T10:29:34.415565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:34.415567Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:2 2025-06-03T10:29:34.415569Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:2 2025-06-03T10:29:34.415575Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-03T10:29:34.415577Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:29:34.415630Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:34.415635Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:29:34.415644Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:34.415649Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:29:34.415653Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:29:34.415656Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:29:34.415660Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:34.416609Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: 
TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:29:34.416796Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [2:270:2260] Handle TEvGetProxyServicesRequest TestWaitNotification wait txId: 103 2025-06-03T10:29:34.459506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:29:34.459527Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:29:34.459626Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:29:34.459646Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:29:34.459652Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:678:2561] TestWaitNotification: OK eventTxId 103 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::TwoTables [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:34.251270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:34.251299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:34.251303Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:34.251308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:34.251319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:34.251322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:34.251329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:34.251342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:34.251435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:34.251500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: 
TxInitSchema.Execute 2025-06-03T10:29:34.261082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:34.261108Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:34.264695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:34.264797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:34.264837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:34.266641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:34.266726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:34.266815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.266871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:34.267669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:34.267717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:34.267971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:34.267979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:34.267990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:34.267996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:34.268001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:34.268039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.269323Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:34.290634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:34.290738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:29:34.290811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:34.290865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:34.290879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.291819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.291858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:34.291924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.291938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:34.291944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:34.291951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:34.292488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.292503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.292510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:34.293268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.293288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.293319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.293327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:34.294227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:34.294821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:34.294874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:34.295099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.295134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:34.295159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.295233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:34.295244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.295286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:34.295300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:34.295832Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:34.295844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:34.295897Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
2025-06-03T10:29:34.472534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:29:34.472540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:29:34.472544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:29:34.473163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:29:34.473175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:503:2462] 2025-06-03T10:29:34.473187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-03T10:29:34.473337Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473391Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table1" took 89us result status StatusPathDoesNotExist 2025-06-03T10:29:34.473453Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473528Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473568Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove1" took 42us result status StatusSuccess 2025-06-03T10:29:34.473671Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove1" PathDescription { Self { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableMove1" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473775Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473792Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table2" took 18us result status StatusPathDoesNotExist 2025-06-03T10:29:34.473810Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473856Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false 
ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473873Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove2" took 18us result status StatusSuccess 2025-06-03T10:29:34.473928Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove2" PathDescription { Self { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "TableMove2" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:34.473990Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:34.474012Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 24us result status StatusSuccess 2025-06-03T10:29:34.474085Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 13 SubDomainVersion: 1 
SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "TableMove1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "TableMove2" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DataShardReadIterator::ShouldReadFromHeadWithConflict-UseSink [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] >> Viewer::JsonStorageListingV2NodeIdFilter [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter |65.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/control/ut/ydb-core-control-ut |65.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/control/ut/ydb-core-control-ut |65.3%| [LD] {RESULT} $(B)/ydb/core/control/ut/ydb-core-control-ut >> KqpOlapTiering::LocksInterference [GOOD] >> DataShardReadIterator::ShouldNotReadFutureMvccFromFollower [GOOD] >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIndexDoesNonExisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:34.556192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:34.556214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:34.556219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:34.556223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:34.556237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type 
TxMergeTablePartition, limit 10000 2025-06-03T10:29:34.556241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:34.556248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:34.556260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:34.556341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:34.556409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:34.566065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:34.566091Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:34.569404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:34.569513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:34.569539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:34.571017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:34.571095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:34.571190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.571231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:34.571794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:34.571836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:34.572093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:34.572101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:34.572111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:34.572128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:34.572133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 
2025-06-03T10:29:34.572155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.573288Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:34.588830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:34.588939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.589005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:34.589045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:34.589054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.589886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.589914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:34.589965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.589974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:34.589978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:34.589983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:34.590443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.590460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:34.590466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:34.591077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.591089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:34.591095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.591101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:34.591686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:34.592105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:34.592138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:34.592338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:34.592362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:34.592384Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.592447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:34.592453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:34.592484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:34.592494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:34.592850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:34.592871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:34.592919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard 
DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... e ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:35.130672Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 18us result status StatusSuccess 2025-06-03T10:29:35.130719Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:35.130763Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:35.130789Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Sync" took 26us result status StatusSuccess 2025-06-03T10:29:35.130914Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Sync" PathDescription { Self { Name: "Sync" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Sync" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:35.130971Z node 2 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:35.130986Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Async" took 16us result status StatusSuccess 2025-06-03T10:29:35.131049Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Async" PathDescription { Self { Name: "Async" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 5 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Async" LocalPathId: 5 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 
KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardMoveTest::MoveTableForBackup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapTiering::LocksInterference [GOOD] Test command err: Trying to start YDB, gRPC: 11080, MsgBus: 63061 2025-06-03T10:29:06.780569Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668219136729802:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:06.780707Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d6e/r3tmp/tmpSYfDIh/pdisk_1.dat 2025-06-03T10:29:06.897883Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:06.897915Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:06.904833Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:06.906249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 11080, node 1 2025-06-03T10:29:06.925549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:06.925568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:06.925570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:06.925620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63061 TClient is connected to server localhost:63061 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:29:07.057777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:07.061105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:07.415559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:07.445859Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:07.445933Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:07.446011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:07.446041Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:07.446065Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:07.446088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:07.446110Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:07.446128Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:07.446155Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:07.446179Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:07.446202Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:07.446225Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668223431697672:2330];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:07.448656Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:07.448703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:07.448749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:07.448763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:07.448777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:07.448792Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:07.448804Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:07.448815Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:07.448828Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:07.448841Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:07.448864Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:07.448875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];tablet_id=72075186224037890;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:07.449288Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:29:07.449386Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:29:07.451491Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:29:07.451511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:29:07.451525Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:29:07.451531Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:29:07.451551Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:29:07.451557Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2 ... 
;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.800139Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.800144Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.812220Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.812279Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=tiering.cpp:214;event=ExtractTtlTasks;total_portions=0;tasks=0; 2025-06-03T10:29:34.812297Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=scheme.cpp:67;rw_count=0; 2025-06-03T10:29:34.812309Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;queue=ttl;external_count=0;fline=scheme.cpp:121;internal_queue=0;external_queue=0; 2025-06-03T10:29:34.812321Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.812359Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.812370Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.812413Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.812435Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.812440Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.812444Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.813099Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.813136Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=tiering.cpp:214;event=ExtractTtlTasks;total_portions=0;tasks=0; 2025-06-03T10:29:34.813140Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=scheme.cpp:67;rw_count=0; 2025-06-03T10:29:34.813149Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=scheme.cpp:121;internal_queue=0;external_queue=0; 2025-06-03T10:29:34.813160Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.813183Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.813189Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.813217Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.813230Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: 
tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.813241Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.813254Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; ==================================== QUERY: SELECT * FROM `/Root/olapStore/olapTable/.sys/primary_index_portion_stats` WHERE Activity == 1 RESULT: 2025-06-03T10:29:34.919064Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.919064Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.919085Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.919085Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.919099Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.919101Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.919104Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668223431697674:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.919106Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668223431697673:2331];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 
2025-06-03T10:29:34.920306Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:34.920330Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:34.920341Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:34.920345Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668223431697728:2346];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:34.931329Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946574907, txId: 281474976715665] shutting down >> IndexBuildTestReboots::IndexPartitioning >> LabeledDbCounters::OneTablet [GOOD] >> LabeledDbCounters::OneTabletRemoveCounters |65.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> TSchemeShardMoveTest::MoveTableForBackup [GOOD] >> TSchemeShardMoveTest::MoveTableWithSequence >> TFileStoreWithReboots::CreateAlter [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinue-EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] |65.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCaseWithDataColumns ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveTableWithSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:35.804779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:35.804803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:35.804807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:35.804812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default 
configuration 2025-06-03T10:29:35.804824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:35.804826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:35.804833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:35.804844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:35.804954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:35.805022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:35.815186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:35.815213Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:35.819058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:35.819179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:35.819210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:35.820910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:35.820981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:35.821086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:35.821133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:35.821791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:35.821834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:35.822121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:35.822132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:35.822144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:35.822153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:29:35.822159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:35.822185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.823395Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:35.840016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:35.840097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.840156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:35.840195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:35.840205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.841067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:35.841088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:35.841134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.841142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:35.841146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:35.841151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:35.842380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.842424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:35.842436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:35.843431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.843455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:35.843462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:35.843470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:35.844474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:35.845275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:35.845347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:35.845616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:35.845665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:35.845695Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:35.845794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:35.845809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:35.845862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:35.845880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:35.846683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:35.846696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-03T10:29:35.846750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... e id#102:1 progress is 2/2 2025-06-03T10:29:36.471678Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-03T10:29:36.471681Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:1 progress is 2/2 2025-06-03T10:29:36.471684Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-03T10:29:36.471687Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 2/2, is published: true 2025-06-03T10:29:36.471704Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:377:2345] message: TxId: 102 2025-06-03T10:29:36.471709Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 2/2 2025-06-03T10:29:36.471714Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:36.471728Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:36.471756Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:29:36.471760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:29:36.471763Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:1 2025-06-03T10:29:36.471765Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:1 2025-06-03T10:29:36.471771Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:29:36.471773Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:36.471853Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:36.471859Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:29:36.471869Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:29:36.471874Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:29:36.471878Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:29:36.472579Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:36.472597Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:475:2430] 2025-06-03T10:29:36.472672Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-03T10:29:36.473590Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:36.473669Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/myseq" took 91us result status StatusPathDoesNotExist 2025-06-03T10:29:36.473719Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/myseq\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/myseq" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:36.473813Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:36.473837Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 28us result status StatusPathDoesNotExist 2025-06-03T10:29:36.473854Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true 
} } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:36.473903Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:36.473951Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove" took 49us result status StatusSuccess 2025-06-03T10:29:36.474090Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove" PathDescription { Self { Name: "TableMove" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: true } Table { Name: "TableMove" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 DefaultFromSequence: "myseq" NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false Sequences { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:36.474201Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/myseq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:36.474230Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/myseq" took 31us 
result status StatusSuccess 2025-06-03T10:29:36.474278Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/myseq" PathDescription { Self { Name: "myseq" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } SequenceDescription { Name: "myseq" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 2 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::CreateAlter [GOOD]
Test command err:
==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:15.996347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:15.996384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.996390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:15.996397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:29:15.996413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:15.996418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:15.996433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:15.996449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:15.996595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:15.996675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:16.033566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:16.033602Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:16.033718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:16.041921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:16.042106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:16.042151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:16.044076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:16.044135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:16.044288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.044348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:16.044885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:16.044951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:16.045254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:16.045266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:16.045285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
2025-06-03T10:29:16.045313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:16.045323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:16.045376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:16.050452Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:16.097258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:16.097388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.097487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:16.097557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:16.097572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.103879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.103934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:16.104007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.104023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:16.104031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:16.104038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:16.104772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.104784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:16.104791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:16.105209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.105220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:16.105228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:16.105237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:16.106126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:16.106575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:16.106626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:16.106860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:16.106890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:16.106899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:16.106988Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
2Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:29:36.358606Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.358688Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.358699Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:89: TAlterFileStore::TConfigureParts operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:36.359286Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275054593 2025-06-03T10:29:36.359328Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 0, tablet: 72075186233409546 2025-06-03T10:29:36.359378Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:36.359416Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxId: 1002 Origin: 72075186233409546 Status: OK 2025-06-03T10:29:36.359424Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:43: TAlterFileStore::TConfigureParts operationId# 1002:0 HandleReply TEvUpdateConfigResponse, at schemeshard: 72057594046678944 2025-06-03T10:29:36.359434Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1002:0 3 -> 128 2025-06-03T10:29:36.359933Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.359971Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.359980Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:197: TAlterFileStore::TPropose operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:36.359990Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1002 ready parts: 1/1 2025-06-03T10:29:36.360030Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1002 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:36.360460Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1002 msg type: 269090816 2025-06-03T10:29:36.360493Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1002 at step: 5000004 
FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1002 at step: 5000004 2025-06-03T10:29:36.360581Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.360605Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1002 Coordinator: 72057594046316545 AckTo { RawX1: 124 RawX2: 313532614757 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:36.360614Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:153: TAlterFileStore::TPropose operationId# 1002:0 HandleReply TEvOperationPlan, step: 5000004, at schemeshard: 72057594046678944 2025-06-03T10:29:36.360658Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:29:36.360664Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:36.360671Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:29:36.360676Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:36.360693Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:36.360708Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: false 2025-06-03T10:29:36.360715Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:36.360720Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:29:36.360725Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:29:36.360756Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:29:36.360763Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, publications: 1, subscribers: 0 2025-06-03T10:29:36.360768Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:29:36.361277Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:36.361314Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:36.361360Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:36.361367Z 
node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [73:209:2210], at schemeshard: 72057594046678944, txId: 1002, path id: 3 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:29:36.361510Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:36.361524Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:36.361529Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:29:36.361534Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:29:36.361540Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:36.361560Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 0 2025-06-03T10:29:36.362012Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-03T10:29:36.362088Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-03T10:29:36.362097Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:29:36.362174Z node 73 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:29:36.362194Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:29:36.362200Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [73:408:2387] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:29:36.362281Z node 73 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/FS_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:36.362323Z node 73 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/FS_2" took 53us result status StatusSuccess 2025-06-03T10:29:36.362420Z node 73 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/FS_2" PathDescription { Self { Name: "FS_2" PathId: 3 SchemeshardId: 
72057594046678944 PathType: EPathTypeFileStore CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 FileStoreVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } FileStoreDescription { Name: "FS_2" PathId: 3 IndexTabletId: 72075186233409546 Config { Version: 2 FolderId: "bar" CloudId: "baz" BlockSize: 4096 } Version: 2 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict+UseSink [GOOD]
>> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink
>> IndexBuildTestReboots::BaseCase
>> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc+UseSink [GOOD]
>> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink
>> KqpOlapTiering::EvictionResetTiering [GOOD]
>> IndexBuildTestReboots::DropIndexWithDataColumns
>> BasicUsage::CloseWriteSessionImmediately [GOOD]
>> IndexBuildTestReboots::DropIndex
>> IndexBuildTestReboots::CancelBuild
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapTiering::EvictionResetTiering [GOOD]
Test command err:
Trying to start YDB, gRPC: 25976, MsgBus: 10272 2025-06-03T10:29:03.326063Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668206626694412:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:03.326086Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000da0/r3tmp/tmpz9q3zz/pdisk_1.dat 2025-06-03T10:29:03.405680Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:03.405902Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668206626694391:2079] 1748946543325876 != 1748946543325879 TServer::EnableGrpc on GrpcPort 25976, node 1 2025-06-03T10:29:03.421976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:03.421988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:03.421989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:03.422026Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:03.431266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState:
Unknown -> Disconnected 2025-06-03T10:29:03.431339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:03.432396Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10272 TClient is connected to server localhost:10272 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:03.501061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:03.504568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:03.810438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:03.830010Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.830103Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.830177Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.830200Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.830221Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.830246Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.830268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.830290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.830311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.830337Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.830358Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.830379Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[1:7511668206626695139:2331];tablet_id=72075186224037891;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.832035Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.832052Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.832118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.832143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.832164Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:03.832186Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:03.832206Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:03.832228Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:03.832249Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:03.832268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:03.832295Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:03.832317Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:03.832917Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:29:03.843963Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:03.843993Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:03.844047Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:03.844072Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:03.844096Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];tablet_id=72075186224037889;process=TTxIni ... 
ER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210148Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=__MEMORY;tablet_id=72075186224037888;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210155Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=/Root/tier1;tablet_id=72075186224037888;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210156Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668206626695138:2330];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037888;storage_id=/Root/tier1;tablet_id=72075186224037888;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210325Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:37.210339Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=tiering.cpp:214;event=ExtractTtlTasks;total_portions=0;tasks=0; 2025-06-03T10:29:37.210343Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=scheme.cpp:67;rw_count=0; 2025-06-03T10:29:37.210345Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;queue=ttl;external_count=0;fline=scheme.cpp:121;internal_queue=0;external_queue=0; 2025-06-03T10:29:37.210347Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:37.210351Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210353Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210358Z node 1 
:TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=/Root/tier1;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210359Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037890;storage_id=/Root/tier1;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210806Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:37.210821Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:37.210827Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210841Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=__MEMORY;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210845Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=/Root/tier1;tablet_id=72075186224037890;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210847Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037890;self_id=[1:7511668206626695142:2332];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037890;storage_id=/Root/tier1;tablet_id=72075186224037890;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210876Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:37.210884Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;queue=ttl;external_count=0;fline=tiering.cpp:214;event=ExtractTtlTasks;total_portions=0;tasks=0; 2025-06-03T10:29:37.210891Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;queue=ttl;external_count=0;fline=scheme.cpp:67;rw_count=0; 2025-06-03T10:29:37.210892Z node 1 
:TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;queue=ttl;external_count=0;fline=scheme.cpp:121;internal_queue=0;external_queue=0; 2025-06-03T10:29:37.210894Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:37.210900Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210901Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.210906Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;storage_id=/Root/tier1;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.210907Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=72075186224037889;storage_id=/Root/tier1;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.211240Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=column_engine_logs.cpp:335;event=StartTtl;external=0; 2025-06-03T10:29:37.211253Z node 1 :TX_COLUMNSHARD_ACTUALIZATION DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;fline=column_engine_logs.cpp:364;event=StartTtl;rw_tasks_count=0; 2025-06-03T10:29:37.211257Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 2025-06-03T10:29:37.211259Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=__MEMORY;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; 2025-06-03T10:29:37.211262Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=/Root/tier1;tablet_id=72075186224037889;fline=gc_info.h:24;event=extract_for_gc_skip;reason=no_data; 
2025-06-03T10:29:37.211264Z node 1 :TX_COLUMNSHARD_BLOBS_TIER INFO: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668206626695187:2341];ev=NKikimr::TEvMediatorTimecast::TEvNotifyPlanStep;tablet_id=72075186224037889;storage_id=/Root/tier1;tablet_id=72075186224037889;fline=storage.cpp:39;event=start_gc_skipped;reason=cannot_extract; TierName: __DEFAULT Rows: 100000 RawBytes: 119239000 2025-06-03T10:29:37.248662Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946576003, txId: 281474976715846] shutting down FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037888 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037890 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037889 FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037889
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::CloseWriteSessionImmediately [GOOD]
Test command err:
2025-06-03T10:29:22.785315Z :BasicWriteSession INFO: Random seed for debugging is 1748946562785306 2025-06-03T10:29:22.914203Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668290282085455:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:22.914272Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:22.917735Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668288394553836:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:22.920459Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:22.957114Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0018a5/r3tmp/tmpKE4EwO/pdisk_1.dat 2025-06-03T10:29:22.968945Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:29:23.034644Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.034679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.036708Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:23.037474Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24591, node 1 2025-06-03T10:29:23.049355Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config
is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/0018a5/r3tmp/yandex64tuQ1.tmp 2025-06-03T10:29:23.049368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/0018a5/r3tmp/yandex64tuQ1.tmp 2025-06-03T10:29:23.049445Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/0018a5/r3tmp/yandex64tuQ1.tmp 2025-06-03T10:29:23.049505Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:23.058267Z INFO: TTestServer started on Port 12681 GrpcPort 24591 TClient is connected to server localhost:12681 PQClient connected to localhost:24591 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:29:23.075018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.075051Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.076673Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:29:23.077062Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:23.099344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-06-03T10:29:23.389322Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668294577053493:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.389505Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.390363Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668294577053528:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.391350Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710661:3, at schemeshard: 72057594046644480 2025-06-03T10:29:23.397110Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294577053530:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710661 completed, doublechecking } 2025-06-03T10:29:23.448251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:23.448448Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511668292689521308:2311], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:23.449042Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=Mjc3M2UyLWY4NTEwN2QwLTdlOTIxMWUwLTQzNjM4YWJm, ActorId: [2:7511668292689521267:2305], ActorState: ExecuteState, TraceId: 01jwtndcam75evryj5yvmwy7zc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:23.449489Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:23.491366Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668294577053683:2695] txid# 281474976710663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:23.497198Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668294577053693:2350], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:23.497861Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ODBhY2MwNjYtZTk0M2RkMzAtMTg2YjU4NzItOTljNGM5YzU=, ActorId: [1:7511668294577053489:2332], ActorState: ExecuteState, TraceId: 01jwtndc9tcetagcw01gaycge1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:23.498033Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:23.529993Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:23.567248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:24591", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-03T10:29:23.716069Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710666. Ctx: { TraceId: 01jwtndckg5q55rfysrw932a4t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmVmNTNlNDMtODVkNzEwMTctM2Y0OTRkZC00MGRjMjFlMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511668294577054022:2944] 2025-06-03T10:29:27.913950Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668290282085455:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:27.916445Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:29:27.917809Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511668288394553836:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:27.917851Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence; ... 
c" name rt3.dc1--test-topic version1 CallPersQueueGRPC request to localhost:22226 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-03T10:29:36.342706Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:22226 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-03T10:29:36.843851Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 128 ErrorReason: "the following topics are not created: rt3.dc1--test-topic, Marker# PQ95" ErrorCode: UNKNOWN_TOPIC CallPersQueueGRPC request to localhost:22226 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-03T10:29:37.345514Z node 3 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC response: Status: 1 ErrorCode: OK MetaResponse { CmdGetTopicMetadataResult { TopicInfo { Topic: "rt3.dc1--test-topic" NumPartitions: 1 Config { PartitionConfig { LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 SourceIdMaxCounts: 6000000 } Version: 1 LocalDC: true Codecs { Ids: 0 Ids: 1 Ids: 2 Codecs: "raw" Codecs: "gzip" Codecs: "lzop" } TopicPath: "/Root/PQ/rt3.dc1--test-topic" YdbDatabasePath: "/Root" Consumers { Name: "user" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } Version: 0 Important: false } } ErrorCode: OK } } } === Topic created, have version: 1 2025-06-03T10:29:37.347039Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-03T10:29:37.347318Z :INFO: [] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-03T10:29:37.347325Z :INFO: [] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:22226 2025-06-03T10:29:37.347739Z :DEBUG: [] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:29:37.347893Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:29:37.347911Z node 3 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-03T10:29:37.348055Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:29:37.348093Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:50786 2025-06-03T10:29:37.348100Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:50786 proto=v1 topic=test-topic durationSec=0 2025-06-03T10:29:37.348104Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:29:37.348493Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-03T10:29:37.348533Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-03T10:29:37.348534Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:29:37.348536Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-03T10:29:37.348541Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:29:37.349008Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:29:37.367766Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-03T10:29:37.367836Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [3:7511668351234715100:2490] connected; active server actors: 1 2025-06-03T10:29:37.367845Z node 3 
:PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-03T10:29:37.367849Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-03T10:29:37.367953Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [3:7511668351234715100:2490] disconnected; active server actors: 1 2025-06-03T10:29:37.367963Z node 3 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [3:7511668351234715100:2490] disconnected no session 2025-06-03T10:29:37.384044Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-03T10:29:37.384063Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-03T10:29:37.384066Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7511668351234715066:2490] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-03T10:29:37.384074Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:29:37.384350Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [3:7511668351234715122:2490], now have 1 active actors on pipe 2025-06-03T10:29:37.384412Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 4, Generation: 1 2025-06-03T10:29:37.384522Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:29:37.384540Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:29:37.384587Z node 4 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-03T10:29:37.384644Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. 
Partition: 0 2025-06-03T10:29:37.384682Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:29:37.384913Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:29:37.384923Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:29:37.384951Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:29:37.385060Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 2025-06-03T10:29:37.385517Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946577385 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:29:37.385556Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-03T10:29:37.385709Z :INFO: [] MessageGroupId [src] SessionId [src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0] Write session: close. Timeout = 0 ms 2025-06-03T10:29:37.385715Z :INFO: [] MessageGroupId [src] SessionId [src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0] Write session will now close 2025-06-03T10:29:37.385720Z :DEBUG: [] MessageGroupId [src] SessionId [src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0] Write session: aborting 2025-06-03T10:29:37.385873Z :INFO: [] MessageGroupId [src] SessionId [src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:29:37.385878Z :DEBUG: [] MessageGroupId [src] SessionId [src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0] Write session: destroy 2025-06-03T10:29:37.386088Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 grpc read done: success: 0 data: 2025-06-03T10:29:37.386103Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 grpc read failed 2025-06-03T10:29:37.386112Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 grpc closed 2025-06-03T10:29:37.386119Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|51d80743-c0cfb38f-b9cda436-c1e6bbe8_0 is DEAD 2025-06-03T10:29:37.386506Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:29:37.386662Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [3:7511668351234715122:2490] destroyed 2025-06-03T10:29:37.386689Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
Session was created >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips+EvWrite [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite >> TopicAutoscaling::ControlPlane_CDC_Enable [GOOD] >> TopicAutoscaling::MidOfRange [GOOD] >> TPersQueueMirrorer::ValidStartStream [GOOD] >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] >> CommitOffset::Commit_Flat_WithWrongSession_ToPast [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> CommitOffset::Commit_WithoutSession_ToPastParentPartition [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::MidOfRange [GOOD] Test command err: 2025-06-03T10:28:47.943735Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668140106192227:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.943760Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000cc5/r3tmp/tmp2YGavR/pdisk_1.dat 2025-06-03T10:28:47.984385Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:28:48.016882Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668140106192208:2079] 1748946527943582 != 1748946527943585 2025-06-03T10:28:48.017506Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7230, node 1 2025-06-03T10:28:48.032448Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000cc5/r3tmp/yandexITYeE3.tmp 2025-06-03T10:28:48.032462Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000cc5/r3tmp/yandexITYeE3.tmp 2025-06-03T10:28:48.032589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000cc5/r3tmp/yandexITYeE3.tmp 2025-06-03T10:28:48.032641Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:48.039546Z INFO: TTestServer started on Port 18709 GrpcPort 7230 2025-06-03T10:28:48.048382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:48.048446Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:48.048928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18709 PQClient connected to localhost:7230 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:48.083206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:48.092753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:48.340404Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668144401160300:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.340428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668144401160326:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.340434Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.341092Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668144401160357:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.341130Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.341431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.343708Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668144401160329:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:48.373168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.381578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.402264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.408195Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668144401160600:2554] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668144401160689:2608] 2025-06-03T10:28:52.943892Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668140106192227:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.943938Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.646310Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.649978Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.650450Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668165875997363:2673], Recipient [1:7511668144401159933:2188]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.650465Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.650469Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.650480Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668165875997359:2670], Recipient [1:7511668144401159933:2188]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:53.650482Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.656977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 2 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 2 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:53.657078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:53.657158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:53.657174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 
2025-06-03T10:28:53.657183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 1 2025-06-03T10:28:53.657188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard create ... ssing event TEvKeyValue::TEvResponse 2025-06-03T10:29:38.551091Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:29:38.551093Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72075186224037893] Try execute txs with state EXECUTED 2025-06-03T10:29:38.551095Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037893] TxId 281474976715675, State EXECUTED 2025-06-03T10:29:38.551098Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72075186224037893] TxId 281474976715675 State EXECUTED FrontTxId 281474976715675 2025-06-03T10:29:38.551101Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3987: [PQ: 72075186224037893] TPersQueue::SendEvReadSetAckToSenders 2025-06-03T10:29:38.551118Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037893] TxId 281474976715675, NewState WAIT_RS_ACKS 2025-06-03T10:29:38.551121Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72075186224037893] TxId 281474976715675 moved from EXECUTED to WAIT_RS_ACKS 2025-06-03T10:29:38.551124Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715675] PredicateAcks: 0/0 2025-06-03T10:29:38.551126Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4537: [PQ: 72075186224037893] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-03T10:29:38.551127Z node 6 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715675] PredicateAcks: 0/0 2025-06-03T10:29:38.551129Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4598: [PQ: 72075186224037893] add an TxId 281474976715675 to the list for deletion 2025-06-03T10:29:38.551131Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037893] TxId 281474976715675, NewState DELETING 2025-06-03T10:29:38.551135Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3832: [PQ: 72075186224037893] delete key for TxId 281474976715675 2025-06-03T10:29:38.551138Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:29:38.551141Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037893] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:29:38.551147Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794756, Sender [6:7511668357702333656:2472], Recipient [6:7511668357702333656:2472]: NKikimr::TEvKeyValue::TEvCollect 2025-06-03T10:29:38.551177Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794752, Sender [6:7511668357702333656:2472], Recipient [6:7511668357702333656:2472]: NKikimrClient.TKeyValueRequest Cookie: 5 CmdDeleteRange { Range { From: "tx_00000281474976715675" IncludeFrom: true To: "tx_00000281474976715675" IncludeTo: true } } CmdWrite { Key: "_txinfo" Value: "\020\244\331\333\252\3632\030\233\247\200\200\200\200@(\240\215\0060\244\331\333\252\36328\233\247\200\200\200\200@" StorageChannel: INLINE } 2025-06-03T10:29:38.551182Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274137603, Sender [6:7511668331932528918:2240], Recipient [6:7511668331932528778:2157]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, 
LocalPathId: 14] Version: 4 } 2025-06-03T10:29:38.551185Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-03T10:29:38.551195Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794753, Sender [6:7511668357702333777:2472], Recipient [6:7511668357702333656:2472]: NKikimr::TEvKeyValue::TEvIntermediate 2025-06-03T10:29:38.551197Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551207Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 14 Version: 4 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551210Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046644480, txId: 281474976715675 2025-06-03T10:29:38.551213Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715675, pathId: [OwnerId: 72057594046644480, LocalPathId: 14], version: 4 2025-06-03T10:29:38.551215Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 14] was 3 2025-06-03T10:29:38.551232Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:29:38.551256Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274137603, Sender [6:7511668331932528918:2240], Recipient [6:7511668331932528778:2157]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046644480 Generation: 2 PathId: [OwnerId: 72057594046644480, LocalPathId: 15] Version: 2 } 2025-06-03T10:29:38.551257Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-03T10:29:38.551261Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551267Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 15 Version: 2 PathOwnerId: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551269Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046644480, txId: 281474976715675 2025-06-03T10:29:38.551271Z node 6 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976715675, pathId: [OwnerId: 72057594046644480, LocalPathId: 15], version: 2 2025-06-03T10:29:38.551275Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046644480, LocalPathId: 15] was 4 
2025-06-03T10:29:38.551281Z node 6 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046644480, txId: 281474976715675, subscribers: 1 2025-06-03T10:29:38.551284Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046644480, to actorId: [6:7511668357702333630:2470] 2025-06-03T10:29:38.551287Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046644480 2025-06-03T10:29:38.551346Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794760, Sender [6:7511668357702333776:2482], Recipient [6:7511668357702333656:2472]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-03T10:29:38.551358Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551359Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:29:38.551366Z node 6 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046644480, cookie: 281474976715675 2025-06-03T10:29:38.551367Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046644480 2025-06-03T10:29:38.551379Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [6:7511668357702333630:2470] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715675 at schemeshard: 72057594046644480 2025-06-03T10:29:38.551402Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270795264, Sender [6:7511668357702333656:2472], Recipient [6:7511668357702333656:2472]: NKikimrClient.TResponse Status: 1 Cookie: 5 DeleteRangeResult { Status: 0 } WriteResult { Status: 0 StatusFlags: 1 } 2025-06-03T10:29:38.551403Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5251: HandleHook, processing event TEvKeyValue::TEvResponse 2025-06-03T10:29:38.551405Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037893] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:29:38.551407Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72075186224037893] Try execute txs with state DELETING 2025-06-03T10:29:38.551410Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037893] TxId 281474976715675, State DELETING 2025-06-03T10:29:38.551413Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:4548: [PQ: 72075186224037893] delete TxId 281474976715675 2025-06-03T10:29:38.551430Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794756, Sender [6:7511668357702333656:2472], Recipient [6:7511668357702333656:2472]: NKikimr::TEvKeyValue::TEvCollect 2025-06-03T10:29:38.551465Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [6:7511668357702333637:2725], Recipient [6:7511668331932528778:2157]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:38.551468Z node 6 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:38.551470Z node 6 :FLAT_TX_SCHEMESHARD TRACE: 
schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046644480 2025-06-03T10:29:38.551502Z node 6 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794760, Sender [6:7511668357702333782:2483], Recipient [6:7511668357702333656:2472]: NKikimr::TEvKeyValue::TEvCompleteGC 2025-06-03T10:29:38.554106Z node 6 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:141: new alter topic request 2025-06-03T10:29:38.649617Z node 6 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [6:7511668357702333656:2472], Partition 0, Sender [0:0:0], Recipient [6:7511668357702333728:2478], Cookie: 0 2025-06-03T10:29:38.649641Z node 6 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [6:7511668357702333728:2478]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:38.649645Z node 6 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:38.649659Z node 6 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:29:38.649680Z node 6 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037893, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:29:38.649683Z node 6 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:29:38.649688Z node 6 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037893, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReadFromHeadToMvccWithConflict-UseSink [GOOD] Test command err: 2025-06-03T10:29:08.395537Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:08.395639Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:08.395676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00144a/r3tmp/tmp0cHrvq/pdisk_1.dat 2025-06-03T10:29:08.569820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.591636Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:08.593144Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946547697983 != 1748946547697987 2025-06-03T10:29:08.637178Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:08.637251Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:08.649072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:08.728136Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.749089Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:08.749525Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:08.749673Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:29:08.749759Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:29:08.761492Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:08.761784Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:29:08.761826Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:29:08.762057Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:29:08.762070Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:29:08.762079Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:29:08.762159Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:29:08.762190Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:29:08.762207Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:29:08.772746Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:29:08.779662Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:29:08.779791Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:29:08.779832Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:29:08.779841Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:29:08.779848Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:29:08.779855Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:08.779960Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:08.779971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:08.780127Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:29:08.780161Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:29:08.780310Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:29:08.780322Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:08.780333Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:29:08.780340Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:08.780349Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:29:08.780356Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:29:08.780363Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:29:08.780383Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.780391Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.780401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:29:08.780430Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:29:08.780436Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:29:08.780464Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:29:08.780560Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:29:08.780576Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:29:08.780604Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:29:08.780617Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:29:08.780623Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:29:08.780630Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:29:08.780636Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:08.780701Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:29:08.780707Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:29:08.780713Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:29:08.780718Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:08.780733Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:29:08.780738Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:29:08.780745Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:29:08.780749Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:29:08.780759Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:29:08.781152Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:29:08.781168Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:29:08.793707Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:29:08.793755Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:08.793766Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:08.793784Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:29:08.793806Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:29:08.950607Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.950641Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v1500/18446744073709551615 ImmediateWriteEdgeReplied# v1500/18446744073709551615 2025-06-03T10:29:38.605061Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v3001/18446744073709551615 2025-06-03T10:29:38.605070Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-03T10:29:38.605092Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:29:38.605098Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:29:38.605104Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:29:38.605109Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:29:38.605124Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-03T10:29:38.605130Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:29:38.605134Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:29:38.605139Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:29:38.605143Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:29:38.605158Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-03T10:29:38.605237Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Continue 2025-06-03T10:29:38.605244Z node 15 
:TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Continue at tablet# 72075186224037888 2025-06-03T10:29:38.605253Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-03T10:29:38.625985Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [15:1045:2839], Recipient [15:665:2569]: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:29:38.626011Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:29:38.626019Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037891 dest 72075186224037888 producer 72075186224037891 txId 281474976715667 2025-06-03T10:29:38.626059Z node 15 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3001 txid# 281474976715667 TabletSource# 72075186224037891 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037891 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:29:38.626117Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3001 : 281474976715667] from 72075186224037888 at tablet 72075186224037888 send result to client [15:1134:2889], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:29:38.626131Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:29:38.626142Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:307: Found ready candidate operation [0:4] at 72075186224037888 for ExecuteRead 2025-06-03T10:29:38.626206Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [15:665:2569], Recipient [15:665:2569]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:38.626217Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:38.626247Z node 15 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1364: ActorId: [15:1134:2889] TxId: 281474976715667. Ctx: { TraceId: 01jwtndv3r26pmnh4k7tfxwnf6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZjQ0OThjZDgtODJjOGVjZWYtN2NmN2NjOWMtOTc2OWY2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-03T10:29:38.626294Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [15:1134:2889] TxId: 281474976715667. Ctx: { TraceId: 01jwtndv3r26pmnh4k7tfxwnf6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZjQ0OThjZDgtODJjOGVjZWYtN2NmN2NjOWMtOTc2OWY2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:29:38.626308Z node 15 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [15:1134:2889] TxId: 281474976715667. Ctx: { TraceId: 01jwtndv3r26pmnh4k7tfxwnf6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=ZjQ0OThjZDgtODJjOGVjZWYtN2NmN2NjOWMtOTc2OWY2MA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
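The paging pattern in the surrounding trace (a read request with MaxRowsInResult: 2 over six queries, answered in chunks via TEvReadContinue with firstUnprocessedQuery# 2 and # 4) fits a simple chunked-iteration scheme. The following is a hedged sketch of that loop under that reading of the counters; TReadIteratorState and its members are illustrative names, not the actual datashard types:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    // Chunked read: 6 queries served up to 2 rows at a time, mirroring the
    // firstUnprocessed# 0 -> 2 -> 4 progression in the trace above/below.
    struct TReadIteratorState {
        uint64_t TotalQueries = 6;
        uint64_t FirstUnprocessedQuery = 0;

        // Process up to maxRows queries; returns false once nothing is left.
        bool Continue(uint64_t maxRows) {
            if (FirstUnprocessedQuery >= TotalQueries) {
                return false;
            }
            const uint64_t rows =
                std::min(maxRows, TotalQueries - FirstUnprocessedQuery);
            FirstUnprocessedQuery += rows;
            const bool hasUnreadQueries = FirstUnprocessedQuery < TotalQueries;
            std::cout << "sends rowCount# " << rows
                      << ", hasUnreadQueries# " << hasUnreadQueries
                      << ", firstUnprocessed# " << (FirstUnprocessedQuery - rows)
                      << "\n";
            return hasUnreadQueries;
        }
    };

    int main() {
        TReadIteratorState state;
        while (state.Continue(2)) {
        } // three chunks, starting at queries 0, 2, and 4, then finished
    }

Run this way, the sketch emits three chunks with hasUnreadQueries# 1, 1, 0, which is exactly the shape of the "sends rowCount# 2 ... firstUnprocessed# 0/2/4" lines and the final "finished in ReadContinue" below.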
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-03T10:29:38.626544Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:38.626619Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:29:38.626626Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 1 active planned 0 immediate 1 planned 0 2025-06-03T10:29:38.626632Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:282: Return cached ready operation [0:4] at 72075186224037888 2025-06-03T10:29:38.626638Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:29:38.626688Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 2, request: { ReadId: 1 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 ResultFormat: FORMAT_ARROW MaxRowsInResult: 2 } 2025-06-03T10:29:38.626773Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-03T10:29:38.626781Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:592:2518], 1} after executionsCount# 2 2025-06-03T10:29:38.626790Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:592:2518], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551613, quota bytes left# 18446744073709551583, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 0 2025-06-03T10:29:38.626822Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:29:38.626828Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:29:38.626834Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:29:38.626839Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:29:38.626851Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:29:38.626858Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:29:38.626863Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-03T10:29:38.626868Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:38.626873Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 2025-06-03T10:29:38.626878Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:38.626883Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations 
at 72075186224037888 2025-06-03T10:29:38.626939Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 5, sender: [15:592:2518], selfId: [15:57:2104], source: [15:1112:2889] 2025-06-03T10:29:38.626967Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553217, Sender [15:665:2569], Recipient [15:665:2569]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-03T10:29:38.626978Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:592:2518], 1}, firstUnprocessedQuery# 2 2025-06-03T10:29:38.626991Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:592:2518], 1}, FirstUnprocessedQuery# 2 2025-06-03T10:29:38.627006Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:592:2518], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551611, quota bytes left# 18446744073709551551, hasUnreadQueries# 1, total queries# 6, firstUnprocessed# 2 2025-06-03T10:29:38.627162Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553217, Sender [15:665:2569], Recipient [15:665:2569]: NKikimr::TEvDataShard::TEvReadContinue 2025-06-03T10:29:38.627170Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2836: 72075186224037888 ReadContinue for iterator# {[15:592:2518], 1}, firstUnprocessedQuery# 4 2025-06-03T10:29:38.627183Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2929: 72075186224037888 ReadContinue: iterator# {[15:592:2518], 1}, FirstUnprocessedQuery# 4 2025-06-03T10:29:38.627197Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3079: 72075186224037888 readContinue iterator# {[15:592:2518], 1} sends rowCount# 2, bytes# 32, quota rows left# 18446744073709551609, quota bytes left# 18446744073709551519, hasUnreadQueries# 0, total queries# 6, firstUnprocessed# 4 2025-06-03T10:29:38.627210Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:3103: 72075186224037888 read iterator# {[15:592:2518], 1} finished in ReadContinue 2025-06-03T10:29:38.627234Z node 15 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=15&id=ZjQ0OThjZDgtODJjOGVjZWYtN2NmN2NjOWMtOTc2OWY2MA==, workerId: [15:1112:2889], local sessions count: 0 2025-06-03T10:29:38.627266Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 275709965, Sender [15:61:2108], Recipient [15:1045:2839]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715666 LockNode: 15 Status: STATUS_NOT_FOUND ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::ValidStartStream [GOOD] Test command err: 2025-06-03T10:28:47.691032Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668139047610342:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.691060Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:47.730093Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d98/r3tmp/tmpQF8ioI/pdisk_1.dat 2025-06-03T10:28:47.763945Z node 1 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668139047610324:2079] 1748946527690839 != 1748946527690842 2025-06-03T10:28:47.764380Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25775, node 1 2025-06-03T10:28:47.780816Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d98/r3tmp/yandexuPeE3f.tmp 2025-06-03T10:28:47.780830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d98/r3tmp/yandexuPeE3f.tmp 2025-06-03T10:28:47.792348Z INFO: TTestServer started on Port 14604 GrpcPort 25775 2025-06-03T10:28:47.801443Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d98/r3tmp/yandexuPeE3f.tmp 2025-06-03T10:28:47.801547Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14604 PQClient connected to localhost:25775 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:47.835185Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.835208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.836257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.840551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.847214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:48.151670Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143342578426:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.151692Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143342578435:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.151699Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.152321Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143342578474:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.152339Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.152563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.154944Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668143342578445:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:48.195634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.203912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.218195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:28:48.248195Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668143342578753:2581] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7511668143342578794:2603] 2025-06-03T10:28:52.691317Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668139047610342:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.691358Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.497171Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.501169Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.501711Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668164817415477:2673], Recipient [1:7511668139047610748:2185]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.501729Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.501733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.501743Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668164817415473:2670], Recipient [1:7511668139047610748:2185]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:53.501746Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.512864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "back-compatibility-test" TotalGroupCount: 3 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:53.512970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/back-compatibility-test, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:53.513042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: back-compatibility-test, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:53.513062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 2025-06-03T10:28:53.513069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 1 2025-06-03T10:28:53.513080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new 
shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 2 2025-06-03T10:28:53.513102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 3 2025-06-03T10:28:53.513107Z node 1 :FLAT ... 652850622581_v1 sending to client partition status 2025-06-03T10:29:38.512208Z :INFO: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] Confirm partition stream create. Partition stream id: 1. Cluster: "-". Topic: "/topic1". Partition: 0. Read offset: 5 2025-06-03T10:29:38.512415Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 grpc read done: success# 1, data# { start_partition_session_response { partition_session_id: 1 read_offset: 5 } } 2025-06-03T10:29:38.512473Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:533: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 got StartRead from client: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, commitOffset# (empty maybe) 2025-06-03T10:29:38.512490Z node 7 :PQ_READ_PROXY INFO: partition_actor.cpp:1012: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 Start reading TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 readOffset 1 committedOffset 1 clientCommitOffset (empty maybe) clientReadOffset 5 2025-06-03T10:29:38.512498Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:958: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) ready for read with readOffset 5 endOffset 10 2025-06-03T10:29:38.512511Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2309: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 partition ready for read: partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), readOffset# 5, endOffset# 10, WTime# 1748946578391, sizeLag# 1237 2025-06-03T10:29:38.512513Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2320: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1TEvPartitionReady. 
Aval parts: 1 2025-06-03T10:29:38.512523Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2243: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 performing read request: guid# 1c14bfa2-4f562204-e5ebd1e0-9a047f63, from# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), count# 6, size# 1484, partitionsAsked# 1, maxTimeLag# 0ms 2025-06-03T10:29:38.512548Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1384: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 READ FROM TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1)maxCount 6 maxSize 1484 maxTimeLagMs 0 readTimestampMs 0 readOffset 5 EndOffset 10 ClientCommitOffset 1 committedOffset 1 Guid 1c14bfa2-4f562204-e5ebd1e0-9a047f63 2025-06-03T10:29:38.512640Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--topic1' requestId: 2025-06-03T10:29:38.512653Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--topic1' partition 0 2025-06-03T10:29:38.512692Z node 7 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 3 Topic 'rt3.dc1--topic1' partition 0 user user offset 5 count 6 size 1484 endOffset 10 max time lag 0ms effective offset 5 2025-06-03T10:29:38.512703Z node 7 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 3 added 0 blobs, size 0 count 0 last offset 5, current partition end offset: 10 2025-06-03T10:29:38.512738Z node 7 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 3. All data is from uncompacted head. 2025-06-03T10:29:38.512750Z node 7 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:29:38.512799Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--topic1' partition: 0 messageNo: 0 requestId: cookie: 5 2025-06-03T10:29:38.512900Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { CmdReadResult { MaxOffset: 10 Result { Offset: 5 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 6 WriteTimestampMS: 1748946578408 CreateTimestampMS: 1748946578407 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 6 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 7 WriteTimestampMS: 1748946578409 CreateTimestampMS: 1748946578407 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 7 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 8 WriteTimestampMS: 1748946578409 CreateTimestampMS: 1748946578407 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 8 Data: "... 94 bytes ..." SourceId: "\000src-id-test" SeqNo: 9 WriteTimestampMS: 1748946578409 CreateTimestampMS: 1748946578407 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } Result { Offset: 9 Data: "... 94 bytes ..." 
SourceId: "\000src-id-test" SeqNo: 10 WriteTimestampMS: 1748946578409 CreateTimestampMS: 1748946578407 UncompressedSize: 10 PartitionKey: "" ExplicitHash: "" } BlobsFromDisk: 0 BlobsFromCache: 0 SizeLag: 18446744073709551581 RealReadOffset: 9 WaitQuotaTimeMs: 0 EndOffset: 10 StartOffset: 0 } Cookie: 5 } 2025-06-03T10:29:38.512952Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 1 from offset10 2025-06-03T10:29:38.512965Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:890: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 after read state TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 10 ReadOffset 10 ReadGuid 1c14bfa2-4f562204-e5ebd1e0-9a047f63 has messages 1 2025-06-03T10:29:38.512995Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 read done: guid# 1c14bfa2-4f562204-e5ebd1e0-9a047f63, partition# TopicId: Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0(assignId:1), size# 558 2025-06-03T10:29:38.513008Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 response to read: guid# 1c14bfa2-4f562204-e5ebd1e0-9a047f63 2025-06-03T10:29:38.513082Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 Process answer. Aval parts: 0 2025-06-03T10:29:38.513221Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] Got ReadResponse, serverBytesSize = 558, now ReadSizeBudget = 0, ReadSizeServerDelta = 52428242 2025-06-03T10:29:38.513256Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] In ContinueReadingDataImpl, ReadSizeBudget = 0, ReadSizeServerDelta = 52428242 2025-06-03T10:29:38.513375Z :DEBUG: [] Decompression task done. Partition/PartitionSessionId: 1 (5-9) 2025-06-03T10:29:38.513403Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] Returning serverBytesSize = 558 to budget 2025-06-03T10:29:38.513410Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] In ContinueReadingDataImpl, ReadSizeBudget = 558, ReadSizeServerDelta = 52428242 2025-06-03T10:29:38.513503Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] After sending read request: ReadSizeBudget = 0, ReadSizeServerDelta = 52428800 2025-06-03T10:29:38.513562Z :DEBUG: [] Take Data. Partition 0. Read: {0, 0} (5-5) 2025-06-03T10:29:38.513587Z :DEBUG: [] Take Data. Partition 0. Read: {1, 0} (6-6) 2025-06-03T10:29:38.513594Z :DEBUG: [] Take Data. Partition 0. Read: {1, 1} (7-7) 2025-06-03T10:29:38.513598Z :DEBUG: [] Take Data. Partition 0. Read: {1, 2} (8-8) 2025-06-03T10:29:38.513605Z :DEBUG: [] Take Data. Partition 0. Read: {1, 3} (9-9) 2025-06-03T10:29:38.513617Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] The application data is transferred to the client. 
Number of messages 5, size 115 bytes 2025-06-03T10:29:38.513625Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] Returning serverBytesSize = 0 to budget 2025-06-03T10:29:38.513605Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 grpc read done: success# 1, data# { read_request { bytes_size: 558 } } 2025-06-03T10:29:38.513649Z :INFO: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] Closing read session. Close timeout: 0.000000s 2025-06-03T10:29:38.513656Z :INFO: [] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic1:0:1:9:1 2025-06-03T10:29:38.513651Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 got read request: guid# a5ee0f38-23f15b79-2a28c0d7-329be891 2025-06-03T10:29:38.513664Z :INFO: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] Counters: { Errors: 0 CurrentSessionLifetimeMs: 5 BytesRead: 115 MessagesRead: 5 BytesReadCompressed: 115 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:29:38.513683Z :NOTICE: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:29:38.513691Z :DEBUG: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] [] Abort session to cluster 2025-06-03T10:29:38.513830Z :NOTICE: [] [] [f119fd30-67a8ff17-2b5db79a-85ced5ca] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:29:38.514122Z :DEBUG: [] MessageGroupId [src-id-test] SessionId [src-id-test|2943f531-6a4a405b-53f491d5-429cdb8a_0] Write session: destroy 2025-06-03T10:29:38.514415Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 grpc read done: success# 0, data# { } 2025-06-03T10:29:38.514427Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 grpc read failed 2025-06-03T10:29:38.514433Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 grpc closed 2025-06-03T10:29:38.514448Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/user session shared/user_7_2_2057935652850622581_v1 is DEAD 2025-06-03T10:29:38.514547Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session shared/user_7_2_2057935652850622581_v1 2025-06-03T10:29:38.514563Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668356296689629:2532] destroyed 2025-06-03T10:29:38.514593Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_2_2057935652850622581_v1 2025-06-03T10:29:38.514877Z node 8 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic1] pipe [7:7511668356296689627:2529] disconnected; active server actors: 1 2025-06-03T10:29:38.514893Z node 8 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic1] pipe [7:7511668356296689627:2529] client user disconnected session shared/user_7_2_2057935652850622581_v1 >> TopicAutoscaling::PartitionSplit_PreferedPartition_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK >> SystemView::PartitionStatsLocksFields [GOOD] >> SystemView::QueryStatsAllTables ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldProperlyOrderConflictingTransactionsMvcc-UseSink [GOOD] Test command err: 2025-06-03T10:29:08.998501Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:08.998607Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:08.998646Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001427/r3tmp/tmpniRbzJ/pdisk_1.dat 2025-06-03T10:29:09.128535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:09.146775Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:09.147929Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946548429033 != 1748946548429037 2025-06-03T10:29:09.190367Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:09.190412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:09.201125Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:09.281700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:09.303765Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:09.304137Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:09.304282Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:29:09.304377Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:29:09.316668Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:09.316928Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:29:09.316964Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:29:09.317220Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:29:09.317233Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:29:09.317243Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:29:09.317345Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:29:09.317370Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:29:09.317384Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:29:09.327906Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:29:09.333994Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:29:09.334131Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:29:09.334173Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:29:09.334181Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:29:09.334186Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:29:09.334194Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:09.334297Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:09.334308Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:09.334503Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:29:09.334546Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:29:09.334700Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:29:09.334713Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:09.334723Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:29:09.334731Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:09.334736Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:29:09.334742Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:29:09.334750Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:29:09.334772Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.334783Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.334792Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:29:09.334819Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:29:09.334825Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:29:09.334851Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:29:09.334943Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:29:09.334956Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:29:09.334980Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:29:09.334989Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:29:09.334994Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:29:09.335001Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:29:09.335006Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:09.335063Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:29:09.335068Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:29:09.335073Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:29:09.335077Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:09.335092Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:29:09.335096Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:29:09.335101Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:29:09.335105Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:29:09.335111Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:29:09.335435Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:29:09.335448Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:29:09.345854Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:29:09.345894Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:09.345903Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:09.345917Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:29:09.345939Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:29:09.496960Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.497015Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-03T10:29:39.164342Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-03T10:29:39.164346Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:591:2517], 3} after executionsCount# 2 2025-06-03T10:29:39.164349Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:591:2517], 3} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:29:39.164359Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:591:2517], 3} finished in read 2025-06-03T10:29:39.164364Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-03T10:29:39.164367Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:29:39.164369Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:8] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:29:39.164372Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:8] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:29:39.164376Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:8] at 72075186224037888 is Executed 2025-06-03T10:29:39.164379Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:8] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:29:39.164381Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:8] at 72075186224037888 has finished 2025-06-03T10:29:39.164384Z node 16 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:39.164387Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037888 
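A detail worth noting in the read-iterator traces above: an unbounded read quota appears to be encoded as the maximum ui64 value (18446744073709551615) and simply decremented per result chunk, so "quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599" corresponds to one 16-byte row consumed, and the 2-row, 32-byte Arrow chunks earlier in the log leave 18446744073709551613 and 18446744073709551583. A minimal sketch of that arithmetic, assuming this reading of the counters (TReadQuota is an illustrative name, not the actual DataShard type):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    // "Unlimited" quota modeled as the ui64 maximum, charged per chunk.
    struct TReadQuota {
        uint64_t RowsLeft  = std::numeric_limits<uint64_t>::max(); // 18446744073709551615
        uint64_t BytesLeft = std::numeric_limits<uint64_t>::max();

        // Charge one result chunk against the quota, saturating at zero.
        void Consume(uint64_t rowCount, uint64_t bytes) {
            RowsLeft  -= std::min(rowCount, RowsLeft);
            BytesLeft -= std::min(bytes, BytesLeft);
        }
    };

    int main() {
        TReadQuota quota;
        quota.Consume(1, 16); // one 16-byte row, as in the trace above
        std::cout << "quota rows left# " << quota.RowsLeft
                  << ", quota bytes left# " << quota.BytesLeft << "\n";
        // Prints 18446744073709551614 and 18446744073709551599; a fresh
        // quota charged with a 2-row, 32-byte chunk would leave
        // 18446744073709551613 and 18446744073709551583, matching the
        // earlier FORMAT_ARROW reads.
    }

Using the ui64 maximum as a sentinel avoids a separate "no limit" flag while keeping the per-chunk accounting uniform for limited and unlimited reads.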
2025-06-03T10:29:39.164390Z node 16 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:39.164393Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:29:39.164411Z node 16 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=16&id=NjIzMWJlYi01ZTFkOTQ0OS01NjlhMWY3NS1mYzhjZGNlZQ==, workerId: [16:1138:2906], local sessions count: 0 2025-06-03T10:29:39.164511Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [16:591:2517], Recipient [16:664:2568]: NKikimrTxDataShard.TEvRead ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-03T10:29:39.164526Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:29:39.164536Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CheckRead 2025-06-03T10:29:39.164544Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-03T10:29:39.164546Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:29:39.164549Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:29:39.164551Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:29:39.164560Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:9] at 72075186224037888 2025-06-03T10:29:39.164564Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-03T10:29:39.164566Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:29:39.164568Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:29:39.164570Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:29:39.164577Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 4 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3001 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-03T10:29:39.164591Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3001/18446744073709551615 2025-06-03T10:29:39.164594Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:591:2517], 4} after executionsCount# 1 2025-06-03T10:29:39.164597Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:591:2517], 4} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, 
quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:29:39.164605Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:591:2517], 4} finished in read 2025-06-03T10:29:39.164609Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-03T10:29:39.164611Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:29:39.164613Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:9] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:29:39.164616Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:9] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:29:39.164619Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:9] at 72075186224037888 is Executed 2025-06-03T10:29:39.164621Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:9] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:29:39.164624Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:9] at 72075186224037888 has finished 2025-06-03T10:29:39.164626Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:29:39.164675Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [16:591:2517], Recipient [16:664:2568]: NKikimrTxDataShard.TEvRead ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-03T10:29:39.164688Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:29:39.164693Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CheckRead 2025-06-03T10:29:39.164700Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-03T10:29:39.164704Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:29:39.164708Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:29:39.164710Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:29:39.164714Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:10] at 72075186224037888 2025-06-03T10:29:39.164717Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-03T10:29:39.164719Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:29:39.164721Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:29:39.164723Z node 16 :TX_DATASHARD 
TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:29:39.164731Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 5 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3000 TxId: 18446744073709551615 } ResultFormat: FORMAT_ARROW } 2025-06-03T10:29:39.164743Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3000/18446744073709551615 2025-06-03T10:29:39.164746Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[16:591:2517], 5} after executionsCount# 1 2025-06-03T10:29:39.164749Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[16:591:2517], 5} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:29:39.164757Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[16:591:2517], 5} finished in read 2025-06-03T10:29:39.164761Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-03T10:29:39.164763Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:29:39.164765Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:10] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:29:39.164767Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:10] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:29:39.164772Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:10] at 72075186224037888 is Executed 2025-06-03T10:29:39.164775Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:10] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:29:39.164777Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:10] at 72075186224037888 has finished 2025-06-03T10:29:39.164779Z node 16 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 >> TSchemeShardMoveTest::Replace >> TSchemeShardMoveTest::Chain >> IcbAsActorTests::TestHttpGetResponse [GOOD] >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpGetResponse [GOOD] >> TopicAutoscaling::WithDir_PartitionSplit_AutosplitByLoad [GOOD] >> WithSDK::DescribeConsumer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> DataShardReadIterator::ShouldReturnBrokenLockWhenReadKeyWithContinueInvisibleRowSkips-EvWrite [GOOD] Test command err: 2025-06-03T10:29:08.652974Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:08.653093Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:08.653132Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00142e/r3tmp/tmp8Q7jLn/pdisk_1.dat 2025-06-03T10:29:08.782532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.803218Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:08.804588Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946548113214 != 1748946548113218 2025-06-03T10:29:08.853976Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:08.854018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:08.865812Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:08.954010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.975395Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:08.975813Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:08.975970Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:29:08.976068Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:29:08.986975Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:08.987261Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:29:08.987305Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:29:08.987561Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:29:08.987575Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:29:08.987584Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:29:08.987672Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:29:08.987712Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:29:08.987737Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:29:08.998201Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:29:09.005096Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:29:09.005246Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:29:09.005289Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:29:09.005356Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:29:09.005363Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:29:09.005371Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:09.005502Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:09.005513Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:09.005671Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:29:09.005711Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:29:09.005877Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:29:09.005890Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:09.005904Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:29:09.005911Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:09.005916Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:29:09.005922Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:29:09.005930Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:29:09.005952Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.005959Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.005968Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:29:09.005996Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:29:09.006001Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:29:09.006031Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:29:09.006132Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:29:09.006149Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:29:09.006176Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:29:09.006188Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:29:09.006194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:29:09.006200Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:29:09.006205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:09.006271Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:29:09.006277Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:29:09.006282Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:29:09.006286Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:09.006302Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:29:09.006307Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:29:09.006314Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:29:09.006319Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:29:09.006326Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:29:09.006690Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:29:09.006706Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:29:09.017170Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:29:09.017209Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:09.017219Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:09.017236Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:29:09.017256Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:29:09.173561Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:09.173599Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... pipeline.cpp:634: LoadTxDetails at 72075186224037889 loaded tx from db 3500:281474976715666 keys extracted: 0 2025-06-03T10:29:40.180241Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-03T10:29:40.180244Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit LoadTxDetails 2025-06-03T10:29:40.180246Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-03T10:29:40.180249Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-03T10:29:40.180253Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:455: Operation [3500:281474976715666] is the new logically complete end at 72075186224037889 2025-06-03T10:29:40.180255Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:461: Operation [3500:281474976715666] is the new logically incomplete end at 72075186224037889 2025-06-03T10:29:40.180258Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [3500:281474976715666] at 72075186224037889 2025-06-03T10:29:40.180261Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-03T10:29:40.180263Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-03T10:29:40.180266Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CreateVolatileSnapshot 2025-06-03T10:29:40.180268Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CreateVolatileSnapshot 2025-06-03T10:29:40.180281Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is ExecutedNoMoreRestarts 2025-06-03T10:29:40.180283Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 
72075186224037889 executing on unit CreateVolatileSnapshot 2025-06-03T10:29:40.180286Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit DropVolatileSnapshot 2025-06-03T10:29:40.180288Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit DropVolatileSnapshot 2025-06-03T10:29:40.180291Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-03T10:29:40.180293Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit DropVolatileSnapshot 2025-06-03T10:29:40.180295Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompleteOperation 2025-06-03T10:29:40.180298Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-03T10:29:40.180315Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is DelayComplete 2025-06-03T10:29:40.180318Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompleteOperation 2025-06-03T10:29:40.180320Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [3500:281474976715666] at 72075186224037889 to execution unit CompletedOperations 2025-06-03T10:29:40.180323Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [3500:281474976715666] at 72075186224037889 on unit CompletedOperations 2025-06-03T10:29:40.180327Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [3500:281474976715666] at 72075186224037889 is Executed 2025-06-03T10:29:40.180330Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [3500:281474976715666] at 72075186224037889 executing on unit CompletedOperations 2025-06-03T10:29:40.180332Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [3500:281474976715666] at 72075186224037889 has finished 2025-06-03T10:29:40.180335Z node 15 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037889 (dry run) active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:40.180337Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:327: Check candidate unit PlanQueue at 72075186224037889 2025-06-03T10:29:40.180339Z node 15 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037889 has no attached operations 2025-06-03T10:29:40.180341Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:341: Unit PlanQueue has no ready operations at 72075186224037889 2025-06-03T10:29:40.190737Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037888 step# 3500} 2025-06-03T10:29:40.190774Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:29:40.190784Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037888 on unit CompleteOperation 2025-06-03T10:29:40.190803Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 
72075186224037888 at tablet 72075186224037888 send result to client [15:1069:2847], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:29:40.190832Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:40.190926Z node 15 :TX_DATASHARD DEBUG: datashard__plan_step.cpp:98: Sending '{TEvPlanStepAccepted TabletId# 72075186224037889 step# 3500} 2025-06-03T10:29:40.190935Z node 15 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037889 2025-06-03T10:29:40.190941Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [3500:281474976715666] at 72075186224037889 on unit CompleteOperation 2025-06-03T10:29:40.190948Z node 15 :TX_DATASHARD DEBUG: datashard.cpp:801: Complete [3500 : 281474976715666] from 72075186224037889 at tablet 72075186224037889 send result to client [15:1069:2847], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:29:40.190954Z node 15 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:29:40.191326Z node 15 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [15:592:2518], Recipient [15:665:2569]: NKikimrTxDataShard.TEvRead ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW KeysSize: 1 2025-06-03T10:29:40.191358Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:29:40.191371Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-03T10:29:40.191391Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:29:40.191397Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:29:40.191403Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:29:40.191407Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:29:40.191416Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-03T10:29:40.191422Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:29:40.191426Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:29:40.191431Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:29:40.191453Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:29:40.191470Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 10 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 Columns: 4 Snapshot { 
Step: 3500 TxId: 281474976715666 } LockTxId: 1011121314 ResultFormat: FORMAT_ARROW } 2025-06-03T10:29:40.191553Z node 15 :TX_DATASHARD DEBUG: datashard__read_iterator.cpp:2427: 72075186224037888 Acquired lock# 1011121314, counter# 18446744073709551615 for [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-03T10:29:40.191561Z node 15 :TX_DATASHARD TRACE: datashard.cpp:2476: PromoteImmediatePostExecuteEdges at 72075186224037888 promoting UnprotectedReadEdge to v3500/281474976715666 2025-06-03T10:29:40.191569Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[15:592:2518], 10} after executionsCount# 1 2025-06-03T10:29:40.191577Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[15:592:2518], 10} sends rowCount# 1, bytes# 16, quota rows left# 18446744073709551614, quota bytes left# 18446744073709551599, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:29:40.191608Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[15:592:2518], 10} finished in read 2025-06-03T10:29:40.191619Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:29:40.191624Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:29:40.191628Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:29:40.191632Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:29:40.191644Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:29:40.191648Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:29:40.191653Z node 15 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-03T10:29:40.191659Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:29:40.191677Z node 15 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest >> TSchemeShardMoveTest::Chain [GOOD] >> TSchemeShardMoveTest::Index >> TFileStoreWithReboots::CreateAlterChannels [GOOD] >> TSchemeShardMoveTest::Replace [GOOD] >> TopicAutoscaling::PartitionSplit_ReadNotEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition >> TSchemeShardMoveTest::Index [GOOD] >> CommitOffset::DistributedTxCommit_ChildFirst [GOOD] >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Replace [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 
2025-06-03T10:29:40.360196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:40.360228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:40.360235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:40.360241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:40.360257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:40.360262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:40.360272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:40.360288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:40.360390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:40.360484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:40.376294Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:40.376328Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:40.385958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:40.386148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:40.386179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:40.389156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:40.389247Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:40.389392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.389456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:40.397854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:40.397945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:40.398324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:40.398338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:40.398347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:40.398371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:40.398376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:40.398398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.400176Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:40.423550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:40.423651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.423722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:40.423777Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:40.423792Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.424683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.424717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:40.424778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.424790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:40.424796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:40.424802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:40.425336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.425351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:40.425358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:40.425825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.425837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.425849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:40.425856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:40.426589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:40.427124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:40.427172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:40.427337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.427364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:40.427383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:40.427435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:40.427441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:29:40.427470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:40.427480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:40.427905Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:40.427912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:40.427950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 2057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.965146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:40.965152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 14], at schemeshard: 72057594046678944 2025-06-03T10:29:40.965167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 13] was 1 2025-06-03T10:29:40.965173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 13], at schemeshard: 72057594046678944 2025-06-03T10:29:40.965179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-03T10:29:40.965266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 1 2025-06-03T10:29:40.965403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 2 2025-06-03T10:29:40.965868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:29:40.965882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409546 2025-06-03T10:29:40.966527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:29:40.966567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:40.966576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# 
[OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-03T10:29:40.966596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 15] was 1 2025-06-03T10:29:40.966604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 15], at schemeshard: 72057594046678944 2025-06-03T10:29:40.966610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 1 2025-06-03T10:29:40.966615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 12], at schemeshard: 72057594046678944 2025-06-03T10:29:40.966623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:40.966662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:29:40.966669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:29:40.966698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:29:40.966705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409548 2025-06-03T10:29:40.967147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-03T10:29:40.967737Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:29:40.967758Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:29:40.967768Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-03T10:29:40.967899Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Src" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:40.967951Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Src" took 66us result status StatusPathDoesNotExist 2025-06-03T10:29:40.967991Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Src\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 
1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Src" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:40.968066Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:40.968108Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Dst" took 44us result status StatusSuccess 2025-06-03T10:29:40.968224Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Dst" PathDescription { Self { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Async" LocalPathId: 23 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 25 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 
DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 22 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:40.968345Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:40.968373Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 26us result status StatusSuccess 2025-06-03T10:29:40.968443Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 28 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 28 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 26 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 22 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 105 CreateStep: 5000006 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> IcbAsActorTests::TestHttpPostReaction [GOOD] |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::CreateAlterChannels [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: 
[1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:29:21.988446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:21.988477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:21.988484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:21.988490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:21.988505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:21.988510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:21.988521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:21.988538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:21.988680Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:21.988754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:22.005543Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:22.005567Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:22.005673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:29:22.009245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:22.009287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:22.009827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:22.011255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:22.011310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:22.011430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 
2025-06-03T10:29:22.011505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:22.012881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:22.012951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:22.013201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:22.013212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:22.013237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:22.013244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:22.013248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:22.013266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:22.015145Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:22.033851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:22.033962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.034063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:22.034136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:22.034148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.035244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 
1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:22.035284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:22.035344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.035355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:22.035360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:22.035365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:22.035965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.035984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:22.035992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:22.036415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.036425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:22.036430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:22.036437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:22.037136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:22.037597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:22.037647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:22.037892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:22.037923Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:22.037930Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:22.038020Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... :647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952156Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952161Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:89: TAlterFileStore::TConfigureParts operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952456Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:0 from tablet: 72057594046678944 to tablet: 72075186233409546 cookie: 72057594046678944:1 msg type: 275054593 2025-06-03T10:29:40.952479Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 0, tablet: 72075186233409546 2025-06-03T10:29:40.952520Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1002, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:40.952544Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1002:0, at schemeshard: 72057594046678944, message: TxId: 1002 Origin: 72075186233409546 Status: OK 2025-06-03T10:29:40.952550Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:43: TAlterFileStore::TConfigureParts operationId# 1002:0 HandleReply TEvUpdateConfigResponse, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952556Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1002:0 3 -> 128 2025-06-03T10:29:40.952919Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952949Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952955Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:197: TAlterFileStore::TPropose operationId# 1002:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:40.952967Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1002 ready parts: 1/1 2025-06-03T10:29:40.953002Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1002 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:40.953379Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1002:4294967295 from tablet: 72057594046678944 to tablet: 
72057594046316545 cookie: 0:1002 msg type: 269090816 2025-06-03T10:29:40.953402Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1002, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1002 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1002 at step: 5000004 2025-06-03T10:29:40.953466Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.953483Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1002 Coordinator: 72057594046316545 AckTo { RawX1: 124 RawX2: 313532614757 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:40.953491Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_fs.cpp:153: TAlterFileStore::TPropose operationId# 1002:0 HandleReply TEvOperationPlan, step: 5000004, at schemeshard: 72057594046678944 2025-06-03T10:29:40.953526Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:29:40.953531Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:40.953536Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:29:40.953540Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:40.953555Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:40.953566Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: false 2025-06-03T10:29:40.953570Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:40.953574Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:29:40.953577Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:29:40.953598Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:29:40.953603Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, publications: 1, subscribers: 0 2025-06-03T10:29:40.953607Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:29:40.953991Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:40.954001Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:40.954041Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:40.954047Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [73:209:2210], at schemeshard: 72057594046678944, txId: 1002, path id: 3 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:29:40.954156Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:40.954169Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:40.954175Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:29:40.954182Z node 73 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:29:40.954186Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:29:40.954201Z node 73 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 0 2025-06-03T10:29:40.954502Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-03T10:29:40.954561Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-03T10:29:40.954571Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:29:40.954652Z node 73 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:29:40.954665Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:29:40.954668Z node 73 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [73:408:2387] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:29:40.954729Z node 73 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/FS_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:40.954760Z node 73 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/DirA/FS_2" took 38us result status StatusSuccess 2025-06-03T10:29:40.954826Z node 73 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/FS_2" PathDescription { Self { Name: "FS_2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeFileStore CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 FileStoreVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } FileStoreDescription { Name: "FS_2" PathId: 3 IndexTabletId: 72075186233409546 Config { Version: 2 FolderId: "bar" CloudId: "baz" BlockSize: 4096 ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Version: 2 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::Index [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:40.461366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:40.461394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:40.461399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:40.461404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:40.461415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:40.461418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:40.461425Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:40.461438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:40.461519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:40.461593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:40.472048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:40.472077Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:40.475599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:40.475706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:40.475737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:40.477731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:40.477818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:40.477948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.478001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:40.478809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:40.478859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:40.479112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:40.479122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:40.479129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:40.479137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:40.479142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:40.479165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.480309Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] 
started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:40.502216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:40.502312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.502379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:40.502427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:40.502438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:40.503284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:40.503295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:40.503300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:40.503601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:40.503878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:40.503893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, 
operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:40.503899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:40.504431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:40.504748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:40.504784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:40.504971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:40.504993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:40.505010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:40.505062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:40.505069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:40.505097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:40.505107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:40.505482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:40.505489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:40.505527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
eTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:41.313598Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 32us result status StatusSuccess 2025-06-03T10:29:41.313672Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 12 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 12 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 10 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "TableMove" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:41.313741Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/TableMove/Sync" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:41.313781Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Sync" took 42us result status StatusSuccess 2025-06-03T10:29:41.313961Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Sync" PathDescription { Self { Name: "Sync" PathId: 10 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 11 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 10 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 
2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Sync" LocalPathId: 10 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value0" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 10 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:41.314068Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/TableMove/Async" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:41.314110Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/TableMove/Async" took 43us result status StatusSuccess 2025-06-03T10:29:41.314240Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/TableMove/Async" PathDescription { Self { Name: "Async" PathId: 8 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 9 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 8 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeAsyncIndexImplTable Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Async" LocalPathId: 8 Type: EIndexTypeGlobalAsync State: EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value1" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 
100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 8 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TopicAutoscaling::PartitionSplit_ManySession_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest >> IcbAsActorTests::TestHttpPostReaction [GOOD] |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest >> TKeyValueTest::TestRenameWorks [GOOD] >> TKeyValueTest::TestRenameWorksNewApi |65.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest >> TSchemeShardMoveTest::Reject >> TSchemeShardMoveTest::MoveIndexSameDst |65.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/control/ut/unittest |65.5%| [TA] $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TSchemeShardMoveTest::MoveIndexSameDst [GOOD] >> TSchemeShardMoveTest::MoveIntoBuildingIndex >> IndexBuildTestReboots::CancelBuild [GOOD] >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] >> TSchemeShardMoveTest::Reject [GOOD] >> TSchemeShardMoveTest::OneTable >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV2PDiskIdFilter [GOOD] Test command err: 2025-06-03T10:29:10.023744Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:2918:2433], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.024101Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.024211Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.024898Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:2921:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.024964Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2924:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.033355Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2927:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.033687Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034039Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034059Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034290Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034311Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034373Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2930:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.034421Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2933:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.034656Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034730Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034752Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.034900Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.035180Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.035375Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2936:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.035751Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:1506:2181], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.035823Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.036067Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.036146Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:10.036202Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:2939:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:10.036301Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.036397Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:10.036533Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:10.423824Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:10.559207Z node 1 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:10.582258Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:10.750122Z node 1 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 30399, node 1 TClient is connected to server localhost:14265 2025-06-03T10:29:10.844254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:10.844280Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:10.844285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:10.844602Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:23.464633Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:2705:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.464807Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:1334:2236], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.465006Z node 12 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [12:2702:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.465131Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [14:2708:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.466158Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466467Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466490Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466500Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466535Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466598Z node 15 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [15:2711:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.466658Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [18:2720:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.466784Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:3146:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:23.466806Z node 12 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:23.466853Z node 18 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:23.467001Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:23.467014Z node 14 :METADATA_PROVIDER ERROR: ... path status: LookupError; 2025-06-03T10:29:33.111383Z node 23 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:33.111391Z node 24 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:33.111481Z node 26 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:33.111635Z node 26 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:33.111835Z node 20 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [20:3144:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:33.111942Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:33.112059Z node 20 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:33.112182Z node 21 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [21:3151:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:33.112336Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:33.112380Z node 21 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:33.211238Z node 19 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:33.316089Z node 19 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:33.320466Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:33.363852Z node 19 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 24574, node 19 TClient is connected to server localhost:9216 2025-06-03T10:29:33.395719Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:33.395735Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:33.395739Z node 19 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:33.395826Z node 19 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:40.785043Z node 28 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [28:2922:2433], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.785353Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.785546Z node 28 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.785809Z node 32 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [32:2931:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.785848Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:2940:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.785959Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:2937:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.786149Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786190Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786280Z node 32 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786292Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786302Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786322Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:2943:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.786437Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786558Z node 33 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [33:2934:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.786571Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786674Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786790Z node 30 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [30:2925:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.786832Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.786905Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [29:1508:2179], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.786988Z node 33 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787039Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787121Z node 30 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787142Z node 31 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [31:2928:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:40.787204Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787211Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787295Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:40.787371Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:40.887888Z node 28 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:41.000229Z node 28 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:41.004148Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:41.042565Z node 28 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 61040, node 28 TClient is connected to server localhost:16342 2025-06-03T10:29:41.070994Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:41.071013Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:41.071016Z node 28 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:41.071149Z node 28 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
>> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents
>> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD]
>> TBackupTests::ShouldSucceedOnLargeData[Zstd]
>> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD]
>> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEvents [GOOD]
>> TKeyValueTest::TestWriteLongKey
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::CancelBuild [GOOD]
Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID
72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:38.233308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:38.233336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:38.233342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:38.233347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:38.233363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:38.233368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:38.233379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:38.233395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:38.233508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:38.233585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:38.245241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:38.245261Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:38.245397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:38.247714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:38.247798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:38.247825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:38.249161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:38.249235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:38.249385Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.249452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:38.249812Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:38.249847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:38.250038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:38.250047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:38.250063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:38.250070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:38.250076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:38.250113Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:38.251252Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:38.267349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:38.267406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.267458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:38.267498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:38.267507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268097Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:38.268171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:38.268183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:38.268187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:38.268562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:38.268822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.268832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:38.268837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:38.269355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:38.269724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:38.269756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:38.269921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep 
Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.269948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:38.269957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:38.270024Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... ::TXTYPE_CANCEL_INDEX_BUILD: DoExecute TxId: 1004 DatabaseName: "/MyRoot" IndexBuildId: 1003 2025-06-03T10:29:43.244784Z node 20 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_CANCEL_INDEX_BUILD: Reply TxId: 1004 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <1003> has been finished already" severity: 1 } BUILDINDEX RESPONSE CANCEL: NKikimrIndexBuilder.TEvCancelResponse TxId: 1004 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <1003> has been finished already" severity: 1 } TestWaitNotification wait txId: 1004 2025-06-03T10:29:43.244849Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:29:43.244856Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:29:43.244909Z node 20 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:29:43.244920Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.244924Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [20:847:2786] TestWaitNotification: OK eventTxId 1004 TestWaitNotification wait txId: 1003 2025-06-03T10:29:43.244957Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:29:43.244959Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:29:43.244986Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:103: NotifyTxCompletion index build in-flight, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:29:43.244990Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:110: NotifyTxCompletion, index build is ready to notify, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:29:43.244996Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.244998Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [20:850:2789] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:29:43.245032Z node 20 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 1003 2025-06-03T10:29:43.245065Z node 20 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply 
Status: SUCCESS IndexBuild { Id: 1003 State: STATE_DONE Settings { source_path: "/MyRoot/dir/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 1003 State: STATE_DONE Settings { source_path: "/MyRoot/dir/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } 2025-06-03T10:29:43.245121Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:43.245152Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table" took 36us result status StatusSuccess 2025-06-03T10:29:43.245249Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table" PathDescription { Self { Name: "Table" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.245353Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:43.245389Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 39us result status StatusSuccess 2025-06-03T10:29:43.245510Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { 
GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.245565Z node 20 :BUILD_INDEX NOTICE: schemeshard_build_index__forget.cpp:18: TIndexBuilder::TXTYPE_FORGET_INDEX_BUILD: DoExecute TxId: 1005 DatabaseName: "/MyRoot" IndexBuildId: 1003 2025-06-03T10:29:43.245600Z node 20 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_FORGET_INDEX_BUILD: Reply TxId: 1005 Status: SUCCESS BUILDINDEX RESPONSE Forget: NKikimrIndexBuilder.TEvForgetResponse TxId: 1005 Status: SUCCESS
>> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD]
>> TFileStoreWithReboots::CreateDrop [GOOD]
>> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::MoveIntoBuildingIndex [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:42.979356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:42.979394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:42.979402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:42.979411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:29:42.979430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:42.979435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:42.979446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:42.979463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:42.979581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:42.979671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:42.996226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:42.996264Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:43.001055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:43.001191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:43.001226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:43.003970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:43.004079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:43.004213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.004276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.005116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.005171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.005541Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.005558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.005570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.005582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.005589Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.005615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.007394Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.034569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.034660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.034726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.034785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.034800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.035801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.035844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.035909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.035922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.035930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.035936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.036547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.036564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.036572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.037100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-03T10:29:43.037115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.037123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.037132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.038045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.038632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.038683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.038911Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.038947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.038974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.039046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.039056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.039093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.039107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.039625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.039638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:29:43.039679Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 16545 2025-06-03T10:29:43.710836Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976710760, at schemeshard: 72057594046678944 2025-06-03T10:29:43.710841Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 0/1, is published: true 2025-06-03T10:29:43.710844Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976710760, at schemeshard: 72057594046678944 FAKE_COORDINATOR: Add transaction: 281474976710760 at step: 5000006 FAKE_COORDINATOR: advance: minStep5000006 State->FrontStep: 5000005 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710760 at step: 5000006 2025-06-03T10:29:43.710893Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000006, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.710913Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710760 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936746 } } Step: 5000006 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.710921Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710760:0 HandleReply TEvOperationPlan: step# 5000006 2025-06-03T10:29:43.710924Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710760:0 128 -> 240 2025-06-03T10:29:43.711285Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710760:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.711296Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710760:0 ProgressState 2025-06-03T10:29:43.711309Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:29:43.711313Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:29:43.711318Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:29:43.711324Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:29:43.711329Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-03T10:29:43.711340Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:127:2152] message: TxId: 281474976710760 2025-06-03T10:29:43.711347Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:29:43.711351Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-03T10:29:43.711356Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710760:0 2025-06-03T10:29:43.711369Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-03T10:29:43.711679Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-03T10:29:43.711689Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710760 2025-06-03T10:29:43.711698Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-06-03T10:29:43.711710Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:29:43.711968Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking 2025-06-03T10:29:43.711979Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:29:43.711987Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:29:43.712317Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-03T10:29:43.712335Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: Sync, IndexColumn: value0, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:454:2414], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:29:43.712339Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-03T10:29:43.712355Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.712359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:633:2581] TestWaitNotification: OK eventTxId 102 2025-06-03T10:29:43.712451Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:43.712493Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 50us result status StatusSuccess 2025-06-03T10:29:43.712590Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "SomeIndex" LocalPathId: 3 Type: EIndexTypeGlobal State: 
EIndexStateReady KeyColumnNames: "value1" SchemaVersion: 1 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableIndexes { Name: "Sync" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value0" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD]
>> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD]
>> TSchemeShardMoveTest::OneTable [GOOD]
>> TopicAutoscaling::ReadingAfterSplitTest_PreferedPartition_AutoscaleAwareSDK [GOOD]
>> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Raw] [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:43.826386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:43.826411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.826415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:43.826427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:43.826441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:43.826444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit
10000 2025-06-03T10:29:43.826452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.826467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:43.826550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:43.826621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:43.837181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:43.837207Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:43.840780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:43.840898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:43.840939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:43.842762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:43.842818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:43.842926Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.842974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.843487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.843540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.843789Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.843798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.843807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.843818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.843823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.843838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.845073Z node 1 
:HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.861482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.861561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.861621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.861669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.861677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.862678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.862703Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.862759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.862767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.862771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.862775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.863285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.863298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.863302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.863663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.863676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.863681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.863687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.864206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.864633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.864671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.864839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.864861Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.864866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.864934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.864940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.864966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.864976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.865397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.865405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.865444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
emeshard: 72057594046678944 2025-06-03T10:29:43.955899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:43.955919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:29:43.955944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:43.958227Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:412:2382], attempt# 0 2025-06-03T10:29:43.961643Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:412:2382], sender# [1:411:2381] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:29:43.962487Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.962496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:43.962556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.962561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:29:43.962632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.962639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.962785Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:43.962794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:43.962798Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:43.962802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:43.962805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:43.962821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT 
/metadata.json HTTP/1.1 HEADERS: Host: localhost:27958 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FA373AEA-987A-4CF8-B1F7-6D47F943B074 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-06-03T10:29:43.963134Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } 2025-06-03T10:29:43.963871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:27958 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0F7C063D-06EF-44B9-A8E8-FFC142E37108 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-03T10:29:43.964175Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-03T10:29:43.964186Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:411:2381] 2025-06-03T10:29:43.964201Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:412:2382], sender# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:27958 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: E01D1B27-0729-49EB-AA88-30B167785252 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-03T10:29:43.964952Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-03T10:29:43.964958Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:412:2382], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:43.964981Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:43.977095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" 
BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:43.977125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:43.977157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:43.977172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:43.977182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.977185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.977189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:43.977195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:43.977236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.977748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.977824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.977830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:43.977842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:43.977846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.977850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:43.977852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.977855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation 
IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:43.977869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:43.977874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.977878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:43.977882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:43.977910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:43.978255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.978264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2368] TestWaitNotification: OK eventTxId 102
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_move/unittest >> TSchemeShardMoveTest::OneTable [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:42.979419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:42.979450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:42.979455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:42.979459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:42.979468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:42.979472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:42.979478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:42.979489Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:42.979569Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# ,
AvailableExternalDataSources# 2025-06-03T10:29:42.979638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:42.991663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:42.991696Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:42.996314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:42.996460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:42.996498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:42.998983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:42.999072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:42.999201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:42.999272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.000005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.000057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.000377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.000391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.000405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.000415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.000423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.000451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.002048Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.025836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.025943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.026022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.026076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.026090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.027117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.027172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.027232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.027243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.027250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.027256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.027921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.027943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.027951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.028453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.028468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.028475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.028485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.029336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.029979Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.030040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.030270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.030307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.030335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.030406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.030416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.030455Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.030471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.031137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.031150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.031205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:29:44.010249Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:44.010253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 108, path id: 1 2025-06-03T10:29:44.010257Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 108, path id: 4 2025-06-03T10:29:44.010307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.010312Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:29:44.010324Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.010328Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 108:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:44.010332Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 129 -> 240 2025-06-03T10:29:44.010469Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:29:44.010479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 23 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:29:44.010485Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-03T10:29:44.010488Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 23 2025-06-03T10:29:44.010493Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:44.010640Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:29:44.010648Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:29:44.010651Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-03T10:29:44.010654Z node 2 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:29:44.010657Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:29:44.010664Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-03T10:29:44.011014Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.011021Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:44.011091Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:29:44.011113Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:29:44.011116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:29:44.011120Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:29:44.011122Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:29:44.011125Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-03T10:29:44.011134Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:338:2316] message: TxId: 108 2025-06-03T10:29:44.011138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:29:44.011142Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 108:0 2025-06-03T10:29:44.011147Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 108:0 2025-06-03T10:29:44.011162Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:29:44.011290Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-03T10:29:44.011513Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-03T10:29:44.011757Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:29:44.011763Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:834:2790] TestWaitNotification: OK eventTxId 108 wait until 72075186233409546 is 
deleted wait until 72075186233409547 is deleted 2025-06-03T10:29:44.011881Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:29:44.011890Z node 2 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409547 2025-06-03T10:29:44.023548Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 310 RawX2: 8589936888 } TabletId: 72075186233409546 State: 4 2025-06-03T10:29:44.023582Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:29:44.023926Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:29:44.024004Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:29:44.024048Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.024100Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409546 2025-06-03T10:29:44.024550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:44.024557Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:29:44.024568Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:44.025081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:29:44.025096Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:29:44.025128Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Deleted tabletId 72075186233409546 2025-06-03T10:29:44.025267Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025331Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 68us result status StatusSuccess 2025-06-03T10:29:44.025410Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 23 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 23 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 21 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnSingleShardTable[Zstd] [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:43.939353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:43.939376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.939381Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:43.939392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:43.939404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:43.939407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:43.939414Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.939424Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:29:43.939494Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:43.939561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:43.949809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:43.949832Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:43.954098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:43.954205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:43.954248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:43.956134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:43.956176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:43.956266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.956300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.956828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.956868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.957054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.957061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.957069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.957075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.957078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.957096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.958304Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.972804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.972892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.972945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.972988Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.972996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.973687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.973709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.973760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.973768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.973772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.973776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.974124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.974133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.974138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.974480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.974494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.974501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.974508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.975099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.975595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.975631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.975799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.975822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.975829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.975888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.975894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.975922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.975931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.976320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.976327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.976365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
: 72057594046678944 2025-06-03T10:29:44.067191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:44.067212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:29:44.067236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:44.069481Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:412:2382], attempt# 0 2025-06-03T10:29:44.072564Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:412:2382], sender# [1:411:2381] REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:11158 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: B4FBE86C-2CD5-43FE-8219-1533B19DFFF8 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-06-03T10:29:44.073962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:44.073979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:44.074061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:44.074066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:29:44.074195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.074213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:44.074319Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } 2025-06-03T10:29:44.074988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:44.075010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:44.075014Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:44.075023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:44.075029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:44.075049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:11158 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 7F6A277C-A124-4C52-8FC9-34D08D93448F amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-03T10:29:44.075747Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-03T10:29:44.075775Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:411:2381] 2025-06-03T10:29:44.075828Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:412:2382], sender# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-03T10:29:44.075880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:11158 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: FAB5B77A-91B7-44E2-AB8A-88CD83F79986 amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-03T10:29:44.076721Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:412:2382], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-03T10:29:44.076735Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:412:2382], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:44.076776Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:411:2381], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:44.079155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { 
Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.079180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:44.079202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.079218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.079231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.079234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.079238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:44.079244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:44.079289Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.079706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.079773Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.079779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:44.079791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.079795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.079799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.079801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.079804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:44.079815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:44.079820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.079823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:44.079827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:44.079854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:44.080260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:44.080271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:397:2368] TestWaitNotification: OK eventTxId 102
>> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Raw] [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:43.829492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:43.829521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.829527Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:43.829541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:43.829558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:43.829563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:43.829573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.829589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:29:43.829705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:43.829790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:43.845634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:43.845665Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:43.849802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:43.849919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:43.849958Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:43.851941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:43.851991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:43.852107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.852153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.852741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.852783Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.853041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.853052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.853064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.853071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.853077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.853095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.854414Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.877878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.877965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.878029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.878086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.878099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.878899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.878934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.878992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.879004Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.879010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.879016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.879508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.879521Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.879525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.879939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.879952Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.879959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.879968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.880764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.881246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.881289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.881528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.881558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.881567Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.881653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.881664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.881700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.881715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.882158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.882166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.882207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
art# 0, uploadId# (empty maybe) 2025-06-03T10:29:44.016713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:44.016727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:44.016732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:44.016738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:44.016745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:29:44.016766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-03T10:29:44.016849Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:480:2434], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:28354 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: ADCBCB7E-E1E8-44E6-8DCF-FBA05902BFBF amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 2025-06-03T10:29:44.018522Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:474:2431], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-03T10:29:44.018650Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:473:2430] 2025-06-03T10:29:44.018668Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:474:2431], sender# [1:473:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT /data_00.csv HTTP/1.1 HEADERS: Host: localhost:28354 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 22A21D67-F2F7-44EE-A5E8-BB8BC9FFE948 amz-sdk-request: attempt=1 content-length: 11 content-md5: bj4KQf2rit2DOGLxvSlUww== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv / / 11 2025-06-03T10:29:44.019615Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:474:2431], result# PutObjectResult { ETag: 
6e3e0a41fdab8add833862f1bd2954c3 } 2025-06-03T10:29:44.019630Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:474:2431], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:44.019770Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:473:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:44.021958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:29:44.043249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:44.043304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.043482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-03T10:29:44.043500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969598 } Origin: 
72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 322 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.043519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043523Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:44.043552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.044235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:44.044407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.044412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.044418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.044421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.044426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 
2025-06-03T10:29:44.044448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:370:2335] message: TxId: 102 2025-06-03T10:29:44.044454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.044461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:44.044466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:44.044505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:44.044959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:44.044968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:453:2411] TestWaitNotification: OK eventTxId 102
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_filestore_reboots/unittest >> TFileStoreWithReboots::CreateDrop [GOOD]
Test command err:
==== RunWithTabletReboots
=========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:21.841464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:21.841493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:21.841499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:21.841505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:21.841520Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:21.841524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:21.841536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:21.841551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:21.841679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:21.841760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:21.856585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:21.856610Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:21.856697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:21.858933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:21.859034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:21.859068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:21.860505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:21.860557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:21.860700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:21.860759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:21.861156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:21.861205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:21.861441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:21.861451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:21.861464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:21.861470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:21.861474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:21.861506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:21.862661Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:21.881379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:21.881468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.881556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:21.881612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:21.881622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.882266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:21.882292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:21.882337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.882346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:21.882350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:21.882356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:21.882758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.882770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:21.882774Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:21.883085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.883095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:21.883099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:21.883106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:21.883702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:21.884073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:21.884111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:21.884259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:21.884279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:21.884285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:21.884355Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
published: false 2025-06-03T10:29:43.987191Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:29:43.987194Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:29:43.987197Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:29:43.987213Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:43.987218Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, publications: 3, subscribers: 0 2025-06-03T10:29:43.987221Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:29:43.987223Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 2], 7 2025-06-03T10:29:43.987226Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:29:43.987674Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:29:43.987686Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:29:43.987740Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:29:43.987757Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.987761Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.987788Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:43.987795Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:29:43.987812Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.987816Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [87:208:2209], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-03T10:29:43.987820Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [87:208:2209], at schemeshard: 72057594046678944, txId: 1002, path id: 2 2025-06-03T10:29:43.987822Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [87:208:2209], at schemeshard: 72057594046678944, txId: 1002, path id: 3 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:29:43.987919Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.987930Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.987935Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:29:43.987940Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:29:43.987945Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:29:43.988012Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:43.988017Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:29:43.988026Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:29:43.988068Z node 87 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-03T10:29:43.988098Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988104Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988107Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:29:43.988110Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:29:43.988112Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:29:43.988132Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 
72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.988174Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988181Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988184Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:29:43.988186Z node 87 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-03T10:29:43.988189Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:29:43.988194Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 0 2025-06-03T10:29:43.988501Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988824Z node 87 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:29:43.988857Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:29:43.988888Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:29:43.989112Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-03T10:29:43.989158Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-03T10:29:43.989165Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:29:43.989210Z node 87 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:29:43.989225Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.989230Z node 87 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [87:418:2397] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:29:43.989387Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: 
Path: "/MyRoot/DirA/FS_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:29:43.989429Z node 87 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/FS_3" took 68us result status StatusPathDoesNotExist 2025-06-03T10:29:43.989472Z node 87 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirA/FS_3\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/DirA\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/DirA/FS_3" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/DirA" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Raw] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:29.118668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:29.118710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:29.118717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:29.118737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:29.118753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:29.118759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:29.118771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:29.118795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:29.118897Z node 
1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:29.118975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:29.132446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:29.132478Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:29.136909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:29.137058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:29.137109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:29.139872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:29.139939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:29.140062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.140115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:29.140835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:29.140906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:29.141184Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:29.141193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:29.141202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:29.141209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:29.141214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:29.141230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.143210Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:29.160555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-03T10:29:29.160666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.160750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:29.160818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:29.160834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.161931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.161968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:29.162034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.162044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:29.162049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:29.162054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:29.162672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.162692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:29.162700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:29.163238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.163254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:29.163262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.163270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:29.164195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:29.164766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:29.164816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:29.165078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:29.165116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:29.165125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.165209Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:29.165219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:29.165251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:29.165265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:29.165950Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:29.165965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:29.166032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2864 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 80B7C9B8-A6DD-4069-A1A4-47AC74DE06DE amz-sdk-request: attempt=1 content-length: 130 content-md5: Wyd1w7MZYbbZucaVvuRDAw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=100&uploadId=1 / 130 2025-06-03T10:29:43.944349Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3454:5416], result# UploadPartResult { ETag: 5b2775c3b31961b6d9b9c695bee44303 } 2025-06-03T10:29:43.944381Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3453:5415] 2025-06-03T10:29:43.944388Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3454:5416], sender# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2864 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: EB06BD8E-CF58-484C-B854-6C5A3A7D17BE amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv / partNumber=101&uploadId=1 / 0 2025-06-03T10:29:43.944952Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3454:5416], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-03T10:29:43.944959Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3454:5416], success# 1, error# , multipart# 1, uploadId# 1 2025-06-03T10:29:43.946668Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3454:5416], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[a59dd9a97cf3685e69093fb2d96653c6,bdbb215613239cb3a835fee1fe7e7ca3,cb38dbc776d5763f1926dfb22d508c87,3c430d66d07a0a4b1fa889f321fce197,43baf91083f286b60bf15e7786459cd9,90b5581bef612fa3bf9b38b336af405f,fd4869c26a12d22ee79256d778954d04,a9459bc28198b0b6bd67732c492fd740,697a3f8386ea1ff4e327de943224cb1a,614da0b4ec9464e69cd0c59909e80fbb,9b94eb3f67aa4c8a0bcbf546833ed966,fd45c3afacec641ad19e59d2b31aeba4,fd69678aecbc149601f58cf13c64d33e,90c09ab4923bc9f97f825d36e32bf362,c1586416a281a4cca2b2b4e333d9b079,f31908576272623f9f0a19bf774cde8e,6fe3b42388304d2af07c629aeb683581,7bc90eec21ca5bb3648e6a48e83c5730,8e1dda26de1af89bdffe2eefdcebea1d,14dc42d90caa1575bbfffa9dc8f21d66,92efb2368eecb32d4075c09294fde0b7,98efff5f7c7ecb42e7af65142ce05af9,6206c81807b3b9283b0173ee2c682100,616b431b91aedc9de4593321eb42ba96,9ae4762563ffdec596cc9ca4cb8913e1,946ebf2d95b4796ea2faee21f017be79,45834a9948bb4ab8b62d1894156d13ed,6ad3fe7286856927c1e00422bc8da697,ef89464d20eae46829e1bf557e4d04ce,f128e5de32097d205453080b01c94ac3,c13e650ee2cfcecfdf4f578a2e5b1c2d,fc26314711b25d20fc654cf59301b806,56f6f2c574fba86496a87a7dd5fab46c,c7951eace72cfe0f14f808173e07bc64,3d9ad3340e58b973eaf8d4f14ba3b0f9,fc41d6fdfb52389dda8b26d7a0a3a889,9974b6ae96ffd0b756acb67088e890f9,cde8a5604010abe8fccfa9492144036f,0364e048eaac35c26d48b0c5072b5255,aac5a84927124d6ae4931e2650c80d9f,eab068fe4ca35c2f3e35890bd727eb4f,bc3646bdbcbc7f97dcddf2202ea9421f,6d3f63d672eda4a4617c9e7589a68bfc,0401bade6c3031b5be872238520b993a,1c6405688f86423480173e3e316a20bd,52395f68e877cbb8d7115a247331b0a7,4b0673ac18058554d2c53bf9f99b34b2,87bc1b9e650b31e81a9ad2531e3ef9da,b29053c8cd093c8b92ad3954c42cb7be,faf1084f6b33b00e2e822d1d3c3f0083,eedec03ee8d7eda4654db7206ad0889e,be4469dd028d5519a67098055f25513f,a7afa9827ec27c565cff1ed505a06f4b,91fe8109d2ad934c4364d90c29aaba71,73b81ea00e11db12d66497d30eb48446,cce69ef69777afeab34eefa515abc7f4,4e4ac1a421353964356400b8be8e21da,32cd6083b12660bcd4062af08d89eb05,71957b9db37811c7680638b82dc6384b,a8787e692c423a2dfa07dd261e72790a,283838ab16206b27738ea6653110f833,88bf084fb3029f0d5c0705eece930d70,1ed2f9f7221f1718b81fdf2d846347dd,406706cfbc454922dcad50b9c534b8d1,dbb606c993d798974ed4f5c9ebf195ca,1a4a3868dc6fa26c6b019d237f9ea6f4,82660a3c6b576a1b3fea925f3c179a2e,d393db2749ae42e854e85eeec2ea3592,b42c92ad14ee0e5351fec7e5a045a91b,2c7af27f9dc77efbcbe71c2d7997d6e9,278aba62ab1d9e3ff16df2d82ac5f5c7,6b8380404a7e7ec95ad5f3941d5d404c,c9813b9fc1d6b5087e64849076edd0f8,160785e4dac02a91c43a497ee59eea06,db529a9ba22f60f404031cfe85e966e9,9b70af168e2d3769bd8bc4dffa3202ea,9ac39c3843b6621ace44acf430a59e06,4603ff564a46e93951f246ed18926071,66b85f35ee76a7f71f50e9aad56758de,1665c284ad04d6b893b69372bf8fc6b9,8c1c27ec88fb52f06de6e7516a392672,0a5f992db51277a05ec12f0d6459ef21,8debe3a6023155561cb0890fc05bd7fb,938ece258b7596f8eea7e82bc2b8f88c,767ca0dcf0b154fa3c818044bbfc58fd,914cc7165d994bb05824332ac120446f,ab0ece250f5959a510170ee07aa21b5d,8bf4b44d67f062026b0010a8a0b39cc0,e0aa13fa8246e68c18905d3abadfc44d,27b021b75b6a95f63ea27f7ec238c05f,673e661e4cfea1e431678dd9881c2a8c,f101b34943f1831ae8c0b46ffcb1c2d6,562b32a8142b29c1a88e507ab1981a6b,fdea4c6fc2befb44614992ca8bf34b21,b7c8ec6acc45b037978482996e910b75,aec72fbd2e171b798900b22897d00941,710ef5b5e8eba750b6acc9b32dff42a3,821c7e22ef9c22098171e7f837dcfcc8,aecc9f6d0e6f54e938a10d40fda96d7b,5b2775c3b31961b6d9b9c695bee44303,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:2864 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
A05EF2FC-4339-4F32-A498-BFC3459E466A amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv / uploadId=1 2025-06-03T10:29:43.948670Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3454:5416], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv ETag: 5d8c28efc812b445ddd02900ff3ee599 } 2025-06-03T10:29:43.948764Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:43.951420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:43.951444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:43.951483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:43.951499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:43.951514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.951519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.951524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:43.951534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:43.951623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.952462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.952587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.952595Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:43.952608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:43.952612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.952615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:43.952619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.952623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:43.952643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:43.952648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:43.952653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:43.952656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:43.952705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:43.953479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:43.953494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3439:5402] TestWaitNotification: OK eventTxId 102
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnMultiShardTable[Zstd] [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:43.831180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:43.831203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.831209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s,
StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:43.831221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:43.831231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:43.831235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:43.831244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:43.831260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:43.831344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:43.831406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:43.845724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:43.845746Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:43.849528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:43.849627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:43.849659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:43.851614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:43.851664Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:43.851775Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.851829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:43.852408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.852452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:43.852714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.852724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:43.852735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:43.852744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.852750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:43.852768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.854125Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:43.873908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:43.873984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.874042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:43.874092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:43.874102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.874823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.874847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:43.874894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.874903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:43.874908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:43.874914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:43.875357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.875371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:43.875376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: 
Change state for txid 1:0 3 -> 128 2025-06-03T10:29:43.875766Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.875779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:43.875785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.875793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:43.876483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:43.876899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:43.876937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:43.877118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:43.877142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:43.877149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.877217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:43.877225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:43.877258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:43.877269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:43.877753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:43.877762Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:43.877804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... pp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:44.009023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:44.009028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:29:44.009040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true 2025-06-03T10:29:44.009065Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:474:2431], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } 2025-06-03T10:29:44.009511Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:481:2436], result# PutObjectResult { ETag: f0d3871f5c9cc0f5c2e4afaffb7eeef2 } 2025-06-03T10:29:44.009522Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:481:2436], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:44.009591Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:480:2434], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:23216 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 3B75FE38-511F-40CC-8293-67B551300D56 amz-sdk-request: attempt=1 content-length: 638 content-md5: Myp3UygaBNGp6+7AMgyRnQ== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 638 FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:29:44.011252Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:474:2431], result# PutObjectResult { ETag: 332a7753281a04d1a9ebeec0320c919d } 2025-06-03T10:29:44.011306Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:473:2430] 2025-06-03T10:29:44.011351Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:474:2431], sender# [1:473:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } 2025-06-03T10:29:44.011620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:23216 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c 
HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 16DF8827-17BB-4ED2-816C-4E12C49847E2 amz-sdk-request: attempt=1 content-length: 20 content-md5: 2qFn9G0TW8wfvJ9C+A5Jbw== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 20 2025-06-03T10:29:44.012040Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:474:2431], result# PutObjectResult { ETag: daa167f46d135bcc1fbc9f42f80e496f } 2025-06-03T10:29:44.012053Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:474:2431], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:44.012086Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:473:2430], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:44.025151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:44.025194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 319 RawX2: 4294969597 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 1, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.025372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025378Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409547, partId: 0 2025-06-03T10:29:44.025389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 322 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 322 RawX2: 4294969598 } Origin: 72075186233409547 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10 RowsProcessed: 1 } 2025-06-03T10:29:44.025400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:2, datashard: 72075186233409547, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:44.025429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.026052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.026102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.026192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.026202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:44.026217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.026222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.026228Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:44.026231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.026236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:44.026255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:370:2335] message: TxId: 102 2025-06-03T10:29:44.026264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:44.026270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:44.026275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:44.026306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:44.026759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:44.026774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:453:2411] TestWaitNotification: OK eventTxId 102
>> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk
>> TKeyValueTest::TestIncorrectRequestThenResponseError
>> TKeyValueTest::TestInlineCopyRangeWorks
>> TKeyValueTest::TestIncorrectRequestThenResponseError [GOOD]
>> TKeyValueTest::TestIncrementalKeySet
>> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi
>> TKeyValueCollectorTest::TestKeyValueCollectorSingle
>> TKeyValueTest::TestWrite200KDeleteThenResponseError
>> TKeyValueTest::TestCleanUpDataOnEmptyTablet
>> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD]
>> TKeyValueTest::TestWriteReadPatchRead
>> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi
>> TKeyValueCollectorTest::TestKeyValueCollectorSingle [GOOD]
>> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError
>> TKeyValueTest::TestWriteReadPatchRead [GOOD]
>> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi
>> TKeyValueCollectorTest::TestKeyValueCollectorSingleWithOneError [GOOD]
>> TKeyValueCollectorTest::TestKeyValueCollectorMultiple
>> TKeyValueTest::TestIncrementalKeySet [GOOD]
>> TKeyValueTest::TestGetStatusWorksNewApi
|65.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
|65.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
|65.5%| [TA] {RESULT} $(B)/ydb/core/control/ut/test-results/unittest/{meta.json ... results_accumulator.log}
|65.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_donor/ydb-core-blobstorage-ut_blobstorage-ut_donor
>> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData_MinWriteBatch [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:30.906809Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:30.906842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:30.906847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:30.906862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:30.906875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:30.906878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:30.906887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:30.906906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:30.907002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:30.907078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:30.919401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:30.919440Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:30.923367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:30.923480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:30.923518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:30.926227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:30.926297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear
TempDirsState with owners number: 0 2025-06-03T10:29:30.926408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:30.926464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:30.927431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:30.927498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:30.927833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:30.927846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:30.927857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:30.927866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:30.927871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:30.927889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.929411Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:30.946207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:30.946304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.946374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:30.946423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:30.946433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.947497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
2025-06-03T10:29:30.947533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:30.947610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.947620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:30.947625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:30.947630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:30.948298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.948319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:30.948327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:30.948792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.948804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:30.948809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:30.948815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:30.949485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:30.949986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:30.950031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:30.950242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:30.950277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:30.950287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:30.950377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:30.950387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:30.950416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:30.950427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:30.951061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:30.951076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:30.951127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 3T10:29:45.585090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:412: TBackup TPropose, opId: 102:0 HandleReply TEvOperationPlan, stepId: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:29:45.585126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 129 2025-06-03T10:29:45.585156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:45.588061Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:783: [Export] [s3] Bootstrap: self# [1:3454:5416], attempt# 0 2025-06-03T10:29:45.591929Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:427: [Export] [s3] Handle TEvExportScan::TEvReady: self# [1:3454:5416], sender# [1:3453:5415] FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:29:45.594750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:45.594768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:29:45.594867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:45.594873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 REQUEST: PUT /metadata.json HTTP/1.1 HEADERS: Host: localhost:7436 Accept: */* 
Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0872B1F3-DB05-4614-800C-796802AFCA35 amz-sdk-request: attempt=1 content-length: 61 content-md5: 5ZuHSMjV1bVKZhThhMGD5g== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /metadata.json / / 61 2025-06-03T10:29:45.595118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:45.595130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:258: TBackup TProposedWaitParts, opId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:45.595319Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:387: [Export] [s3] HandleMetadata TEvExternalStorage::TEvPutObjectResponse: self# [1:3454:5416], result# PutObjectResult { ETag: e59b8748c8d5d5b54a6614e184c183e6 } 2025-06-03T10:29:45.596324Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:45.596368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:29:45.596376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:29:45.596383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:29:45.596393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:45.596446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 102 REQUEST: PUT /scheme.pb HTTP/1.1 HEADERS: Host: localhost:7436 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 0D23B839-DEDC-4EBE-B1EC-5C107F7B9273 amz-sdk-request: attempt=1 content-length: 357 content-md5: csvC5nqNTZsSLy4ymlp0/Q== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /scheme.pb / / 357 2025-06-03T10:29:45.597139Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:294: [Export] [s3] HandleScheme TEvExternalStorage::TEvPutObjectResponse: self# [1:3454:5416], result# PutObjectResult { ETag: 72cbc2e67a8d4d9b122f2e329a5a74fd } 2025-06-03T10:29:45.597197Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3453:5415] 2025-06-03T10:29:45.597415Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3454:5416], sender# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 
Checksum: } REQUEST: PUT /data_00.csv.zst HTTP/1.1 HEADERS: Host: localhost:7436 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: A0055D9F-ECC2-42D5-B19D-DA4B70ED4026 amz-sdk-request: attempt=1 content-length: 740 content-md5: P/a/uWmNWYxyRT1pAtAE7A== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-storage-class: STANDARD S3_MOCK::HttpServeWrite: /data_00.csv.zst / / 740 2025-06-03T10:29:45.598062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:29:45.598171Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:487: [Export] [s3] HandleData TEvExternalStorage::TEvPutObjectResponse: self# [1:3454:5416], result# PutObjectResult { ETag: 3ff6bfb9698d598c72453d6902d004ec } 2025-06-03T10:29:45.598179Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3454:5416], success# 1, error# , multipart# 0, uploadId# (empty maybe) 2025-06-03T10:29:45.598246Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:45.621651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:45.621691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:45.621730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:45.621748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:45.621765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:45.621770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:45.621779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:45.621788Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:45.621880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:45.622971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:45.623126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:45.623139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:45.623157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:45.623163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:45.623170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:45.623174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:45.623180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:45.623210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:45.623219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:45.623225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:45.623231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:45.623286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:45.624236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:45.624250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3439:5402] TestWaitNotification: OK eventTxId 102 >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded |65.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueCollectorTest::TestKeyValueCollectorMultiple [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithSpecificKeys [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning >> Viewer::JsonStorageListingV1PDiskIdFilter [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/viewer/ut/unittest >> Viewer::JsonStorageListingV1PDiskIdFilter 
[GOOD] Test command err: 2025-06-03T10:28:55.734546Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:319:2362], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:28:55.734601Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:55.734617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] TServer::EnableGrpc on GrpcPort 12409, node 1 TClient is connected to server localhost:15867 json result: {"Success":true,"Result":{"Total":5,"Entities":[{"Name":"/Root/test","Type":"ext_sub_domain"},{"Name":"/Root/slice","Type":"ext_sub_domain"},{"Name":"/Root/qwerty","Type":"ext_sub_domain"},{"Name":"/Root/MyDatabase","Type":"ext_sub_domain"},{"Name":"/Root/TestDatabase","Type":"ext_sub_domain"}]},"Version":2} 2025-06-03T10:29:03.697062Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:3156:2433], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.697453Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.697657Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.697801Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [3:3148:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.698030Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [7:2670:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.698157Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.698326Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.698351Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.698584Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.698636Z node 9 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [9:2676:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.698828Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [8:2673:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.699134Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.699277Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.699294Z node 9 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.699527Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [4:2661:2373], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.699582Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [5:2664:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.699631Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.699842Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [6:2667:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.700008Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700032Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700086Z node 10 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [10:2679:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:03.700161Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700170Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700280Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700303Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700434Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:03.700461Z node 10 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:03.911389Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:04.043441Z node 2 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:04.051341Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:04.111022Z node 2 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 9509, node 2 TClient is connected to server localhost:26559 2025-06-03T10:29:04.159201Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:04.159222Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:04.159228Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:04.159567Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:16.079590Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [14:2703:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.079810Z node 11 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [11:1332:2236], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.080050Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [13:2700:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.080215Z node 15 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [15:2706:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.080425Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:16.080569Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:16.080583Z node 11 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:16.080593Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:16.080621Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:16.080666Z node 16 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [16:2709:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:16.080714Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] A ... rrect path status: LookupError; 2025-06-03T10:29:36.766090Z node 31 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:36.766226Z node 34 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [34:3172:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:36.766341Z node 35 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [35:3175:2379], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:36.766401Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:36.766429Z node 34 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:36.766631Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:36.766639Z node 35 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:36.766892Z node 36 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [36:3123:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:36.766971Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:36.767077Z node 36 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:36.854519Z node 29 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:36.960995Z node 29 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:36.965224Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:37.007420Z node 29 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 62689, node 29 TClient is connected to server localhost:25021 2025-06-03T10:29:37.044760Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:37.044780Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:37.044783Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:37.044988Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:44.560424Z node 38 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [38:3130:2433], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.560620Z node 44 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [44:1968:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.560791Z node 38 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.560821Z node 40 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [40:3133:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.560951Z node 38 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.560969Z node 44 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561038Z node 40 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561064Z node 40 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561088Z node 44 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561162Z node 39 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [39:3126:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.561403Z node 39 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561437Z node 41 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [41:3136:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.561538Z node 39 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561575Z node 42 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [42:3139:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.561643Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561725Z node 41 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561735Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561834Z node 42 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.561852Z node 43 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [43:1965:2373], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.561974Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.562023Z node 43 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.562302Z node 45 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [45:1971:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.562333Z node 46 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [46:1974:2376], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:44.562478Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.562487Z node 46 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:44.562553Z node 45 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:29:44.562561Z node 46 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # SectorMap:test-client[:2000] 2025-06-03T10:29:44.651567Z node 38 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:44.751562Z node 38 :BS_NODE WARN: {NW89@node_warden_pdisk.cpp:106} Can't write new MockDevicesConfig to file Path# /Berkanavt/kikimr/testing/mock_devices.txt 2025-06-03T10:29:44.757142Z node 38 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:424} Magic sector is present on disk, now going to format device PDiskId# 1000 2025-06-03T10:29:44.794503Z node 38 :BS_PDISK WARN: {BSP01@blobstorage_pdisk_actor.cpp:364} Device formatting done PDiskId# 1000 TServer::EnableGrpc on GrpcPort 6485, node 38 TClient is connected to server localhost:3009 2025-06-03T10:29:44.829857Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:44.829884Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:44.829890Z node 38 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:44.830107Z node 38 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration >> KeyValueReadStorage::ReadRangeOk1Key [GOOD] >> KeyValueReadStorage::ReadRangeOk [GOOD] >> KeyValueReadStorage::ReadRangeNoData [GOOD] >> WithSDK::DescribeConsumer [GOOD] >> KeyValueReadStorage::ReadError [GOOD] >> KeyValueReadStorage::ReadErrorWithWrongGroupId [GOOD] >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadRangeNoData [GOOD] Test command err: 2025-06-03T10:29:47.463380Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:47.463781Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-03T10:29:47.465379Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:47.465401Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 
1 Status# RSTATUS_OK ReadRequestCookie# 0 2025-06-03T10:29:47.466634Z 1 00h00m00.000000s :KEYVALUE INFO: {KV320@keyvalue_storage_read_request.cpp:122} Inline read request KeyValue# 1 Status# OK 2025-06-03T10:29:47.466651Z 1 00h00m00.000000s :KEYVALUE DEBUG: {KV322@keyvalue_storage_read_request.cpp:134} Expected OK or UNKNOWN and given OK readCount# 0 2025-06-03T10:29:47.466658Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadErrorWithUncorrectCookie [GOOD] Test command err: 2025-06-03T10:29:47.570854Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# ERROR ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:47.570893Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV316@keyvalue_storage_read_request.cpp:270} Unexpected EvGetResult. KeyValue# 1 Status# ERROR Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1748946587570 ErrorReason# 2025-06-03T10:29:47.572886Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 2 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:47.572912Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV318@keyvalue_storage_read_request.cpp:240} Received EvGetResult from an unexpected storage group. KeyValue# 1 GroupId# 2 ExpecetedGroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 1748946587572 ErrorReason# 2025-06-03T10:29:47.574278Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 1 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:47.574303Z 1 00h00m00.000000s :KEYVALUE ERROR: {KV319@keyvalue_storage_read_request.cpp:222} Received EvGetResult with an unexpected cookie. KeyValue# 1 Cookie# 1000 SentGets# 1 GroupId# 3 Status# OK Deadline# 18446744073709551 Now# 0 GotAt# 1748946587574 ErrorReason# |65.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |65.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql |65.6%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/yql/ydb-core-kqp-ut-yql >> TopicAutoscaling::PartitionSplit_ReadEmptyPartitions_AutoscaleAwareSDK [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 |65.6%| [TA] $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> WithSDK::DescribeConsumer [GOOD] Test command err: 2025-06-03T10:28:48.032664Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668143973207303:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:48.032696Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0007fe/r3tmp/tmpTDBaJp/pdisk_1.dat 2025-06-03T10:28:48.078846Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:28:48.110559Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:48.110814Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668143973207284:2079] 1748946528032440 != 1748946528032443 TServer::EnableGrpc on GrpcPort 31231, node 1 2025-06-03T10:28:48.122180Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/0007fe/r3tmp/yandexzXpdnQ.tmp 2025-06-03T10:28:48.122204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/0007fe/r3tmp/yandexzXpdnQ.tmp 2025-06-03T10:28:48.122296Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/0007fe/r3tmp/yandexzXpdnQ.tmp 2025-06-03T10:28:48.122356Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:48.129063Z INFO: TTestServer started on Port 25863 GrpcPort 31231 2025-06-03T10:28:48.135917Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:48.135955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:48.137028Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25863 PQClient connected to localhost:31231 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
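The KeyValueReadStorage failures just above (KV316/KV318/KV319) exercise three guards on an incoming EvGetResult: the status must be OK or UNKNOWN (per the KV322 line), the group id must match the group the read was sent to, and the cookie must identify a get that was actually sent. A sketch of that validation with hypothetical stand-in types; the real logic lives in keyvalue_storage_read_request.cpp and is not reproduced here:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Hypothetical stand-ins for the real event/state types; only the three
    // checks mirrored from the KV316/KV318/KV319 lines above are meaningful.
    enum class EStatus { OK, UNKNOWN, ERROR };

    struct TEvGetResult {
        EStatus Status;
        uint32_t GroupId;
        uint64_t Cookie;
    };

    struct TReadState {
        uint32_t ExpectedGroupId;
        uint64_t SentGets; // cookies 0 .. SentGets-1 are outstanding
    };

    // Empty string means the result is accepted; otherwise the guard that fired.
    std::string ValidateGetResult(const TReadState& st, const TEvGetResult& ev) {
        if (ev.Status != EStatus::OK && ev.Status != EStatus::UNKNOWN) {
            return "Unexpected EvGetResult";                                // KV316
        }
        if (ev.GroupId != st.ExpectedGroupId) {
            return "Received EvGetResult from an unexpected storage group"; // KV318
        }
        if (ev.Cookie >= st.SentGets) {
            return "Received EvGetResult with an unexpected cookie";        // KV319
        }
        return {};
    }

    int main() {
        TReadState st{/*ExpectedGroupId*/ 3, /*SentGets*/ 1};
        // Same shape as the second failure above: group 2 arrives, group 3 expected.
        std::printf("%s\n", ValidateGetResult(st, {EStatus::OK, 2, 0}).c_str());
    }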
2025-06-03T10:28:48.171925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:48.183960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:48.403784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143973208096:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.403812Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.403915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143973208108:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.405669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143973208114:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.405696Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.416087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.418041Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668143973208110:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:48.457078Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.464984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.478909Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668143973208314:2525] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:48.485499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668143973208470:2609] 2025-06-03T10:28:53.032954Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668143973207303:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:53.032991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.717572Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.721378Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.721812Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668165448045144:2674], Recipient [1:7511668143973207714:2188]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.721830Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.721834Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.721844Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668165448045140:2671], Recipient [1:7511668143973207714:2188]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:53.721847Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.732550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:53.732676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:53.732759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:53.732779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 
2025-06-03T10:28:53.732790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 1 2025-06-03T10:28:53.732798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard cre ... of messages 1, size 9 bytes 2025-06-03T10:29:46.955145Z :DEBUG: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] [] Returning serverBytesSize = 0 to budget >>>>> Event = 0 2025-06-03T10:29:46.955161Z :INFO: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] Closing read session. Close timeout: 1.000000s 2025-06-03T10:29:46.955167Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:1:1 2025-06-03T10:29:46.955173Z :INFO: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3 BytesRead: 9 MessagesRead: 1 BytesReadCompressed: 29 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:29:46.955152Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 grpc read done: success# 1, data# { read_request { bytes_size: 175 } } 2025-06-03T10:29:46.955189Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 got read request: guid# d35fb3b8-31063eca-e7ccefbe-96b56bbe 2025-06-03T10:29:46.955272Z :INFO: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] Closing read session. Close timeout: 0.000000s 2025-06-03T10:29:46.955276Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:test-topic:0:1:1:1 2025-06-03T10:29:46.955280Z :INFO: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] Counters: { Errors: 0 CurrentSessionLifetimeMs: 3 BytesRead: 9 MessagesRead: 1 BytesReadCompressed: 29 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:29:46.955294Z :NOTICE: [/Root] [/Root] [b6094e8b-7fb06495-eaa5b5ef-8672b5d0] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:29:46.955498Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 grpc read done: success# 0, data# { } 2025-06-03T10:29:46.955503Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 grpc read failed 2025-06-03T10:29:46.955514Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 grpc closed 2025-06-03T10:29:46.955523Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_7_1_2063221063713531282_v1 is DEAD 2025-06-03T10:29:46.955582Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668393303712493:3038], Recipient [7:7511668393303711671:2455]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.955591Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.955594Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.955597Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_1_2063221063713531282_v1 2025-06-03T10:29:46.955602Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668393303712492:2694] destroyed 2025-06-03T10:29:46.955614Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_2063221063713531282_v1 2025-06-03T10:29:46.955731Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7511668393303712489:2691] disconnected; active server actors: 1 2025-06-03T10:29:46.955738Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [7:7511668393303712489:2691] client test-consumer disconnected session test-consumer_7_1_2063221063713531282_v1 2025-06-03T10:29:46.957545Z node 7 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:159: new Describe consumer request 2025-06-03T10:29:46.957584Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:473: TDescribeConsumerActor for request operation_params { } path: "test-topic" consumer: "test-consumer" include_stats: true include_location: true 2025-06-03T10:29:46.957750Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [7:7511668393303712502:2699]: Request location 2025-06-03T10:29:46.957760Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:686: DescribeTopicImpl [7:7511668393303712502:2699]: Request sessions 2025-06-03T10:29:46.957819Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][test-topic] pipe [7:7511668393303712505:2701] connected; active server actors: 1 2025-06-03T10:29:46.957832Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877761, Sender [7:7511668393303712507:3043], Recipient [7:7511668393303711671:2455]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:46.957834Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037893][test-topic] addPartitionToResponse tabletId 72075186224037892, partitionId 0, NodeId 7, Generation 1 2025-06-03T10:29:46.957836Z node 7 
:PERSQUEUE TRACE: pq_impl.cpp:5259: HandleHook, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:46.957839Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2875: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:46.957844Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [7:7511668393303712504:2700], now have 1 active actors on pipe 2025-06-03T10:29:46.957846Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [7:7511668393303712502:2699]: Got location 2025-06-03T10:29:46.957852Z node 7 :PQ_READ_PROXY DEBUG: schema_actors.cpp:729: DescribeTopicImpl [7:7511668393303712502:2699]: Got sessions 2025-06-03T10:29:46.957857Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 271187975, Sender [7:7511668393303712502:2699], Recipient [7:7511668393303711671:2455]: NKikimrPQ.TStatus ClientId: "test-consumer" 2025-06-03T10:29:46.957859Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5249: HandleHook, processing event TEvPersQueue::TEvStatus 2025-06-03T10:29:46.957862Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1803: [PQ: 72075186224037892] Handle TEvPersQueue::TEvStatus 2025-06-03T10:29:46.957869Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7511668393303712505:2701] disconnected; active server actors: 1 2025-06-03T10:29:46.957871Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][test-topic] pipe [7:7511668393303712505:2701] disconnected no session 2025-06-03T10:29:46.957881Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188491 (NKikimr::TEvPQ::TEvPartitionStatus), Tablet [7:7511668393303711671:2455], Partition 0, Sender [7:7511668393303711671:2455], Recipient [7:7511668393303711729:2458], Cookie: 0 2025-06-03T10:29:46.957889Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188491, Sender [7:7511668393303711671:2455], Recipient [7:7511668393303711729:2458]: NKikimr::TEvPQ::TEvPartitionStatus 2025-06-03T10:29:46.957893Z node 7 :PERSQUEUE TRACE: partition.h:581: StateIdle, processing event TEvPQ::TEvPartitionStatus 2025-06-03T10:29:46.957943Z node 7 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-03T10:29:46.957988Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668393303712507:3043], Recipient [7:7511668393303711671:2455]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.957990Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.957991Z node 7 
:PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:29:46.957993Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668393303712504:2700] destroyed 2025-06-03T10:29:46.992671Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668393303711671:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668393303711729:2458], Cookie: 0 2025-06-03T10:29:46.992688Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668393303711729:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:46.992693Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:46.992711Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:29:46.992735Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:29:46.992737Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:29:46.992743Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:29:47.093010Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668393303711671:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668393303711729:2458], Cookie: 0 2025-06-03T10:29:47.093037Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668393303711729:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:47.093041Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:29:47.093057Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:29:47.093082Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:29:47.093084Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:29:47.093090Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> TKeyValueTest::TestWrite200KDeleteThenResponseError [GOOD] >> TKeyValueTest::TestSetExecutorFastLogPolicy >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorks [GOOD] >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty >> TKeyValueCollectorTest::TestKeyValueCollectorEmpty [GOOD] >> TKeyValueCollectorTest::TestKeyValueCollectorMany >> TKeyValueTest::TestWrite200KDeleteThenResponseErrorNewApi [GOOD] >> TKeyValueTest::TestWriteDeleteThenReadRemaining >> TKeyValueCollectorTest::TestKeyValueCollectorMany [GOOD] >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] >> TKeyValueTest::TestBasicWriteRead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> KeyValueReadStorage::ReadWithTwoPartsOk [GOOD] Test command err: 2025-06-03T10:29:49.304688Z 1 00h00m00.000000s :KEYVALUE INFO: {KV20@keyvalue_storage_read_request.cpp:209} Received GetResult KeyValue# 1 GroupId# 3 Status# OK ResponseSz# 2 ErrorReason# ReadRequestCookie# 0 2025-06-03T10:29:49.305083Z 1 00h00m00.000000s :KEYVALUE INFO: {KV34@keyvalue_storage_read_request.cpp:492} Send respose KeyValue# 1 Status# RSTATUS_OK ReadRequestCookie# 0 |65.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... results_accumulator.log} |65.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |65.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut |65.6%| [TA] {RESULT} $(B)/ydb/core/viewer/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi >> TopicAutoscaling::ReBalancingAfterSplit_sessionsWithPartition [GOOD] >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK >> BasicStatistics::TwoDatabases [GOOD] >> TCdcStreamTests::MeteringDedicated [GOOD] >> TCdcStreamTests::ChangeOwner >> TCdcStreamTests::ChangeOwner [GOOD] >> TCdcStreamTests::DropIndexWithStream >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] >> SystemView::QueryStatsAllTables [GOOD] >> SystemView::QueryStatsRetries >> TCdcStreamTests::DropIndexWithStream [GOOD] >> TCdcStreamTests::DropTableWithIndexWithStream >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheBeginning [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession [GOOD] >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] 
sender: [2:75:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:168:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvKeyValue::TEvGetStorageChannelStatus ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:77:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:79:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:80:2110] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:83:2057] recipient: [5:80:2110] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:82:2111] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:168:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! 
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:78:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:80:2110] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:82:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:83:2111] sender: [6:84:2057] recipient: [6:80:2110] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:83:2111] Leader for TabletID 72057594037927937 is [6:83:2111] sender: [6:169:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] |65.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoDatabases [GOOD] Test command err: 2025-06-03T10:26:26.305763Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:529:2414], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:26.305820Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:26:26.305841Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c39/r3tmp/tmpoqpnVS/pdisk_1.dat 2025-06-03T10:26:26.407425Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22316, node 1 2025-06-03T10:26:26.501581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:26.501606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:26.501611Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:26.501681Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:26.502204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:26.588794Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:26.588827Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:26.600610Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12773 2025-06-03T10:26:26.964425Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:27.807740Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-03T10:26:27.815363Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:27.815393Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:27.858596Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:27.859147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.009870Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010069Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010236Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:28.010286Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010352Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010370Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010390Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010415Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.010435Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:28.150237Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:28.150271Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:28.161386Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:28.191189Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:28.200389Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:28.200415Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:28.205627Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:28.205808Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:28.205831Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:28.205835Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:28.205839Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:28.205843Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:28.205847Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:28.205853Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:28.205996Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:28.221128Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.221152Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [3:1940:2598], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:28.222487Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1952:2607] 2025-06-03T10:26:28.223354Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1972:2619] 
2025-06-03T10:26:28.223393Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1972:2619], schemeshard id = 72075186224037897 2025-06-03T10:26:28.224795Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database1 2025-06-03T10:26:28.231152Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:28.231174Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:28.231187Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database1/.metadata/_statistics 2025-06-03T10:26:28.234177Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:28.236003Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:28.236035Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:28.343032Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:28.424835Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:28.497526Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:29.195082Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:30.016463Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:30.028817Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.028867Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.082890Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:30.083609Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.220741Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.220880Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.220996Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221035Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221082Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221104Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221119Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221156Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.221179Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:30.302123Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:30.302201Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:30.313735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:30.350818Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:30.360333Z node 2 ... DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:45.516147Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10331:4444], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:45.516166Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:10342:4449], DatabaseId: /Root/Database2, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:45.516206Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root/Database2, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:45.519964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976730658:2, at schemeshard: 72075186224038898 2025-06-03T10:29:45.535006Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:10345:4452], DatabaseId: /Root/Database2, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976730658 completed, doublechecking } 2025-06-03T10:29:45.643708Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:10433:4500] txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Database2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:45.647852Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:10462:4515]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:45.647910Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:29:45.647920Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:10464:4517] 2025-06-03T10:29:45.647929Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:10464:4517] 2025-06-03T10:29:45.648065Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:10465:4518] 2025-06-03T10:29:45.648094Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:10464:4517], server id = [2:10465:4518], tablet id = 72075186224038895, status = OK 2025-06-03T10:29:45.648128Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:10465:4518], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:29:45.648137Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:29:45.648186Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:29:45.648195Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:10462:4515], StatRequests.size() = 1 2025-06-03T10:29:45.664223Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=N2IyODBlY2QtMmE1M2MzOTktNDk0NzFlZDktNDk3ZTcyZDc=, TxId: 2025-06-03T10:29:45.664246Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=N2IyODBlY2QtMmE1M2MzOTktNDk0NzFlZDktNDk3ZTcyZDc=, TxId: 2025-06-03T10:29:45.664391Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-03T10:29:45.686813Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-03T10:29:45.686857Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
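The NOT_FOUND warnings followed by "path exist, request accepts it" above are the workload manager lazily creating the default resource pool and losing the race to a concurrent creator; the schemeshard treats the pre-existing path as success, hence the retry-then-accept pattern. For comparison, an explicit user-defined pool is declared roughly as below (illustrative name and limits, not taken from this log; the default pool needs no DDL):

    CREATE RESOURCE POOL demo_pool WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- assumed illustrative limit
        QUEUE_SIZE = 100              -- assumed illustrative queue depth
    );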
2025-06-03T10:29:45.739108Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-03T10:29:45.739143Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:29:45.812389Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:10464:4517], schemeshard count = 1 2025-06-03T10:29:46.602242Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:29:46.612752Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:46.612782Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:46.612801Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037897, LocalPathId: 4] is data table. 2025-06-03T10:29:46.612808Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:29:46.612904Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database1 2025-06-03T10:29:46.613936Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:46.617632Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=YjBkNzdjZmEtNjZiYTc2YjktZTNkZTgyMTctZWZmNTFiYzM=, TxId: 2025-06-03T10:29:46.617657Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=YjBkNzdjZmEtNjZiYTc2YjktZTNkZTgyMTctZWZmNTFiYzM=, TxId: 2025-06-03T10:29:46.617845Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:29:46.630095Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037897, LocalPathId: 4] 2025-06-03T10:29:46.630125Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
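The per-table cleanup query that the statistics aggregator logs before finishing each traversal (it appears inline several times in the records above) is, reformatted for readability:

    DECLARE $owner_id AS Uint64;
    DECLARE $local_path_id AS Uint64;

    DELETE FROM `.metadata/_statistics`
    WHERE owner_id = $owner_id
      AND local_path_id = $local_path_id;

Both parameters identify the traversed table; they correspond to the [OwnerId: ..., LocalPathId: ...] pairs in the surrounding ScheduleNextTraversal and TTxFinishTraversal records.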
2025-06-03T10:29:46.683249Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 122 ], ReplyToActorId[ [3:10556:4796]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:46.683347Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 122 ] 2025-06-03T10:29:46.683353Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 122, ReplyToActorId = [3:10556:4796], StatRequests.size() = 1 2025-06-03T10:29:48.599304Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 123 ], ReplyToActorId[ [3:10617:4816]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:48.599390Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 123 ] 2025-06-03T10:29:48.599395Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 123, ReplyToActorId = [3:10617:4816], StatRequests.size() = 1 2025-06-03T10:29:49.334972Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-03T10:29:49.335003Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:49.335015Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038898, LocalPathId: 4] is data table. 2025-06-03T10:29:49.335022Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-03T10:29:49.335141Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Database2 2025-06-03T10:29:49.335955Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:29:49.340163Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZjhiYTQ2ZDctNzJmMTgxZDYtN2ViZjQ4MDgtZGYyN2M1NDM=, TxId: 2025-06-03T10:29:49.340192Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZjhiYTQ2ZDctNzJmMTgxZDYtN2ViZjQ4MDgtZGYyN2M1NDM=, TxId: 2025-06-03T10:29:49.340364Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-03T10:29:49.352059Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 4] 2025-06-03T10:29:49.352075Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:29:50.258987Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 2 2025-06-03T10:29:50.259077Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3 2025-06-03T10:29:50.259257Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:29:50.269870Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:29:50.269893Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:29:50.334664Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 124 ], ReplyToActorId[ [3:10696:4830]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:50.334777Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 124 ] 2025-06-03T10:29:50.334788Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 124, ReplyToActorId = [3:10696:4830], StatRequests.size() = 1 2025-06-03T10:29:50.334970Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:10698:4588]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:29:50.336106Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ] 2025-06-03T10:29:50.336124Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:10698:4588], StatRequests.size() = 1 |65.6%| [LD] {RESULT} $(B)/ydb/core/sys_view/query_stats/ut/ydb-core-sys_view-query_stats-ut >> CommitOffset::Commit_WithSession_ParentNotFinished_SameSession [GOOD] >> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd |65.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |65.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots |65.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr_reboots/ydb-core-tx-schemeshard-ut_rtmr_reboots >> TKeyValueTest::TestRewriteThenLastValue >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_cdc_stream/unittest >> TCdcStreamTests::DropTableWithIndexWithStream [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:52.122051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:52.122101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:52.122109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-03T10:27:52.122116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:52.122138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:52.122143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:52.122155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:52.122172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:52.122322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:52.122467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:52.163200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:52.163232Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:52.174337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:52.174555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:52.174646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:52.184780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:52.184883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:52.185059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:52.185145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:52.186274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:52.186369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:52.186888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:52.186915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:52.186929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:52.186942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:52.186950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:52.186979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.193718Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:52.259720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:52.259830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.259949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:52.260049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:52.260071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.266191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:52.266245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:52.266322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.266341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:52.266350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:52.266358Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:52.267223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.267251Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:52.267262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 
2025-06-03T10:27:52.267797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.267816Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:52.267825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:52.267834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:52.268803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:52.269476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:52.269564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:52.269818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:52.269856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:52.269866Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:52.269964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:52.269973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:52.270018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:52.270031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:27:52.276987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:52.277009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:52.277084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ation_side_effects.cpp:906: Part operation is done id#103:2 progress is 4/5 2025-06-03T10:29:52.420513Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 4/5 2025-06-03T10:29:52.420518Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: false 2025-06-03T10:29:52.420603Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:29:52.420615Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:29:52.420620Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:29:52.420626Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:29:52.420631Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:29:52.420645Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/5, is published: true 2025-06-03T10:29:52.420679Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:29:52.420684Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:52.420713Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:29:52.420732Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 5/5 2025-06-03T10:29:52.420737Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-03T10:29:52.420742Z node 20 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 5/5 2025-06-03T10:29:52.420747Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-03T10:29:52.420751Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 5/5, is published: true 
2025-06-03T10:29:52.420784Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [20:378:2345] message: TxId: 103 2025-06-03T10:29:52.420792Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 5/5 2025-06-03T10:29:52.420799Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:29:52.420806Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:29:52.420830Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:52.420837Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:1 2025-06-03T10:29:52.420841Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:1 2025-06-03T10:29:52.420847Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:29:52.420852Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:2 2025-06-03T10:29:52.420856Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:2 2025-06-03T10:29:52.420865Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:29:52.420870Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:3 2025-06-03T10:29:52.420874Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:3 2025-06-03T10:29:52.420880Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:29:52.420885Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:4 2025-06-03T10:29:52.420889Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:4 2025-06-03T10:29:52.420901Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-03T10:29:52.420982Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:29:52.420989Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-03T10:29:52.421004Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:29:52.421013Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: 
PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:29:52.421020Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:29:52.421197Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.421251Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.421574Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.421588Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.421595Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.422057Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:29:52.422101Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:29:52.422108Z node 20 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [20:764:2667] 2025-06-03T10:29:52.422162Z node 20 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 103 2025-06-03T10:29:52.422276Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:52.422331Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream" took 66us result status StatusPathDoesNotExist 2025-06-03T10:29:52.422375Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/Index/indexImplTable/Stream" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: 
EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:29:52.422438Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:29:52.422454Z node 20 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" took 19us result status StatusPathDoesNotExist 2025-06-03T10:29:52.422473Z node 20 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/Index/indexImplTable/Stream/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table/Index/indexImplTable\' (id: [OwnerId: 72057594046678944, LocalPathId: 4]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/Index/indexImplTable/Stream/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> SystemView::QueryStatsRetries [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] |65.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |65.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut |65.6%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/ut/ydb-core-tablet_flat-ut >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::QueryStatsRetries [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b0a/r3tmp/tmpaurYYj/pdisk_1.dat 2025-06-03T10:28:09.381753Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:28:09.409079Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:09.417120Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.417146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.419405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29982, node 1 2025-06-03T10:28:09.444197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:09.444219Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:09.444222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:09.444282Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26864 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:09.487727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:09.496137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:09.504778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:09.507608Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.507632Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.508530Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-03T10:28:09.509702Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.509730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.511368Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-03T10:28:09.599243Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:09.599380Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:09.677592Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:09.684314Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667974391238730:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:09.684343Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:09.685787Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511667976155564166:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:09.686557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:28:09.686947Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Tenant2/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 
2025-06-03T10:28:09.689590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.689626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.691146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:09.691167Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:09.691904Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:28:09.692068Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:09.693218Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:28:09.693484Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:09.806685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:28:09.899454Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667974250302447:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.899484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.899694Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667974250302459:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.900705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715663:3, at schemeshard: 72057594046644480 2025-06-03T10:28:09.918545Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667974250302461:2344], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715663 completed, doublechecking } 2025-06-03T10:28:09.997858Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667974250302538:2952] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:10.087012Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnb4haf4yx95ng35ef6veh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ2ZGEyMDUtYjFlZDNmNGUtMjMzMzhlZDgtNjU3NjQ3ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.101957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:10.193209Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnb4t39jy0ggc0cxxycr4b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ2ZGEyMDUtYjFlZDNmNGUtMjMzMzhlZDgtNjU3NjQ3ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.199123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:28:10.282109Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnb4wreyrb92he8t68b1hd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjQ2ZGEyMDUtYjFlZDNmNGUtMjMzMzhlZDgtNjU3NjQ3ZmY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.310908Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtnb4xj3fad5fy6f235k967, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDVkM2ZjYjQtYjZiMWE2ZDgtMjcxZWQyYzItMzY2Njg3M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:28:10.311967Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [1:7511667978545270106:2373], owner: [1:7511667978545270103:2371], scan id: 0, table id: [72057594046644480:1:0:partition_stats] 2025-06-03T10:28:10.317444Z node 1 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [1:7511667978545270106:2373], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node c ... 1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:50.849140Z node 71 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(71, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:50.851013Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:29:50.857966Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:51.205239Z node 71 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7511668413002231664:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:51.205265Z node 71 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [71:7511668413002231656:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:51.205361Z node 71 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:51.206264Z node 71 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:29:51.211690Z node 71 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [71:7511668413002231670:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:29:51.304338Z node 71 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [71:7511668413002231740:2655] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:51.317590Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtne7f4anw15b390wk8sss9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=N2EyZjFlMTgtNzI2MWI5NmEtNTQ4ZDc2ZC1iNDgxYzkxMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:29:51.330874Z node 71 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtne7jq23fgz4cjz8w866fw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=71&id=YTkxNjc0YzgtNDQ4MGRmMjEtNGI0OTg5MDYtZDg3OWEyODM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:29:51.331339Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [71:7511668413002231810:2357], owner: [71:7511668413002231806:2355], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_request_units_one_hour] 2025-06-03T10:29:51.331494Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [71:7511668413002231810:2357], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:29:51.331586Z node 71 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [71:7511668413002231810:2357], row count: 1, finished: 1 2025-06-03T10:29:51.331600Z node 71 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [71:7511668413002231810:2357], owner: [71:7511668413002231806:2355], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_request_units_one_hour] 2025-06-03T10:29:51.332328Z node 71 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946591330, txId: 281474976715662] shutting down 2025-06-03T10:29:52.408377Z node 76 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[76:7511668417930254019:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:52.408532Z node 76 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b0a/r3tmp/tmpEYnKHY/pdisk_1.dat 2025-06-03T10:29:52.424405Z node 76 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1805, node 76 2025-06-03T10:29:52.441571Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:52.441583Z node 76 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:52.441585Z node 76 :NET_CLASSIFIER WARN: 
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:52.441638Z node 76 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6077 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:52.509120Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:52.509162Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:52.510967Z node 76 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(76, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:52.512478Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:52.518373Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:52.780642Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7511668417930254948:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:52.780670Z node 76 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [76:7511668417930254959:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:52.780706Z node 76 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:52.781426Z node 76 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:29:52.785530Z node 76 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [76:7511668417930254962:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:29:52.860838Z node 76 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [76:7511668417930255035:2663] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:52.875473Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtne90cfw764ra79q2j2r2k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=ZDk4MGIzODEtMWExYmI4Mi1iODU2MTY3Yi00YzRlNjcwOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:29:52.889142Z node 76 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtne93d4v23a98kq5vh3d5a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=76&id=MjQ0NDRhNWYtZjc1ZDA2ODktNGQwMDQ5OS00MzQyNzRhMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:29:52.889656Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [76:7511668417930255105:2357], owner: [76:7511668417930255101:2355], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:29:52.889849Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:321: Scan prepared, actor: [76:7511668417930255105:2357], schemeshard id: 72057594046644480, hive id: 72057594037968897, database: /Root, database owner: root@builtin, domain key: [OwnerId: 72057594046644480, LocalPathId: 1], database node count: 1 2025-06-03T10:29:52.889984Z node 76 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [76:7511668417930255105:2357], row count: 1, finished: 1 2025-06-03T10:29:52.889999Z node 76 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [76:7511668417930255105:2357], owner: [76:7511668417930255101:2355], scan id: 0, table id: [72057594046644480:1:0:top_queries_by_read_bytes_one_minute] 2025-06-03T10:29:52.890915Z node 76 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946592888, txId: 281474976715662] shutting down >> TKeyValueTest::TestWriteLongKey [GOOD] >> Donor::CheckOnlineReadRequestToDonor ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardBorrowedCompactionTest::SchemeshardShouldHandleBorrowCompactionTimeouts [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:27:40.153334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:27:40.153366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:40.153373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:27:40.153379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:27:40.153392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:27:40.153398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:27:40.153408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:27:40.153422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:27:40.153554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:27:40.153622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:27:40.205094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:27:40.205125Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:27:40.218974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:27:40.219133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:27:40.219182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:27:40.229817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:27:40.229908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:27:40.230047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:40.230113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:27:40.234928Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:40.235001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:27:40.235337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:40.235348Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:27:40.235357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:27:40.235366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:40.235372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:27:40.235397Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.242594Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:27:40.299211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:27:40.299290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.299362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:27:40.299426Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:27:40.299439Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.300495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:40.300530Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:27:40.300590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.300604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:27:40.300610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:27:40.300616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:27:40.301527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.301546Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:27:40.301554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:27:40.302018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.302031Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:27:40.302039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:40.302048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:27:40.302864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:27:40.312204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:27:40.312288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:27:40.312568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:27:40.312620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:27:40.312644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:40.312738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:27:40.312749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:27:40.312794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:27:40.312813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-03T10:27:40.317758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:27:40.317798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:27:40.317861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... d: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: true Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 103 Memory: 124088 Storage: 14156 } ShardState: 2 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 42 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-03T10:29:53.920495Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:29:53.920511Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0103 2025-06-03T10:29:53.920524Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409546 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 2] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: true Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:29:53.920531Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-03T10:29:53.961589Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:53.961635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:53.961643Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-03T10:29:53.961686Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-03T10:29:53.961695Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-03T10:29:53.961738Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 2 shard 
idx 72057594046678944:1 data size 13940 row count 100 2025-06-03T10:29:53.961774Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:1 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], pathId map=Simple, is column=0, is olap=0, RowCount 100, DataSize 13940 2025-06-03T10:29:53.961783Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409546, followerId 0 2025-06-03T10:29:53.961831Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:477: Do not want to split tablet 72075186233409546 by size, its table already has 1 out of 1 partitions 2025-06-03T10:29:53.961872Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:53.972179Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:53.972223Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:53.972231Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:29:54.013018Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435079, Sender [0:0:0], Recipient [3:720:2684]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvPeriodicWakeup 2025-06-03T10:29:54.013158Z node 3 :TX_DATASHARD TRACE: datashard_impl.h:3437: TEvPeriodicTableStats from datashard 72075186233409547, FollowerId 0, tableId 3 2025-06-03T10:29:54.013272Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269553162, Sender [3:720:2684], Recipient [3:126:2151]: NKikimrTxDataShard.TEvPeriodicTableStats DatashardId: 72075186233409547 TableLocalId: 3 Generation: 2 Round: 12 TableStats { DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 19 Memory: 124088 } ShardState: 2 UserTablePartOwners: 72075186233409547 UserTablePartOwners: 72075186233409546 NodeId: 3 StartTime: 214 TableOwnerId: 72057594046678944 FollowerId: 0 2025-06-03T10:29:54.013282Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4919: StateWork, processing event TEvDataShard::TEvPeriodicTableStats 2025-06-03T10:29:54.013317Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId [OwnerId: 72057594046678944, LocalPathId: 3] state 'Ready' dataSize 13940 rowCount 100 cpuUsage 0.0019 2025-06-03T10:29:54.013338Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:570: Got periodic table stats at tablet 72057594046678944 from shard 72075186233409547 followerId 0 pathId 
[OwnerId: 72057594046678944, LocalPathId: 3] raw table stats: DataSize: 13940 RowCount: 100 IndexSize: 102 InMemSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 2 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false Channels { Channel: 1 DataSize: 13940 IndexSize: 102 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 2025-06-03T10:29:54.013349Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.100000s, queue# 1 2025-06-03T10:29:54.054213Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:92: Operation queue wakeup 2025-06-03T10:29:54.054274Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:65: Borrowed compaction timeout for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, in queue# 0 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-03T10:29:54.054290Z node 3 :FLAT_TX_SCHEMESHARD INFO: schemeshard__borrowed_compaction.cpp:28: RunBorrowedCompaction for pathId# [OwnerId: 72057594046678944, LocalPathId: 3], datashard# 72075186233409547, next wakeup# 0.000000s, rate# 0, in queue# 1 shards, running# 0 shards at schemeshard 72057594046678944 2025-06-03T10:29:54.054324Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: operation_queue_timer.h:84: Operation queue set wakeup after delta# 3 seconds 2025-06-03T10:29:54.054329Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-03T10:29:54.054384Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:54.054398Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:54.054402Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 1 2025-06-03T10:29:54.054433Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:601: Will execute TTxStoreStats, queue# 1 2025-06-03T10:29:54.054440Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:610: Will delay TTxStoreTableStats on# 0.000000s, queue# 1 2025-06-03T10:29:54.054477Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046678944:2 data size 13940 row count 100 2025-06-03T10:29:54.054522Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409547 maps to shardIdx: 72057594046678944:2 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], pathId map=CopyTable, is column=0, is olap=0, RowCount 100, DataSize 13940, with borrowed parts 2025-06-03T10:29:54.054530Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__table_stats.cpp:62: BuildStatsForCollector: datashardId 72075186233409547, followerId 0 2025-06-03T10:29:54.054574Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:472: Want to split tablet 72075186233409547 by size split by size 
(shardCount: 1, maxShardCount: 2, shardSize: 13940, maxShardSize: 1) 2025-06-03T10:29:54.054593Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:505: Postpone split tablet 72075186233409547 because it has borrow parts, enqueue compact them first 2025-06-03T10:29:54.054598Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__borrowed_compaction.cpp:100: Borrowed compaction enqueued shard# 72057594046678944:2 at schemeshard 72057594046678944 2025-06-03T10:29:54.054635Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:29:54.064903Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435092, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:54.064939Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5058: StateWork, processing event TEvPrivate::TEvPersistTableStats 2025-06-03T10:29:54.064945Z node 3 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046678944, queue size# 0 2025-06-03T10:29:54.300206Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:29:54.300248Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:29:54.300268Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [3:126:2151], Recipient [3:126:2151]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:29:54.300272Z node 3 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] >> Donor::CheckOnlineReadRequestToDonor [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteLongKey [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! 
!Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:80:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:168:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:78:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:80:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:82:2057] recipient: [5:81:2110] Leader for TabletID 72057594037927937 is [5:83:2111] sender: [5:84:2057] recipient: [5:81:2110] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:83:2111] Leader for TabletID 72057594037927937 is [5:83:2111] sender: [5:169:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! 
new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:172:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:82:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:85:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:86:2057] recipient: [8:84:2113] Leader for TabletID 72057594037927937 is [8:87:2114] sender: [8:88:2057] recipient: [8:84:2113] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:87:2114] Leader for TabletID 72057594037927937 is [8:87:2114] sender: [8:173:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteDeleteThenReadRemaining [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] 2025-06-03T10:29:47.086098Z node 1 :KEYVALUE ERROR: keyvalue_state.cpp:3012: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return false, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90 Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] 2025-06-03T10:29:48.814146Z node 2 :KEYVALUE ERROR: keyvalue_state.cpp:3012: KeyValue# 72057594037927937 PrepareExecuteTransactionRequest return false, Marker# KV73 Submsg# KeyValue# 72057594037927937 Can't delete Range, in DeleteRange, total limit of deletions per request (100000) reached, Marker# KV90 Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:450:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:453:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:454:2057] recipient: [4:452:2377] Leader for TabletID 72057594037927937 is [4:455:2378] sender: [4:456:2057] recipient: [4:452:2377] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! 
new actor is[4:455:2378] Leader for TabletID 72057594037927937 is [4:455:2378] sender: [4:541:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:450:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:453:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:454:2057] recipient: [5:452:2377] Leader for TabletID 72057594037927937 is [5:455:2378] sender: [5:456:2057] recipient: [5:452:2377] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:455:2378] Leader for TabletID 72057594037927937 is [5:455:2378] sender: [5:541:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:451:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:454:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:455:2057] recipient: [6:453:2377] Leader for TabletID 72057594037927937 is [6:456:2378] sender: [6:457:2057] recipient: [6:453:2377] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! 
new actor is[6:456:2378] Leader for TabletID 72057594037927937 is [6:456:2378] sender: [6:542:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::CheckOnlineReadRequestToDonor [GOOD] Test command err: RandomSeed# 8240316789019510636 2025-06-03T10:29:55.727872Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:55.728307Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4445736098642836322] 2025-06-03T10:29:55.729519Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: RESURRECT: id# [1:1:0:0:0:2097152:1] 2025-06-03T10:29:55.729582Z 7 00h01m11.311024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:6:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 1 PartsResurrected# 1 |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts >> Donor::SlayAfterWiping >> Donor::MultipleEvicts [GOOD] |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] >> KqpScripting::ScanQuery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::MultipleEvicts [GOOD] Test command err: RandomSeed# 4264122632546971800 0 donors: 2025-06-03T10:29:57.110163Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.110230Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.111487Z 26 00h00m20.011024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 25:1000 2025-06-03T10:29:57.124999Z 25 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.125072Z 25 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.126164Z 25 00h00m20.012048s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-06-03T10:29:57.136248Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.136290Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: 
[Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.136998Z 26 00h00m20.013072s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 25:1000 2025-06-03T10:29:57.147881Z 25 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.147922Z 25 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.148611Z 25 00h00m20.014096s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-06-03T10:29:57.161628Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.161702Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.162813Z 26 00h00m20.015120s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 25:1000 2025-06-03T10:29:57.178035Z 25 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.178114Z 25 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.179350Z 25 00h00m20.016144s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-06-03T10:29:57.194809Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.194882Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.195984Z 26 00h00m20.017168s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 25:1000 2025-06-03T10:29:57.210489Z 25 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.210535Z 25 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.211293Z 25 00h00m20.018192s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 26:1000 2025-06-03T10:29:57.222118Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.222183Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) 
TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 17307317640088669542] 2025-06-03T10:29:57.223148Z 26 00h00m20.019216s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:2:2:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 1 donors: 25:1000 |65.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestRenameWorksNewApi [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_donor/unittest >> Donor::SlayAfterWiping [GOOD] Test command err: RandomSeed# 9550414834344148766 2025-06-03T10:29:57.350882Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:29:57.351403Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 4779550733520243758] 2025-06-03T10:29:57.352792Z 1 00h01m14.361024s :BS_SYNCER ERROR: PDiskId# 1001 VDISK[82000000:_:0:0:0]: (2181038080) THullOsirisActor: FINISH: BlobsResurrected# 0 PartsResurrected# 0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRenameWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:84:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:84:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:83:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:86:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:85:2114] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:89:2057] recipient: [8:85:2114] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:88:2115] Leader for TabletID 72057594037927937 is [8:88:2115] sender: [8:108:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! 
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:109:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:87:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:90:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:91:2057] recipient: [10:89:2118] Leader for TabletID 72057594037927937 is [10:92:2119] sender: [10:93:2057] recipient: [10:89:2118] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:92:2119] Leader for TabletID 72057594037927937 is [10:92:2119] sender: [10:178:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:89:2118] Leader for TabletID 72057594037927937 is [11:92:2119] sender: [11:93:2057] recipient: [11:89:2118] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:92:2119] Leader for TabletID 72057594037927937 is [11:92:2119] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:209 ... recipient: [15:79:2110] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! 
new actor is[15:82:2111] Leader for TabletID 72057594037927937 is [15:82:2111] sender: [15:168:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:77:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:80:2057] recipient: [16:79:2110] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:81:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:83:2057] recipient: [16:79:2110] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:82:2111] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:168:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:78:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:81:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:82:2057] recipient: [17:80:2110] Leader for TabletID 72057594037927937 is [17:83:2111] sender: [17:84:2057] recipient: [17:80:2110] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! new actor is[17:83:2111] Leader for TabletID 72057594037927937 is [17:83:2111] sender: [17:169:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:81:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:84:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:85:2057] recipient: [18:83:2113] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:87:2057] recipient: [18:83:2113] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! 
new actor is[18:86:2114] Leader for TabletID 72057594037927937 is [18:86:2114] sender: [18:172:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:81:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:83:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:85:2057] recipient: [19:84:2113] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:87:2057] recipient: [19:84:2113] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:86:2114] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:172:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:82:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:85:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:86:2057] recipient: [20:84:2113] Leader for TabletID 72057594037927937 is [20:87:2114] sender: [20:88:2057] recipient: [20:84:2113] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! new actor is[20:87:2114] Leader for TabletID 72057594037927937 is [20:87:2114] sender: [20:173:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:83:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:86:2057] recipient: [21:85:2114] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:87:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:89:2057] recipient: [21:85:2114] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! 
new actor is[21:88:2115] Leader for TabletID 72057594037927937 is [21:88:2115] sender: [21:108:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:57:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:84:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:87:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:88:2057] recipient: [22:86:2115] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:90:2057] recipient: [22:86:2115] !Reboot 72057594037927937 (actor [22:57:2097]) rebooted! !Reboot 72057594037927937 (actor [22:57:2097]) tablet resolver refreshed! new actor is[22:89:2116] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:109:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:58:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:75:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:87:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:90:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:91:2057] recipient: [23:89:2118] Leader for TabletID 72057594037927937 is [23:92:2119] sender: [23:93:2057] recipient: [23:89:2118] !Reboot 72057594037927937 (actor [23:57:2097]) rebooted! !Reboot 72057594037927937 (actor [23:57:2097]) tablet resolver refreshed! new actor is[23:92:2119] Leader for TabletID 72057594037927937 is [23:92:2119] sender: [23:178:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:58:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:75:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:87:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:90:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:91:2057] recipient: [24:89:2118] Leader for TabletID 72057594037927937 is [24:92:2119] sender: [24:93:2057] recipient: [24:89:2118] !Reboot 72057594037927937 (actor [24:57:2097]) rebooted! !Reboot 72057594037927937 (actor [24:57:2097]) tablet resolver refreshed! 
new actor is[24:92:2119] Leader for TabletID 72057594037927937 is [24:92:2119] sender: [24:178:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:58:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:75:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:88:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:91:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:92:2057] recipient: [25:90:2118] Leader for TabletID 72057594037927937 is [25:93:2119] sender: [25:94:2057] recipient: [25:90:2118] !Reboot 72057594037927937 (actor [25:57:2097]) rebooted! !Reboot 72057594037927937 (actor [25:57:2097]) tablet resolver refreshed! new actor is[25:93:2119] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:58:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:75:2057] recipient: [26:14:2061] >> KqpScripting::ScriptingCreateAndAlterTableTest >> TopicAutoscaling::PartitionSplit_ManySession_PQv1 [GOOD] >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK >> CommitOffset::DistributedTxCommit_CheckSessionResetAfterCommit [GOOD] >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest |65.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |65.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore >> KqpScripting::ScanQuery [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheEnd [GOOD] >> KqpScripting::ScanQueryDisable >> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle |65.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> TopicAutoscaling::ReadFromTimestamp_BeforeAutoscaleAwareSDK [GOOD] >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] |65.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume >> TKeyValueTest::TestBasicWriteRead [GOOD] >> TopicAutoscaling::ReadFromTimestamp_PQv1 >> TKeyValueTest::TestBasicWriteReadOverrun |65.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_restore/ydb-core-tx-schemeshard-ut_restore |65.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_bsvolume/ydb-core-tx-schemeshard-ut_bsvolume |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/query_stats/ut/unittest |65.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr_reboots/unittest |65.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |65.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut |65.7%| [LD] {RESULT} 
$(B)/ydb/core/kqp/rm_service/ut/ydb-core-kqp-rm_service-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> SystemView::ShowCreateTableColumnAlterObject [GOOD] Test command err: 2025-06-03T10:28:08.851010Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667971448408140:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:08.851090Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002b0e/r3tmp/tmpZ7CrvG/pdisk_1.dat 2025-06-03T10:28:08.941071Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6405, node 1 2025-06-03T10:28:08.969831Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:08.969848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:08.969850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:08.969901Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21270 2025-06-03T10:28:08.995434Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:08.995465Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:21270 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:09.070545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:09.189662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:09.285967Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:276: Subscribed for config changes 2025-06-03T10:28:09.285989Z node 1 :KQP_COMPILE_SERVICE INFO: kqp_compile_service.cpp:329: Updated config 2025-06-03T10:28:09.295740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667975743376339:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.295769Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511667975743376328:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.295789Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:09.296763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710658:3, at schemeshard: 72057594046644480 2025-06-03T10:28:09.306230Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511667975743376342:2343], DatabaseId: /Root, PoolId: default, Scheduled retry for error: { <main>
: Error: Transaction 281474976710658 completed, doublechecking } 2025-06-03T10:28:09.399957Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511667975743376411:2715] txid# 281474976710659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:09.400490Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1183: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-03T10:28:09.400553Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:410: Perform request, TraceId.SpanIdPtr: 0x000070E76A395658 2025-06-03T10:28:09.400571Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:420: Received compile request, sender: [1:7511667975743376324:2336], queryUid: , queryText: "\n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n ", keepInCache: 1, split: 0{ TraceId: 01jwtnb3yeevqxv811y26kj8qs, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTgzNzRiMjUtMWI2OWUzODktN2MxOGVjZDEtOGExZGRkOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default} 2025-06-03T10:28:09.400590Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:1183: Try to find query by queryId, queryId: {Cluster: db, Database: /Root, DatabaseId: /Root, UserSid: , Text: \n UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`);\n UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`);\n CREATE EXTERNAL DATA SOURCE `tier1` WITH (\n SOURCE_TYPE = \"ObjectStorage\",\n LOCATION = \"http://fake.fake/olap-tier1\",\n AUTH_METHOD = \"AWS\",\n AWS_ACCESS_KEY_ID_SECRET_NAME = \"accessKey\",\n AWS_SECRET_ACCESS_KEY_SECRET_NAME = \"secretKey\",\n AWS_REGION = \"ru-central1\"\n );\n , Settings: {DocumentApiRestricted: 1, IsInternalCall: 0, QueryType: QUERY_TYPE_SQL_GENERIC_CONCURRENT_QUERY}, QueryParameterTypes: , GUCSettings: { "guc_settings": { "session_settings": { "ydb_user":"", "ydb_database":"Root" }, "settings": { "ydb_user":"", "ydb_database":"Root" }, "rollback_settings": { } } }} 2025-06-03T10:28:09.400609Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:516: 
Added request to queue, sender: [1:7511667975743376324:2336], queueSize: 1 2025-06-03T10:28:09.400784Z node 1 :KQP_COMPILE_SERVICE DEBUG: kqp_compile_service.cpp:877: Created compile actor, sender: [1:7511667975743376324:2336], compileActor: [1:7511667975743376430:2347] 2025-06-03T10:28:09.457766Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnb3yeevqxv811y26kj8qs, SessionId: CompileActor 2025-06-03 10:28:09.457 INFO ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0C446640) [core dq] kqp_host.cpp:1375: Good place to weld in 2025-06-03T10:28:09.457968Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnb3yeevqxv811y26kj8qs, SessionId: CompileActor 2025-06-03 10:28:09.457 INFO ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0C446640) [core dq] kqp_host.cpp:1380: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-03T10:28:09.458041Z node 1 :KQP_YQL INFO: log.cpp:67: TraceId: 01jwtnb3yeevqxv811y26kj8qs, SessionId: CompileActor 2025-06-03 10:28:09.457 INFO ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0C446640) [KQP] kqp_host.cpp:1386: Compiled query: ( (let $1 (Write! world (DataSink '"kikimr" '"db") (Key '('objectId (String '"accessKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"secretAccessKey")))))) (let $2 (Write! $1 (DataSink '"kikimr" '"db") (Key '('objectId (String '"secretKey")) '('typeId (String '"SECRET"))) (Void) '('('mode 'upsertObject) '('features '('('"value" '"fakeSecret")))))) (let $3 '('('"auth_method" '"AWS") '('"aws_access_key_id_secret_name" '"accessKey") '('"aws_region" '"ru-central1") '('"aws_secret_access_key_secret_name" '"secretKey") '('"location" '"http://fake.fake/olap-tier1") '('"source_type" '"ObjectStorage"))) (return (Write! $2 (DataSink '"kikimr" '"db") (Key '('objectId (String '"/Root/tier1")) '('typeId (String '"EXTERNAL_DATA_SOURCE"))) (Void) '('('mode 'createObject) '('features $3)))) ) 2025-06-03T10:28:09.458208Z node 1 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jwtnb3yeevqxv811y26kj8qs, SessionId: CompileActor 2025-06-03 10:28:09.458 TRACE ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0C446640) [KQP] kqp_transf ... 
eActor 2025-06-03 10:28:57.584 TRACE ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [core peephole] yql_out_transformers.cpp:62: PeepHoleOpt: ( (declare $ids (ListType (TupleType (DataType 'Utf8) (DataType 'Utf8)))) (return (KqpProgram (lambda '() (block '( (let $1 (Uint64 '10000)) (let $2 (DataType 'Utf8)) (let $3 (Collect (Take (FlatMap $ids (lambda '($6) (block '( (let $7 (Int32 '1)) (let $8 '((Just (Just (Nth $6 '0))) $7)) (let $9 '((Just (Just (Nth $6 '1))) $7)) (return (RangeMultiply $1 (RangeCreate (AsList '($8 $8))) (RangeCreate (AsList '($9 $9))))) )))) (Uint64 '10001)))) (let $4 (Nothing (OptionalType (OptionalType $2)))) (let $5 '($4 $4 (Int32 '0))) (return (ToStream (Just '((RangeFinalize (RangeMultiply $1 (RangeUnion (If (> (Length $3) $1) (RangeCreate (AsList '($5 $5))) (RangeUnion $3))))))))) ))) (TupleType))) ) 2025-06-03T10:28:57.584852Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.584 TRACE ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (declare $ids (ListType (TupleType (DataType 'Utf8) (DataType 'Utf8)))) (let $1 '('('"_logical_id" '1263) '('"_id" '"7fd9e645-7a0b2564-3e5f776b-d1eb5a66") '('"_partition_mode" '"single"))) (let $2 (DqPhyStage '() (lambda '() (block '( (let $4 (Uint64 '10000)) (let $5 (DataType 'Utf8)) (let $6 (Collect (Take (FlatMap $ids (lambda '($9) (block '( (let $10 (Int32 '1)) (let $11 '((Just (Just (Nth $9 '0))) $10)) (let $12 '((Just (Just (Nth $9 '1))) $10)) (return (RangeMultiply $4 (RangeCreate (AsList '($11 $11))) (RangeCreate (AsList '($12 $12))))) )))) (Uint64 '10001)))) (let $7 (Nothing (OptionalType (OptionalType $5)))) (let $8 '($7 $7 (Int32 '0))) (return (ToStream (Just '((RangeFinalize (RangeMultiply $4 (RangeUnion (If (> (Length $6) $4) (RangeCreate (AsList '($8 $8))) (RangeUnion $6))))))))) ))) $1)) (let $3 (DqCnValue (TDqOutput $2 '0))) (return (KqpPhysicalTx '($2) '($3) '('('"$ids")) '('('"type" '"compute")))) ) 2025-06-03T10:28:57.584929Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.584 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 69us 2025-06-03T10:28:57.584969Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.584 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_constraint.cpp:3248: Execution of [ConstraintTransformer::DoTransform] took 35us 2025-06-03T10:28:57.584993Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.584 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 18us 2025-06-03T10:28:57.585058Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressionsForSubGraph] took 60us 2025-06-03T10:28:57.585177Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [KQP] kqp_opt_peephole.cpp:489: >>> TKqpTxPeepholeTransformer[skip]: ( (declare $ids (ListType 
(TupleType (DataType 'Utf8) (DataType 'Utf8)))) (let $1 '('('"_logical_id" '1263) '('"_id" '"7fd9e645-7a0b2564-3e5f776b-d1eb5a66") '('"_partition_mode" '"single"))) (let $2 (DqPhyStage '() (lambda '() (block '( (let $4 (Uint64 '10000)) (let $5 (DataType 'Utf8)) (let $6 (Collect (Take (FlatMap $ids (lambda '($9) (block '( (let $10 (Int32 '1)) (let $11 '((Just (Just (Nth $9 '0))) $10)) (let $12 '((Just (Just (Nth $9 '1))) $10)) (return (RangeMultiply $4 (RangeCreate (AsList '($11 $11))) (RangeCreate (AsList '($12 $12))))) )))) (Uint64 '10001)))) (let $7 (Nothing (OptionalType (OptionalType $5)))) (let $8 '($7 $7 (Int32 '0))) (return (ToStream (Just '((RangeFinalize (RangeMultiply $4 (RangeUnion (If (> (Length $6) $4) (RangeCreate (AsList '($8 $8))) (RangeUnion $6))))))))) ))) $1)) (let $3 (DqCnValue (TDqOutput $2 '0))) (return (KqpPhysicalTx '($2) '($3) '('('"$ids")) '('('"type" '"compute")))) ) 2025-06-03T10:28:57.585436Z node 41 :KQP_YQL TRACE: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 TRACE ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [KQP] kqp_transform.cpp:33: TxsPeephole: ( (declare $ids (ListType (TupleType (DataType 'Utf8) (DataType 'Utf8)))) (declare %kqp%tx_result_binding_0_0 (TupleType (ListType (TupleType (TupleType (OptionalType (OptionalType (DataType 'Utf8))) (OptionalType (OptionalType (DataType 'Utf8))) (DataType 'Int32)) (TupleType (OptionalType (OptionalType (DataType 'Utf8))) (OptionalType (OptionalType (DataType 'Utf8))) (DataType 'Int32)))))) (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 '"%kqp%tx_result_binding_0_0") (let $4 (DataType 'Utf8)) (let $5 (OptionalType $4)) (let $6 (OptionalType $5)) (let $7 (TupleType $6 $6 (DataType 'Int32))) (let $8 (TupleType (ListType (TupleType $7 $7)))) (let $9 (KqpRowsSourceSettings $1 $2 '() %kqp%tx_result_binding_0_0 '('('"UsedKeyColumns" '('"ownerUserId" '"secretId")) '('"PointPrefixLen" '"2")))) (let $10 (Uint64 '"1001")) (let $11 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $12 '('('"_logical_id" '1382) '('"_id" '"ea478b03-2ffca352-722bd43e-20a9c24c") '('"_wide_channels" $11))) (let $13 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $9)) (lambda '($18) (block '( (let $19 (lambda '($30) (Member $30 '"ownerUserId") (Member $30 '"secretId") (Member $30 '"value"))) (return (FromFlow (ExpandMap (Take (Filter (ToFlow $18) (lambda '($20) (block '( (let $21 (Member $20 '"ownerUserId")) (let $22 (Member $20 '"secretId")) (let $23 (Bool 'false)) (let $24 (If (Or (Not (Exists $21)) (Not (Exists $22))) (Nothing (OptionalType (DataType 'Bool))) (Just (IfPresent (FilterNullElements (Just '($21 $22)) '('0 '1)) (lambda '($25) (block '( (let $26 '('Auto 'One 'Compact)) (let $27 (ToDict $ids (lambda '($28) $28) (lambda '($29) (Void)) $26)) (return (Contains $27 $25)) ))) $23)))) (return (Coalesce $24 $23)) )))) $10) $19))) ))) $12)) (let $14 (DqCnUnionAll (TDqOutput $13 '0))) (let $15 (DqPhyStage '($14) (lambda '($31) (FromFlow (Take (NarrowMap (ToFlow $31) (lambda '($32 $33 $34) (AsStruct '('"ownerUserId" $32) '('"secretId" $33) '('"value" $34)))) $10))) '('('"_logical_id" '1395) '('"_id" '"5b5c4131-a8bd4760-4ceac428-ff88fcb7")))) (let $16 (DqCnResult (TDqOutput $15 '0) '())) (let $17 (KqpTxResultBinding $8 '0 '0)) (return (KqpPhysicalTx '($13 $15) '($16) '('('"$ids") '($3 $17)) '('('"type" '"data")))) ) 2025-06-03T10:28:57.585446Z node 41 
:KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 2us 2025-06-03T10:28:57.585451Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_constraint.cpp:3248: Execution of [ConstraintTransformer::DoTransform] took 1us 2025-06-03T10:28:57.585487Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_csee.cpp:620: Execution of [UpdateCompletness] took 31us 2025-06-03T10:28:57.585594Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_csee.cpp:633: Execution of [EliminateCommonSubExpressionsForSubGraph] took 102us 2025-06-03T10:28:57.585789Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.585 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [KQP] kqp_opt_peephole.cpp:494: >>> TKqpTxPeepholeTransformer: ( (declare $ids (ListType (TupleType (DataType 'Utf8) (DataType 'Utf8)))) (declare %kqp%tx_result_binding_0_0 (TupleType (ListType (TupleType (TupleType (OptionalType (OptionalType (DataType 'Utf8))) (OptionalType (OptionalType (DataType 'Utf8))) (DataType 'Int32)) (TupleType (OptionalType (OptionalType (DataType 'Utf8))) (OptionalType (OptionalType (DataType 'Utf8))) (DataType 'Int32)))))) (let $1 (KqpTable '"//Root/.metadata/secrets/values" '"72057594046644480:9" '"" '3)) (let $2 '('"ownerUserId" '"secretId" '"value")) (let $3 '"%kqp%tx_result_binding_0_0") (let $4 (DataType 'Utf8)) (let $5 (OptionalType $4)) (let $6 (OptionalType $5)) (let $7 (TupleType $6 $6 (DataType 'Int32))) (let $8 (TupleType (ListType (TupleType $7 $7)))) (let $9 (KqpRowsSourceSettings $1 $2 '() %kqp%tx_result_binding_0_0 '('('"UsedKeyColumns" '('"ownerUserId" '"secretId")) '('"PointPrefixLen" '"2")))) (let $10 (Uint64 '"1001")) (let $11 (StructType '('"ownerUserId" $5) '('"secretId" $5) '('"value" $5))) (let $12 '('('"_logical_id" '1382) '('"_id" '"ea478b03-2ffca352-722bd43e-20a9c24c") '('"_wide_channels" $11))) (let $13 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $9)) (lambda '($18) (block '( (let $19 (lambda '($30) (Member $30 '"ownerUserId") (Member $30 '"secretId") (Member $30 '"value"))) (return (FromFlow (ExpandMap (Take (Filter (ToFlow $18) (lambda '($20) (block '( (let $21 (Member $20 '"ownerUserId")) (let $22 (Member $20 '"secretId")) (let $23 (Bool 'false)) (let $24 (If (Or (Not (Exists $21)) (Not (Exists $22))) (Nothing (OptionalType (DataType 'Bool))) (Just (IfPresent (FilterNullElements (Just '($21 $22)) '('0 '1)) (lambda '($25) (block '( (let $26 '('Auto 'One 'Compact)) (let $27 (ToDict $ids (lambda '($28) $28) (lambda '($29) (Void)) $26)) (return (Contains $27 $25)) ))) $23)))) (return (Coalesce $24 $23)) )))) $10) $19))) ))) $12)) (let $14 (DqCnUnionAll (TDqOutput $13 '0))) (let $15 (DqPhyStage '($14) (lambda '($31) (FromFlow (Take (NarrowMap (ToFlow $31) (lambda '($32 $33 $34) (AsStruct '('"ownerUserId" $32) '('"secretId" $33) '('"value" $34)))) $10))) 
'('('"_logical_id" '1395) '('"_id" '"5b5c4131-a8bd4760-4ceac428-ff88fcb7")))) (let $16 (DqCnResult (TDqOutput $15 '0) '())) (let $17 (KqpTxResultBinding $8 '0 '0)) (return (KqpPhysicalTx '($13 $15) '($16) '('('"$ids") '($3 $17)) '('('"type" '"data")))) ) 2025-06-03T10:28:57.586026Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.586 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 63us 2025-06-03T10:28:57.586043Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.586 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] yql_expr_constraint.cpp:3248: Execution of [ConstraintTransformer::DoTransform] took 11us 2025-06-03T10:28:57.586048Z node 41 :KQP_YQL DEBUG: log.cpp:67: TraceId: 01jwtnck1ac9sxnah270nyfj7j, SessionId: CompileActor 2025-06-03 10:28:57.586 DEBUG ydb-core-sys_view-ut(pid=217475, tid=0x00007F0C0E44A640) [perf] type_ann_expr.cpp:48: Execution of [TypeAnnotationTransformer::DoTransform] took 0us |65.7%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_backup/unittest >> TBackupTests::ShouldSucceedOnLargeData[Zstd] [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:29:44.009064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:44.009090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:44.009094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:44.009111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:44.009126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:44.009131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:44.009139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:44.009160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:44.009240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:44.009326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:44.019238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:29:44.019265Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:44.022871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:44.023006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:44.023041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:44.024919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:44.024972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:44.025067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:44.025755Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:44.025801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:44.026064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:44.026072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:44.026084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:44.026091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.026096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:44.026111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.027463Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:29:44.043744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
2025-06-03T10:29:44.043827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.043887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:44.043934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:44.043944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:44.044787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.044795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:44.044801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:44.044805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:44.045160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.045168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:44.045172Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:44.045612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.045630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:44.045635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:44.045641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:44.046190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-03T10:29:44.046616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:44.046652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:44.046817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:44.046837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:44.046843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:44.046908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:29:44.046915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:44.046944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:29:44.046955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:29:44.047327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:44.047333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:44.047378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
taShard::TEvExportScan::TEvBuffer { Last: 0 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=100&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:27046 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5168E0E7-0986-4EEC-B053-850D33868A38 amz-sdk-request: attempt=1 content-length: 55 content-md5: B5SOCmjwb1RI3tHamcoRHA== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=100&uploadId=1 / 55 2025-06-03T10:29:59.087164Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3454:5416], result# UploadPartResult { ETag: 07948e0a68f06f5448ded1da99ca111c } 2025-06-03T10:29:59.087223Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:130: [Export] [scanner] Handle TEvExportScan::TEvFeed: self# [1:3453:5415] 2025-06-03T10:29:59.087246Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:445: [Export] [s3] Handle TEvExportScan::TEvBuffer: self# [1:3454:5416], sender# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvBuffer { Last: 1 Checksum: } REQUEST: PUT /data_00.csv.zst?partNumber=101&uploadId=1 HTTP/1.1 HEADERS: Host: localhost:27046 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 5DC9B7B3-FEAA-4972-A1AA-63C391DBF7EA amz-sdk-request: attempt=1 content-length: 0 content-md5: 1B2M2Y8AsgTpgAmY7PhCfg== content-type: binary/octet-stream user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 S3_MOCK::HttpServeWrite: /data_00.csv.zst / partNumber=101&uploadId=1 / 0 2025-06-03T10:29:59.087960Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:578: [Export] [s3] Handle TEvExternalStorage::TEvUploadPartResponse: self# [1:3454:5416], result# UploadPartResult { ETag: d41d8cd98f00b204e9800998ecf8427e } 2025-06-03T10:29:59.087978Z node 1 :DATASHARD_BACKUP INFO: export_s3_uploader.cpp:702: [Export] [s3] Finish: self# [1:3454:5416], success# 1, error# , multipart# 1, uploadId# 1 2025-06-03T10:29:59.090220Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:512: [Export] [s3] Handle TEvDataShard::TEvS3Upload: self# [1:3454:5416], upload# { Id: 1 Status: Complete Error: (empty maybe) Parts: 
[f8f51a1e4a70db44fa91cc2ab9680824,9eba675fd7f187274786dff2f47292df,921325fb6b8811df3d06a44dbe1f8523,4eeb6b90e8e61075275bd8a42f56bd69,2840a487abe8cb9502b3d9c8a8e1c942,607d8f6e3b235a360d63796efd3a51c2,ed22e08df7fb8840f7cabc779cc86885,efeff2c7731061edd9a39059cc078045,4af01cb3455932f28e3bba713dcd57c9,dc94d36ecf3b36d183d75c84b9b2fac6,e2ce425dd2bb582abcc13d0d714c3554,b71e46686939d2cdf046520dd2774281,ab731a82a161e5e044b24e895a1713d6,1df51aaec89711e13a6f95c13113e36c,b6066b2ed343831b1b0ee0076179981e,332d34d77adc2b024a33d87e07d4233f,cf0093cc99590a0e8f9c199ed6deca07,8cc923ec76224e69263ac93b7bfabd30,690d66897e0780f2dfe3614e5a659a22,7502aae0ec253663b1cbfdc8ede92ab9,7d2c6f728ee0c12097dfe5441970b946,5fc7b9b675e0a125eea67cf05f82627f,fc8c5faa99cc7f4ce7ca320f8e7adb58,8e305c5aca758683ff25407a7bbd9220,181bce9c6393e22a0ac359a7b45d8187,639677548f0a8b776a6db92f44d96505,390ff8f57cfa4c04bfbed0d7a63c90e8,3dd76756e6558fd6c8c918210f7dc136,a3f5254fdad3ded54edef910e704c151,e9186373f80dbaa55dd04d07621de277,8898b965060a431b499261ec0cd3cee3,3ed51c736e64defe04980ce328b17aa4,bb0e45971888796588c12ea1c1bec162,e2b3defa84005d3892986ca6894b811f,656c7c809c8c8485f6e91892591cd284,779c6827126f255bde25ae242bf4c8ff,8883fc9b073e683558f1231c5f2142d0,19390a0e3340bcb6ccfe866a790f05cb,305182d3e9745fba3aad1973bb1bfc93,002819d72a6dc7954ecc1bcd2bd20254,325c6bc3cdd6fd83083cf0126c606218,b86932903843b9626e80bd9ccb5d0571,b5054116537a7c467bdb488c9d67dee7,fc3a45bd17a00b147e4f9c55bc2493da,1118e2f41e8839211163250796a65dce,b403ff17c2c269a79201a03ce439dc2a,88f2692ee439cfadef1cd21d58aac8d3,e5bef12f89b101af84d52299a5867d99,ed613335180c53f69d450ef8b176a4d5,150fd7dcdc86eb38c7f821ff4698d8bc,a0c18bf08acc6ebecac04a2520efee9b,e8463d7ce8f502d1575a433c1b30a9af,f123e0fc879e2fdc2c3e2f698fc4176d,d7ab79d73e4648e0a2bf8dec3a19c019,4e74b82f6a8ea7fad8790ee7dfcdb76e,f72bb1d8aa0f5c9265bae10a3784d8e8,924b317371d16363a37962b17a2ae4bb,7214b458c7e25c791e54bd430b835a6e,e79dba1b56122372af3fe7b06ea91bda,6aae345b94d78fc7c1ed0b8697cf5e62,fd3636ed699facb5f0c12f81741cabc5,2c4a198408c3eb9577fcd339ca62c539,59fbf761f9b7574b65fa6877b167bb8c,14f9f5cfdf3a6c33c577a54429b19cb6,c6d078b3be9cd7943e8145fd982baeef,198f55ae25539fbd54a4a6075beac2d1,939123b44e362c76a151a85af0247fb7,0147f8bd741be7780cbc900b6f4b0899,43453200aeaf201420737354cd73cfe4,de26d1339779fe0c538d01d5963fd423,5c903650e719f959dc9f37ea360c6319,23607b3f36e0a2abae7f1ed8e38596f3,0db9af920c6d1cf868e470bf7a349747,aed6ac19c60d08500582eea9dadcdfee,3f4e37ddd3e2e56a725323fad4d85cf6,942b269af420b4277d025cea489dcb25,89eddc25ba615b6cf09b9cd9a11a16bb,1d8e7f0613dc1919ee90133c468380bd,8bf1e4c1266d8437c1bd85e0fca6640a,e9eabcf5b61cf257f530b156dbd77a88,411f1661ae7650d2144e8c6f8a33b28f,6706ec5b8771e555779d5cbeca41aa75,b3a33ef21a8224ddc78a52e8d7ca8357,58749d344f42c192e572eda4ee66fb01,381aeb5ee3014e2c0fd9b85bd59ce005,9aed2297cd10dce10d68de3ff1830b42,be88e095fc3a13708b714db03b1f2744,5628e81ee17fb22fc828ed1b2169578b,a1cfb563fa4af884fe02ced05c26c881,fc602b8ee2e9746fb52823f8fd1f0f28,a1de256e94c7baa9b8ab905c892d1a14,6bff895b0b5f3552ad4bdc61b0d24148,fcba1d258a8651d831767b42e010e439,bef6e3d7088e671809fe584531f96971,f0b489242271d11200dbdbc78e4ce715,372d2d6877fff7c04433e492ad4dbd45,32191cf1972dcccd59c0b5a8b53d4f23,25928b7997b97ac58f18fbbe589573e8,472e53a27497661c6400410909405c4e,07948e0a68f06f5448ded1da99ca111c,d41d8cd98f00b204e9800998ecf8427e] } REQUEST: POST /data_00.csv.zst?uploadId=1 HTTP/1.1 HEADERS: Host: localhost:27046 Accept: */* Connection: Upgrade, HTTP2-Settings Upgrade: h2c HTTP2-Settings: AAMAAABkAAQAoAAAAAIAAAAA amz-sdk-invocation-id: 
453D9280-525D-47CE-924D-030626B6ADA1 amz-sdk-request: attempt=1 content-length: 11529 content-type: application/xml user-agent: aws-sdk-cpp/1.11.37 Linux/5.15.0-139-generic x86_64 Clang/18.1.8 x-amz-api-version: 2006-03-01 S3_MOCK::HttpServeAction: 4 / /data_00.csv.zst / uploadId=1 2025-06-03T10:29:59.092506Z node 1 :DATASHARD_BACKUP DEBUG: export_s3_uploader.cpp:609: [Export] [s3] Handle TEvExternalStorage::TEvCompleteMultipartUploadResponse: self# [1:3454:5416], result# CompleteMultipartUploadResult { Bucket: Key: data_00.csv.zst ETag: c902b621cdd1ee89b9f1c4e6c36e6e45 } 2025-06-03T10:29:59.092654Z node 1 :DATASHARD_BACKUP DEBUG: export_scan.cpp:144: [Export] [scanner] Handle TEvExportScan::TEvFinish: self# [1:3453:5415], msg# NKikimr::NDataShard::TEvExportScan::TEvFinish { Success: 1 Error: } 2025-06-03T10:29:59.096186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:59.096218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409546, partId: 0 2025-06-03T10:29:59.096255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:59.096273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:233: TBackup TProposedWaitParts, opId: 102:0 HandleReply TEvSchemaChanged at tablet# 72057594046678944 message# Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 102 Step: 0 Generation: 2 OpResult { Success: true Explain: "" BytesProcessed: 10000 RowsProcessed: 1000 } 2025-06-03T10:29:59.096306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:59.096312Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:59.096318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:29:59.096327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:29:59.096401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_backup_restore_common.h:116: Unable to make a bill: kind# TBackup, opId# 102:0, reason# domain is not a serverless db, domain# /MyRoot, domainPathId# [OwnerId: 72057594046678944, LocalPathId: 1], IsDomainSchemeShard: 1, ParentDomainId: [OwnerId: 72057594046678944, LocalPathId: 1], ResourcesDomainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:59.097713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:59.097902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:29:59.097919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:29:59.097940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:59.097946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:59.097953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:29:59.097960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:59.097966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:29:59.097997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 102 2025-06-03T10:29:59.098005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:29:59.098012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:29:59.098018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:29:59.098063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:29:59.099302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:29:59.099322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:3439:5402] TestWaitNotification: OK eventTxId 102 >> KqpScripting::ScriptingCreateAndAlterTableTest [GOOD] >> KqpScripting::SecondaryIndexes >> KqpScripting::ScanQueryDisable [GOOD] >> TExportToS3WithRebootsTests::ForgetShouldSucceedOnSingleShardTableWithChangefeed [GOOD] >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] >> TKeyValueTest::TestWriteTrimWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/yql/unittest >> KqpScripting::ScanQueryDisable [GOOD] Test command err: Trying to start YDB, gRPC: 13565, MsgBus: 2817 2025-06-03T10:29:57.721842Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668438349389900:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:57.721896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # 
/home/runner/.ya/build/build_root/u93c/0020ff/r3tmp/tmpWtVoaR/pdisk_1.dat 2025-06-03T10:29:57.779613Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13565, node 1 2025-06-03T10:29:57.798469Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:57.798493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:57.798495Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:57.798548Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2817 2025-06-03T10:29:57.823530Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:57.823558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:57.824705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2817 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:57.863277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:57.872839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:57.894857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:57.915409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:29:57.935367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:58.122449Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668442644358797:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:58.122473Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:58.185498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.196925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.210963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.267902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.282550Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.296127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.309730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:58.327075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668442644359451:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:58.327114Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:58.327122Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668442644359456:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:58.328131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:58.337280Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668442644359458:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:58.402107Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668442644359509:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:58.688433Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946598721, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 17078, MsgBus: 23366 2025-06-03T10:29:59.053618Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668447600027055:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:59.053665Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020ff/r3tmp/tmpItubWp/pdisk_1.dat 2025-06-03T10:29:59.069806Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17078, node 2 2025-06-03T10:29:59.079132Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:59.079147Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:59.079150Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:59.079212Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23366 TClient is connected to server localhost:23366 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:29:59.154208Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:59.154251Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:59.155342Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:59.158887Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.164089Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.178457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.209024Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.223934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.459594Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668447600028642:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.459622Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.468854Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.479352Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.493142Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.506961Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.522041Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.535330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.548941Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.565453Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668447600029296:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.565482Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.565488Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668447600029301:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.566314Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:59.576234Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668447600029303:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:29:59.640748Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668447600029354:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:59.862707Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946599904, txId: 281474976715672] shutting down |65.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} |65.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_backup/test-results/unittest/{meta.json ... results_accumulator.log} >> TKeyValueTest::TestInlineWriteReadWithRestartsWithNotCorrectUTF8NewApi [GOOD] >> TKeyValueTest::TestLargeWriteAndDelete >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestExecutorMemUsage |65.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |65.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |65.8%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_ext_tenant/ydb-core-tx-tx_proxy-ut_ext_tenant |65.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |65.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut |65.8%| [LD] {RESULT} $(B)/ydb/core/wrappers/ut/ydb-core-wrappers-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceDistributed::TestDistributedLargeConcurrentQueryLimit [GOOD] Test command err: 2025-06-03T10:29:23.343589Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668294671409238:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:23.343786Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001b67/r3tmp/tmplwV7fL/pdisk_1.dat 2025-06-03T10:29:23.428763Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:23.431651Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668294671409214:2079] 1748946563343335 != 1748946563343338 TServer::EnableGrpc on GrpcPort 26745, node 1 2025-06-03T10:29:23.452511Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:23.452526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:23.452528Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:23.452581Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29930 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:29:23.485928Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.485965Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.487048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:23.508042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:23.511684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:23.772958Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-03T10:29:23.773002Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294671409854:2326], Start check tables existence, number paths: 2 2025-06-03T10:29:23.773867Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ= 2025-06-03T10:29:23.774145Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-03T10:29:23.774156Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-03T10:29:23.774159Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-03T10:29:23.774175Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294671409854:2326], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-03T10:29:23.774184Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294671409854:2326], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-03T10:29:23.774189Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294671409854:2326], Successfully finished 2025-06-03T10:29:23.776963Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:29:23.777053Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-03T10:29:23.777425Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-03T10:29:23.778343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:29:23.778739Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-03T10:29:23.778777Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-03T10:29:23.780564Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:23.864665Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-03T10:29:23.866023Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668294671409924:2322] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:23.866078Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294671409873:2290], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-03T10:29:23.866216Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668294671409931:2328], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-03T10:29:23.866559Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668294671409931:2328], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-03T10:29:23.867726Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:29:23.867771Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:29:23.867776Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:29:23.867780Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:29:23.867814Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=1&id=ZWZiM2QxMDctOWE3ZDlkZGQtZjY0NDg2M2ItMjg5OWRmYmQ=, ActorId: [1:7511668294671409870:2327], ActorState: unknown state, Session actor destroyed 2025-06-03T10:29:24.171288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668296280605831:2136];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:24.171456Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001b67/r3tmp/tmpq5wIMF/pdisk_1.dat 2025-06-03T10:29:24.187076Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
2025-06-03T10:29:24.187568Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668296280605728:2079] 1748946564170535 != 1748946564170538 TServer::EnableGrpc on GrpcPort 24298, node 2 2025-06-03T10:29:24.199652Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:24.199670Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:24.199672Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:24.199732Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20759 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecuritySta ... TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:29:59.657223Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=6&id=OTQ2YTFiMC0xNzU0MTgyOS02YTkxYjY3NS0zYjE5ZWYzZA==, ActorId: [6:7511668312418029535:2331], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:29:59.657226Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=6&id=OTQ2YTFiMC0xNzU0MTgyOS02YTkxYjY3NS0zYjE5ZWYzZA==, ActorId: [6:7511668312418029535:2331], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:29:59.657240Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=6&id=OTQ2YTFiMC0xNzU0MTgyOS02YTkxYjY3NS0zYjE5ZWYzZA==, ActorId: [6:7511668312418029535:2331], ActorState: unknown state, Session actor destroyed 2025-06-03T10:29:59.659556Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefq7dx8scxktndgr2k0k, TEvTxResponse, CurrentTx: 1/1 response.status: SUCCESS 2025-06-03T10:29:59.659607Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefq7dx8scxktndgr2k0k, txInfo Status: Committed Kind: ReadWrite TotalDuration: 3.842 ServerDuration: 3.795 QueriesCount: 2 2025-06-03T10:29:59.659637Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefq7dx8scxktndgr2k0k, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:29:59.659653Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 
01jwtnefq7dx8scxktndgr2k0k, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:29:59.659657Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefq7dx8scxktndgr2k0k, EndCleanup, isFinal: 0 2025-06-03T10:29:59.659669Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefq7dx8scxktndgr2k0k, Sent query response back to proxy, proxyRequestId: 548, proxyId: [6:7511668312418028957:2277] 2025-06-03T10:29:59.659795Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, TxId: 2025-06-03T10:29:59.659820Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-03T10:29:59.659924Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ReadyState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, received request, proxyRequestId: 549 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [6:7511668445562022704:4487] database: /Root databaseId: /Root pool id: default 2025-06-03T10:29:59.659933Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ReadyState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, request placed into pool from cache: default 2025-06-03T10:29:59.660099Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1306: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, ExecutePhyTx, 
tx: 0x000072B2A1FB9198 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-03T10:29:59.660113Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1457: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, Sending to Executer TraceId: 0 8 2025-06-03T10:29:59.660125Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1515: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, Created new KQP executer: [6:7511668445562022707:4482] isRollback: 0 2025-06-03T10:29:59.661628Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-03T10:29:59.661656Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1306: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, ExecutePhyTx, tx: 0x000072B2A1FB9218 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-03T10:29:59.661829Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-03T10:29:59.661870Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, txInfo Status: Committed Kind: ReadOnly TotalDuration: 1.801 ServerDuration: 1.77 QueriesCount: 2 2025-06-03T10:29:59.661911Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:29:59.661927Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:29:59.661934Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, EndCleanup, isFinal: 0 2025-06-03T10:29:59.661946Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ExecuteState, TraceId: 01jwtnefqb2jgms7hmdh8xt50q, Sent query response back to proxy, proxyRequestId: 549, proxyId: [6:7511668312418028957:2277] 
2025-06-03T10:29:59.662045Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, TxId: 2025-06-03T10:29:59.662064Z node 6 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, TxId: 2025-06-03T10:29:59.662088Z node 6 :KQP_WORKLOAD_SERVICE TRACE: pool_handlers_actors.cpp:746: [WorkloadService] [TPoolHandlerActorBase] ActorId: [6:7511668316712997154:2348], DatabaseId: /Root, PoolId: sample_pool_id, succefully refreshed pool state, in flight: 0, delayed: 0 2025-06-03T10:29:59.662101Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:29:59.662110Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:29:59.662112Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:29:59.662114Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:29:59.662126Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=6&id=YzhjN2E2M2QtZTk1OWIwZDUtZWE0MDlhZDItMjcyNDBkOTA=, ActorId: [6:7511668445562022686:4482], ActorState: unknown state, Session actor destroyed >> Bloom::Conf [GOOD] >> Bloom::Hashes [GOOD] >> Bloom::Rater [GOOD] >> Bloom::Dipping >> Bloom::Dipping [GOOD] >> Bloom::Basics [GOOD] >> Bloom::Stairs >> KqpScripting::SecondaryIndexes [GOOD] |65.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |65.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut |65.8%| [LD] {RESULT} $(B)/ydb/core/mind/address_classification/ut/ydb-core-mind-address_classification-ut >> Bloom::Stairs [GOOD] >> BuildStatsBTreeIndex::Single [GOOD] >> BuildStatsBTreeIndex::Single_Slices [GOOD] >> BuildStatsBTreeIndex::Single_History >> DataCleanup::CleanupDataNoTables [GOOD] >> DataCleanup::CleanupDataNoTablesWithRestart [GOOD] >> DataCleanup::CleanupDataLog [GOOD] >> DataCleanup::CleanupData [GOOD] >> DataCleanup::CleanupDataMultipleFamilies >> BuildStatsBTreeIndex::Single_History [GOOD] |65.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage >> BuildStatsBTreeIndex::Single_History_Slices [GOOD] >> 
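For reference, the pool-state refresh query that the workload service runs in the log above (the RunDataQuery text inside the TRefreshPoolStateQuery records) is reproduced here verbatim, only reflowed for readability; $database_id and $pool_id are the parameters the service binds before execution:

    -- TRefreshPoolStateQuery::OnLeaseUpdated
    DECLARE $database_id AS Text;
    DECLARE $pool_id AS Text;

    SELECT COUNT(*) AS delayed_requests
    FROM `.metadata/workload_manager/delayed_requests`
    WHERE database = $database_id
      AND pool_id = $pool_id
      AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp())
      AND lease_deadline >= CurrentUtcTimestamp();

    SELECT COUNT(*) AS running_requests
    FROM `.metadata/workload_manager/running_requests`
    WHERE database = $database_id
      AND pool_id = $pool_id
      AND lease_deadline >= CurrentUtcTimestamp();

As the WHERE clauses show, both counts only consider rows whose lease_deadline has not yet passed, so requests with expired leases are ignored when the pool handler refreshes its delayed/in-flight totals.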
BuildStatsBTreeIndex::Single_Groups >> DataCleanup::CleanupDataMultipleFamilies [GOOD] >> DataCleanup::CleanupDataMultipleTables [GOOD] >> DataCleanup::CleanupDataWithFollowers [GOOD] >> DataCleanup::CleanupDataMultipleTimes [GOOD] >> DataCleanup::CleanupDataEmptyTable [GOOD] >> DataCleanup::CleanupDataWithRestarts [GOOD] >> DataCleanup::CleanupDataRetryWithNotGreaterGenerations [GOOD] >> DataCleanup::CleanupDataWithTabletGCErrors [GOOD] >> DataCleanup::CleanupDataWithSysTabletGCErrors >> BuildStatsBTreeIndex::Single_Groups [GOOD] >> BuildStatsBTreeIndex::Single_Groups_Slices [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History |65.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage |65.9%| [LD] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/ydb-tests-fq-control_plane_storage >> BuildStatsBTreeIndex::Single_Groups_History [GOOD] >> BuildStatsBTreeIndex::Single_Groups_History_Slices >> BuildStatsBTreeIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsBTreeIndex::Mixed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_export_reboots_s3/unittest >> TExportToS3WithRebootsTests::ForgetShouldSucceedOnSingleShardTableWithChangefeed [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:29:08.482885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:08.482924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:08.482931Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:08.482938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:08.482945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:08.482950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:08.482962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:08.482980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:08.483112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:08.483230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:08.499838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:08.499874Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:08.500028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:29:08.507756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:08.507814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:08.507868Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:08.509641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:08.509718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:08.509874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:08.509983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:08.511106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:08.511166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:08.511457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:08.511469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:08.511510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:08.511519Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:08.511525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:08.511549Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:08.513307Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:08.540708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:08.540825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.540910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:08.540972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:08.540987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.543962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:08.544010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:08.544095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.544109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:08.544117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:08.544123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:08.544740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.544755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:08.544762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:08.545138Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.545149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:08.545157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:08.545165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:08.546025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:08.546538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:08.546586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:08.546820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:08.546852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:08.546874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:08.546954Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
OORDINATOR: advance: minStep5000009 State->FrontStep: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 281474976710761 at step: 5000009 2025-06-03T10:30:00.206101Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000009, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:00.206136Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 523986012268 } } Step: 5000009 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:00.206147Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_rmdir.cpp:129: TRmDir HandleReply TEvOperationPlan, opId: 281474976710761:0, step: 5000009, at schemeshard: 72057594046678944 2025-06-03T10:30:00.206186Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_rmdir.cpp:180: RmDir is done, opId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-03T10:30:00.206255Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:30:00.206262Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:00.206276Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:30:00.206280Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:00.206297Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:30:00.206311Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-03T10:30:00.206319Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: false 2025-06-03T10:30:00.206330Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:00.206387Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-03T10:30:00.206396Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710761:0 2025-06-03T10:30:00.206410Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 2 2025-06-03T10:30:00.206419Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 281474976710761, publications: 2, subscribers: 1 2025-06-03T10:30:00.206425Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 281474976710761, [OwnerId: 72057594046678944, LocalPathId: 1], 14 2025-06-03T10:30:00.206429Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 
281474976710761, [OwnerId: 72057594046678944, LocalPathId: 6], 18446744073709551615 2025-06-03T10:30:00.206775Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.206812Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:00.206823Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:00.207739Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:00.207800Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-03T10:30:00.208297Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:00.208308Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:00.208379Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 281474976710761, path id: [OwnerId: 72057594046678944, LocalPathId: 6] 2025-06-03T10:30:00.208414Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:00.208422Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [122:206:2207], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 1 2025-06-03T10:30:00.208430Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [122:206:2207], at schemeshard: 72057594046678944, txId: 281474976710761, path id: 6 2025-06-03T10:30:00.208656Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.208676Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 14 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.208684Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:30:00.208691Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 14 2025-06-03T10:30:00.208698Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:30:00.208849Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.208864Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.208869Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 281474976710761 2025-06-03T10:30:00.208875Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 281474976710761, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 18446744073709551615 2025-06-03T10:30:00.208880Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-03T10:30:00.208894Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 281474976710761, subscribers: 1 2025-06-03T10:30:00.208901Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [122:126:2151] 2025-06-03T10:30:00.209028Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:00.209036Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-03T10:30:00.209070Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:30:00.209832Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.209946Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 281474976710761 2025-06-03T10:30:00.209966Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:30:00.209985Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:30:00.209998Z node 122 :EXPORT DEBUG: schemeshard_export__create.cpp:309: TExport::TTxProgress: DoExecute 2025-06-03T10:30:00.210005Z node 122 :EXPORT DEBUG: schemeshard_export__create.cpp:1232: TExport::TTxProgress: OnNotifyResult: txId# 281474976710761 2025-06-03T10:30:00.210011Z node 122 :EXPORT DEBUG: schemeshard_export__create.cpp:1263: 
TExport::TTxProgress: OnNotifyResult: txId# 281474976710761, id# 1004, itemIdx# 4294967295 2025-06-03T10:30:00.210079Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:00.210412Z node 122 :EXPORT DEBUG: schemeshard_export__create.cpp:329: TExport::TTxProgress: DoComplete TestWaitNotification wait txId: 1004 2025-06-03T10:30:00.210476Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:30:00.210488Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:30:00.210584Z node 122 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:30:00.210609Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:30:00.210615Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [122:972:2875] TestWaitNotification: OK eventTxId 1004 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/yql/unittest >> KqpScripting::SecondaryIndexes [GOOD] Test command err: Trying to start YDB, gRPC: 27781, MsgBus: 27735 2025-06-03T10:29:58.592876Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668444483421767:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:58.598079Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020e9/r3tmp/tmpOmerqC/pdisk_1.dat 2025-06-03T10:29:58.695199Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:58.696552Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668444483421599:2079] 1748946598589619 != 1748946598589622 TServer::EnableGrpc on GrpcPort 27781, node 1 2025-06-03T10:29:58.728802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:58.728824Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:58.728828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:58.728889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:58.748808Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:58.748849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:58.750163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27735 TClient is connected to server localhost:27735 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:58.842618Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:58.847579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:58.857152Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:58.904553Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:58.968668Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:58.981041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:59.052110Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668448778390530:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.052145Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.116817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.126724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.135874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.151227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.164795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.179731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.237473Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:29:59.250500Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668448778391184:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.250538Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.250605Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668448778391189:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:59.251514Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:29:59.261286Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668448778391191:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-03T10:29:59.334393Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668448778391242:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:29:59.499284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.582409Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946599624, txId: 281474976715674] shutting down
2025-06-03T10:29:59.592002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715677:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.607247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715678:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.611749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715679:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.619653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715680:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.634134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715681:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.640042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.708463Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946599750, txId: 281474976715683] shutting down
2025-06-03T10:29:59.718711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715686:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.728458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715687:0, at schemeshard: 72057594046644480
2025-06-03T10:29:59.797044Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946599841, txId: 281474976715688] shutting down
Trying to start YDB, gRPC: 1431, MsgBus: 27307
2025-06-03T10:30:00.150389Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668451764200362:2073];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:30:00.150415Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0020e9/r3tmp/tmpJOov5H/pdisk_1.dat
2025-06-03T10:30:00.173698Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 1431, node 2
2025-06-03T10:30:00.185644Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:30:00.185662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:30:00.185664Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:30:00.185729Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:27307
2025-06-03T10:30:00.248226Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:30:00.248260Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:30:00.249048Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:27307
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:30:00.260215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:30:00.262828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:30:00.267204Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
waiting...
waiting...
2025-06-03T10:30:00.289661Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:30:00.319597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.334442Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:30:00.587860Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668451764201951:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:00.587905Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:00.598978Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.609549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.620415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.634899Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.648133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.662486Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.676459Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.693372Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668451764202604:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:00.693423Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:00.693453Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668451764202609:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:00.694458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480
2025-06-03T10:30:00.703686Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668451764202611:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking }
2025-06-03T10:30:00.765943Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668451764202662:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:30:00.947704Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.960268Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480
2025-06-03T10:30:00.971748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480
>> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue
>> DataCleanup::CleanupDataWithSysTabletGCErrors [GOOD]
>> DBase::WideKey
>> BuildStatsBTreeIndex::Mixed [GOOD]
>> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionEnqueue [GOOD]
>> TFlatTableExecutor_ExecutorTxLimit::TestExecutorTxLimit [GOOD]
>> TFlatTableExecutor_Follower::BasicFollowerRead [GOOD]
>> TFlatTableExecutor_Follower::FollowerEarlyRebootHoles [GOOD]
>> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot
>> BuildStatsBTreeIndex::Mixed_Groups
>> DBase::WideKey [GOOD]
>> DBase::VersionPureMem
>> TFlatTableExecutor_Follower::FollowerAttachOnTxQueueScanSnapshot [GOOD]
>> TFlatTableExecutor_Follower::FollowerAttachAfterLoan [GOOD]
>> TFlatTableExecutor_Gc::TestFailedGcAfterReboot [GOOD]
>> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex
>> TBtreeIndexBuilder::NoNodes [GOOD]
>> TBtreeIndexBuilder::OneNode [GOOD]
>> TBtreeIndexBuilder::FewNodes [GOOD]
>> TBtreeIndexBuilder::SplitBySize [GOOD]
>> TBtreeIndexNode::TIsNullBitmap [GOOD]
>> TBtreeIndexNode::CompareTo [GOOD]
>> TBtreeIndexNode::Basics [GOOD]
>> TBtreeIndexNode::Group [GOOD]
>> TBtreeIndexNode::History [GOOD]
>> TBtreeIndexNode::OneKey [GOOD]
>> TBtreeIndexNode::Reusable [GOOD]
>> TBtreeIndexNode::CutKeys [GOOD]
>> TBtreeIndexTPart::Conf [GOOD]
>> TBtreeIndexTPart::NoNodes [GOOD]
>> TBtreeIndexTPart::OneNode [GOOD]
>> TBtreeIndexTPart::FewNodes [GOOD]
>> TBtreeIndexTPart::Erases [GOOD]
>> TBtreeIndexTPart::Groups [GOOD]
>> TBtreeIndexTPart::History [GOOD]
>> TBtreeIndexTPart::External
>> DBase::VersionPureMem [GOOD]
>> DBase::VersionPureParts
>> TFlatTableExecutor_ResourceProfile::TestExecutorStaticMemoryLimits [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataLimitExceeded [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorTxDataGC [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorTxPartialDataHold [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldAndUse [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestExecutorTxHoldOnRelease [GOOD]
>> TFlatTableExecutor_ResourceProfile::TestUpdateConfig [GOOD]
>> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan
>> BuildStatsBTreeIndex::Mixed_Groups [GOOD]
>> TFlatTableExecutor_IndexLoading::CalculateReadSize_FlatIndex [GOOD]
>> BuildStatsBTreeIndex::Mixed_Groups_History [GOOD]
>> BuildStatsFlatIndex::Single [GOOD]
>> BuildStatsFlatIndex::Single_Slices
>> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex
>> TBtreeIndexTPart::External [GOOD]
>> TChargeBTreeIndex::NoNodes
>> DBase::VersionPureParts [GOOD]
>> DBase::VersionCompactedMem
>> BuildStatsFlatIndex::Single_Slices [GOOD]
>> BuildStatsFlatIndex::Single_History [GOOD]
>> TFlatTableExecutor_IndexLoading::CalculateReadSize_BTreeIndex [GOOD]
>> BuildStatsFlatIndex::Single_History_Slices [GOOD]
>> BuildStatsFlatIndex::Single_Groups
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex
>> DBase::VersionCompactedMem [GOOD]
>> DBase::VersionCompactedParts [GOOD]
>> Memtable::Basics [GOOD]
>> Memtable::BasicsReverse [GOOD]
>> Memtable::Markers [GOOD]
>> Memtable::Overlap [GOOD]
>> Memtable::Wreck [GOOD]
>> Memtable::Erased
>> BuildStatsFlatIndex::Single_Groups [GOOD]
>> BuildStatsFlatIndex::Single_Groups_Slices [GOOD]
>> BuildStatsFlatIndex::Single_Groups_History [GOOD]
>> BuildStatsFlatIndex::Single_Groups_History_Slices
>> Memtable::Erased [GOOD]
>> NFwd_TBlobs::MemTableTest [GOOD]
>> NFwd_TBlobs::Lower [GOOD]
>> NFwd_TBlobs::Sieve [GOOD]
>> NFwd_TBlobs::SieveFiltered [GOOD]
>> NFwd_TBlobs::Basics [GOOD]
>> NFwd_TBlobs::Simple [GOOD]
>> NFwd_TBlobs::Shuffle [GOOD]
>> NFwd_TBlobs::Grow [GOOD]
>> NFwd_TBlobs::Trace [GOOD]
>> NFwd_TBlobs::Filtered [GOOD]
>> NFwd_TBTreeIndexCache::Basics [GOOD]
>> NFwd_TBTreeIndexCache::IndexPagesLocator [GOOD]
>> NFwd_TBTreeIndexCache::GetTwice [GOOD]
>> NFwd_TBTreeIndexCache::ForwardTwice [GOOD]
>> NFwd_TBTreeIndexCache::Forward_OnlyUsed [GOOD]
>> NFwd_TBTreeIndexCache::Skip_Done [GOOD]
>> NFwd_TBTreeIndexCache::Skip_Done_None [GOOD]
>> NFwd_TBTreeIndexCache::Skip_Keep [GOOD]
>> NFwd_TBTreeIndexCache::Skip_Wait [GOOD]
>> NFwd_TBTreeIndexCache::Trace_BTree [GOOD]
>> NFwd_TBTreeIndexCache::Trace_Data [GOOD]
>> NFwd_TBTreeIndexCache::End [GOOD]
>> NFwd_TBTreeIndexCache::Slices [GOOD]
>> NFwd_TBTreeIndexCache::ManyApplies [GOOD]
>> NFwd_TFlatIndexCache::Basics [GOOD]
>> NFwd_TFlatIndexCache::IndexPagesLocator [GOOD]
>> NFwd_TFlatIndexCache::GetTwice [GOOD]
>> NFwd_TFlatIndexCache::ForwardTwice [GOOD]
>> NFwd_TFlatIndexCache::Skip_Done [GOOD]
>> NFwd_TFlatIndexCache::Skip_Done_None [GOOD]
>> NFwd_TFlatIndexCache::Skip_Keep [GOOD]
>> NFwd_TFlatIndexCache::End [GOOD]
>> BuildStatsFlatIndex::Single_Groups_History_Slices [GOOD]
>> BuildStatsFlatIndex::Mixed [GOOD]
>> BuildStatsFlatIndex::Mixed_Groups [GOOD]
>> BuildStatsFlatIndex::Mixed_Groups_History
|65.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile
|65.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile
|65.9%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_volatile/ydb-core-tx-datashard-ut_volatile
>> TChargeBTreeIndex::NoNodes [GOOD]
>> TChargeBTreeIndex::NoNodes_Groups
>> BuildStatsFlatIndex::Mixed_Groups_History [GOOD]
>> BuildStatsFlatIndex::Serial
|65.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots
|65.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots
|65.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence_reboots/ydb-core-tx-schemeshard-ut_sequence_reboots
>> BuildStatsFlatIndex::Serial [GOOD]
>> BuildStatsFlatIndex::Serial_Groups [GOOD]
>> BuildStatsFlatIndex::Serial_Groups_History
>> KqpWorkloadServiceTables::TestLeaseExpiration [GOOD]
>> KqpWorkloadServiceTables::TestLeaseUpdates
>> BuildStatsFlatIndex::Serial_Groups_History [GOOD]
>> BuildStatsHistogram::Single
>> TFlatTableExecutor_SliceOverlapScan::TestSliceOverlapScan [GOOD]
>> TFlatTableExecutor_SnapshotWithCommits::SnapshotWithCommits [GOOD]
>> TFlatTableExecutor_StickyPages::TestNonSticky_FlatIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestNonSticky_BTreeIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestSticky [GOOD]
>> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex
>> TPart::State [GOOD]
>> TPart::Trivials [GOOD]
>> TPart::Basics [GOOD]
>> TPart::CellDefaults [GOOD]
>> TPart::Matter [GOOD]
>> TPart::External [GOOD]
>> TPart::Outer [GOOD]
>> TPart::MassCheck [GOOD]
>> TPart::WreckPart
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> NFwd_TFlatIndexCache::End [GOOD]
Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.674753Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| FAKE_ENV: Starting storage for BS group 0 00000.005 II| FAKE_ENV: Starting storage for BS group 1 00000.005 II| FAKE_ENV: Starting storage for BS group 2 00000.005 II| FAKE_ENV: Starting storage for BS group 3 00000.007 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.007 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.007 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.007 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.007 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {146b, 4} 00000.007 II| FAKE_ENV: DS.1 gone, left {105b, 3}, put {105b, 3} 00000.007 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.007 II| FAKE_ENV: All BS storage groups are stopped 00000.007 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.007 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.683833Z 00000.003 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.003 II| FAKE_ENV: Starting storage for BS group 0 00000.003 II| FAKE_ENV: Starting storage for BS group 1 00000.003 II| FAKE_ENV: Starting storage for BS group 2 00000.003 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {292b, 8} 00000.004 II| FAKE_ENV: DS.1 gone, left {210b, 6}, put {210b, 6} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.690026Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1
00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.005 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.005 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 76b} miss {0 0b} 00000.005 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.005 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1181b, 13} 00000.005 II| FAKE_ENV: DS.1 gone, left {909b, 3}, put {1913b, 12} 00000.005 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {132b, 2} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {116b, 2} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.697516Z 00000.003 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.003 II| FAKE_ENV: Starting storage for BS group 0 00000.003 II| FAKE_ENV: Starting storage for BS group 1 00000.003 II| FAKE_ENV: Starting storage for BS group 2 00000.003 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 1 reqs hit {1 102443b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {751b, 11} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.1 gone, left {541b, 3}, put {103970b, 10} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.705736Z 00000.003 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.003 II| FAKE_ENV: Starting storage for BS group 0 00000.003 II| FAKE_ENV: Starting storage for BS group 1 00000.003 II| FAKE_ENV: Starting storage for BS group 2 00000.003 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.018 II| TABLET_SAUSAGECACHE: Wakeup 1 ... 
unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.018 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.018 NN| TABLET_SAUSAGECACHE: Poison cache serviced 11 reqs hit {18 513007b} miss {0 0b} 00000.018 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.018 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {2093b, 23} 00000.018 II| FAKE_ENV: DS.1 gone, left {774b, 4}, put {210604b, 21} 00000.018 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {205178b, 4} 00000.018 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {102690b, 4} 00000.018 II| FAKE_ENV: All BS storage groups are stopped 00000.018 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 15.00s 00000.018 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.725642Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.005 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.005 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 307329b} miss {0 0b} 00000.005 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.005 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1828b, 23} 00000.005 II| FAKE_ENV: DS.1 gone, left {1247b, 3}, put {311467b, 22} 00000.005 II| FAKE_ENV: All BS storage groups are stopped 00000.005 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.005 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.731951Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 5 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4 reqs hit {8 307836b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {57b, 2}, put {1436b, 31} 00000.006 II| FAKE_ENV: DS.1 gone, left {629b, 3}, put {310476b, 16} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.739841Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.005 NN| 
TABLET_SAUSAGECACHE: Poison cache serviced 2 reqs hit {2 194646b} miss {0 0b} 00000.005 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.005 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1768b, 27} 00000.005 II| FAKE_ENV: DS.1 gone, left {732b, 6}, put {197813b, 24} 00000.005 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.005 II| FAKE_ENV: All BS storage groups are stopped 00000.005 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.005 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.746089Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.002 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.003 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.003 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.003 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.003 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {326b, 7} 00000.003 II| FAKE_ENV: DS.1 gone, left {418b, 4}, put {453b, 5} 00000.003 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.003 II| FAKE_ENV: All BS storage groups are stopped 00000.003 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.003 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.750145Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 ... blocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A cookie 0 00000.016 II| TABLET_SAUSAGECACHE: Wakeup 1 ... unblocking NKikimr::TEvBlobStorage::TEvCollectGarbage from FLAT_EXECUTOR to FAKE_ENV_A 00000.017 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.017 NN| TABLET_SAUSAGECACHE: Poison cache serviced 6 reqs hit {8 410030b} miss {0 0b} 00000.017 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.017 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {1492b, 23} 00000.017 II| FAKE_ENV: DS.1 gone, left {504b, 4}, put {310786b, 20} 00000.017 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.017 II| FAKE_ENV: All BS storage groups are stopped 00000.017 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 15.00s 00000.017 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 16}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:01.768881Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for ... 
3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 
Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 
: 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, 
[38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900}
>> TPartSlice::TrivialMerge [GOOD]
|66.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber
|66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber
|66.0%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_subscriber/ydb-core-tx-scheme_board-ut_subscriber
>> TPartSlice::SupersetByRowId [GOOD]
>> TPartSlice::Subtract [GOOD]
>> TPartSlice::UnsplitBorrow [GOOD]
>> TPartSliceLoader::RestoreMissingSlice [GOOD]
>> TPartSliceLoader::RestoreOneSlice [GOOD]
>> TPartSliceLoader::RestoreMissingSliceFullScreen [GOOD]
>> TPartSliceLoader::RestoreFromScreenIndexKeys [GOOD]
>> TPartSliceLoader::RestoreFromScreenDataKeys [GOOD]
>> TRowVersionRangesTest::SimpleInserts [GOOD]
>> TRowVersionRangesTest::MergeFailLeft [GOOD]
>> TRowVersionRangesTest::MergeFailRight [GOOD]
>> TRowVersionRangesTest::MergeFailOuter [GOOD]
>> TRowVersionRangesTest::MergeFailInner [GOOD]
>> TRowVersionRangesTest::MergeExtendLeft [GOOD]
>> TRowVersionRangesTest::MergeExtendLeftInner [GOOD]
>> TRowVersionRangesTest::MergeExtendLeftComplete [GOOD]
>> TRowVersionRangesTest::MergeExtendRight [GOOD]
>> TRowVersionRangesTest::MergeExtendRightInner [GOOD]
>> TRowVersionRangesTest::MergeExtendRightComplete [GOOD]
>> TRowVersionRangesTest::MergeExtendBoth [GOOD]
>> TRowVersionRangesTest::MergeHoleExact [GOOD]
>> TRowVersionRangesTest::MergeHoleInner [GOOD]
>> TRowVersionRangesTest::MergeHoleOuter [GOOD]
>> TRowVersionRangesTest::MergeAllOuter [GOOD]
>> TRowVersionRangesTest::MergeAllInner [GOOD]
>> TRowVersionRangesTest::MergeAllEdges [GOOD]
>> TRowVersionRangesTest::ContainsEmpty [GOOD]
>> TRowVersionRangesTest::ContainsNonEmpty [GOOD]
>> TRowVersionRangesTest::ContainsInvalid [GOOD]
>> TRowVersionRangesTest::AdjustDown [GOOD]
>> TRowVersionRangesTest::AdjustDownSnapshot [GOOD]
>> TRowVersionRangesTest::SteppedCookieAllocatorOrder [GOOD]
>> TRowVersionRangesTest::SteppedCookieAllocatorLowerBound [GOOD]
>> TS3FIFOCache::Touch [GOOD]
>> TS3FIFOCache::Touch_MainQueue [GOOD]
>> TS3FIFOCache::EvictNext [GOOD]
>> TS3FIFOCache::UpdateLimit [GOOD]
>> TS3FIFOCache::Erase [GOOD]
>> TS3FIFOCache::Random
>> TFlatTableExecutor_StickyPages::TestNonStickyGroup_FlatIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestNonStickyGroup_BTreeIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestStickyMain [GOOD]
>> TFlatTableExecutor_StickyPages::TestStickyAlt_FlatIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex
>> TS3FIFOCache::Random [GOOD]
>> TS3FIFOGhostQueue::Basics [GOOD]
>> TScheme::Shapshot [GOOD]
>> TScheme::Delta [GOOD]
>> TScheme::Policy [GOOD]
>> TScreen::Cuts [GOOD]
>> TScreen::Join [GOOD]
>> TScreen::Sequential
>> TFlatTableExecutor_StickyPages::TestStickyAlt_BTreeIndex [GOOD]
>> TFlatTableExecutor_StickyPages::TestStickyAll [GOOD]
>> TFlatTableExecutor_StickyPages::TestAlterAddFamilySticky [GOOD]
>> TFlatTableExecutor_StickyPages::TestAlterAddFamilyPartiallySticky [GOOD]
>> TFlatTableExecutor_VersionedLargeBlobs::TestMultiVersionCompactionLargeBlobs [GOOD]
>> TFlatTableExecutor_VersionedRows::TestVersionedRows
>> TPart::WreckPart [GOOD]
>> TPart::PageFailEnv
>> TScreen::Sequential [GOOD]
>> TScreen::Random
>> TFlatTableExecutor_VersionedRows::TestVersionedRows [GOOD]
>> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs
>> TScreen::Random [GOOD]
>> TScreen::Shrink [GOOD]
>> TScreen::Cook
[GOOD] >> TSharedPageCache::Limits >> TPart::PageFailEnv [GOOD] >> TPart::ForwardEnv >> TPart::ForwardEnv [GOOD] >> TPart::WreckPartColumnGroups >> TSharedPageCache::Limits [GOOD] >> TSharedPageCache::Limits_Config >> BuildStatsHistogram::Single [GOOD] >> BuildStatsHistogram::Single_Slices >> TSharedPageCache::Limits_Config [GOOD] >> TSharedPageCache::ClockPro >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> TPart::WreckPartColumnGroups [GOOD] >> TPart::PageFailEnvColumnGroups >> TSharedPageCache::ClockPro [GOOD] >> TSharedPageCache::BigCache_BTreeIndex >> TSharedPageCache::BigCache_BTreeIndex [GOOD] >> TSharedPageCache::BigCache_FlatIndex >> TSharedPageCache::ThreeLeveledLRU >> TClockProCache::Touch [GOOD] >> TClockProCache::UpdateLimit [GOOD] >> TCompaction::OneMemtable [GOOD] >> TCompaction::ManyParts >> TSharedPageCache::BigCache_FlatIndex [GOOD] >> TSharedPageCache::MiddleCache_BTreeIndex >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:84:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:84:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:88:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:88:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:90:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:90:2117] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 
bletID 72057594037927937 is [13:57:2097] sender: [13:88:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:90:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:92:2057] recipient: [13:91:2117] Leader for TabletID 72057594037927937 is [13:93:2118] sender: [13:94:2057] recipient: [13:91:2117] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:93:2118] Leader for TabletID 72057594037927937 is [13:93:2118] sender: [13:179:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:77:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:80:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:81:2057] recipient: [16:79:2110] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:83:2057] recipient: [16:79:2110] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:82:2111] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:168:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:77:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:79:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:81:2057] recipient: [17:80:2110] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:83:2057] recipient: [17:80:2110] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! 
!Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! new actor is[17:82:2111] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:168:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:78:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:81:2057] recipient: [18:80:2110] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:82:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:84:2057] recipient: [18:80:2110] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! new actor is[18:83:2111] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:169:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:81:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:84:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:85:2057] recipient: [19:83:2113] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:87:2057] recipient: [19:83:2113] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:86:2114] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:172:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:81:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:84:2057] recipient: [20:83:2113] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:85:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:87:2057] recipient: [20:83:2113] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! 
new actor is[20:86:2114] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:172:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:82:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:85:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:86:2057] recipient: [21:84:2113] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:88:2057] recipient: [21:84:2113] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! new actor is[21:87:2114] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:105:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:84:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:86:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:88:2057] recipient: [22:87:2115] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:90:2057] recipient: [22:87:2115] !Reboot 72057594037927937 (actor [22:57:2097]) rebooted! !Reboot 72057594037927937 (actor [22:57:2097]) tablet resolver refreshed! new actor is[22:89:2116] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:175:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:58:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:75:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:84:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:87:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:88:2057] recipient: [23:86:2115] Leader for TabletID 72057594037927937 is [23:89:2116] sender: [23:90:2057] recipient: [23:86:2115] !Reboot 72057594037927937 (actor [23:57:2097]) rebooted! !Reboot 72057594037927937 (actor [23:57:2097]) tablet resolver refreshed! 
new actor is[23:89:2116] Leader for TabletID 72057594037927937 is [23:89:2116] sender: [23:175:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:58:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:75:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:85:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:87:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:89:2057] recipient: [24:88:2115] Leader for TabletID 72057594037927937 is [24:90:2116] sender: [24:91:2057] recipient: [24:88:2115] !Reboot 72057594037927937 (actor [24:57:2097]) rebooted! !Reboot 72057594037927937 (actor [24:57:2097]) tablet resolver refreshed! new actor is[24:90:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:58:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:75:2057] recipient: [25:14:2061] >> TPart::PageFailEnvColumnGroups [GOOD] >> TPart::ForwardEnvColumnGroups [GOOD] >> TPart::Versions [GOOD] >> TPart::ManyVersions >> TCompaction::ManyParts [GOOD] >> TCompaction::BootAbort >> TPart::ManyVersions [GOOD] >> TPart::ManyDeltas [GOOD] >> TPart::CutKeys_Lz4 [GOOD] >> TPart::CutKeys_Seek [GOOD] >> TPart::CutKeys_SeekPages [GOOD] >> TPart::CutKeys_SeekSlices [GOOD] >> TPart::CutKeys_CutString [GOOD] >> TPart::CutKeys_CutUtf8String [GOOD] >> TPartBtreeIndexIteration::NoNodes >> TSharedPageCache::ThreeLeveledLRU [GOOD] >> TSharedPageCache::S3FIFO >> TFlatTableExecutor_VersionedRows::TestVersionedRowsSmallBlobs [GOOD] >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs >> TCompaction::BootAbort [GOOD] >> TCompaction::Defaults [GOOD] >> TCompaction::Merges [GOOD] >> TCompactionMulti::ManyParts >> BuildStatsHistogram::Single_Slices [GOOD] >> BuildStatsHistogram::Single_History >> TCompactionMulti::ManyParts [GOOD] >> TCompactionMulti::MainPageCollectionEdge >> TSharedPageCache::S3FIFO [GOOD] >> TSharedPageCache::ReplacementPolicySwitch >> TSharedPageCache::ReplacementPolicySwitch [GOOD] >> TSharedPageCache::MiddleCache_FlatIndex >> TSharedPageCache::MiddleCache_FlatIndex [GOOD] >> TSharedPageCache::ZeroCache_BTreeIndex >> TPartBtreeIndexIteration::NoNodes [GOOD] >> TPartBtreeIndexIteration::NoNodes_Groups >> TSharedPageCache::ZeroCache_BTreeIndex [GOOD] >> TSharedPageCache::ZeroCache_FlatIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TSharedPageCache::MiddleCache_BTreeIndex [GOOD] Test command err: SmallQueue: MainQueue: {11 0f 1b}, {14 1f 1b}, {15 2f 1b}, {18 0f 1b}, {19 0f 1b}, {23 0f 1b}, {27 0f 1b} GhostQueue: 9, 12, 13, 16, 17, 20, 21, 24, 25, 28 0.29361 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:03.398691Z 00000.006 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.006 II| FAKE_ENV: Starting storage for BS group 
0 00000.006 II| FAKE_ENV: Starting storage for BS group 1 00000.006 II| FAKE_ENV: Starting storage for BS group 2 00000.006 II| FAKE_ENV: Starting storage for BS group 3 00000.007 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.007 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.008 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.008 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.008 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 
102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b 
alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxW ... TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.091 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [4 4] 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.091 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 4 ] 00000.091 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 96 ] owner [6:580:2605] 00000.091 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 4 ] 00000.091 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 4 ] cookie 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{96, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.091 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 4 117 111 ] 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.091 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [3 4] 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.091 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 3 ] 00000.091 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 95 ] owner [6:580:2605] 00000.091 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 3 ] 00000.091 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 3 ] cookie 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{97, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.091 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 3 117 111 ] 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.091 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [2 4] 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.091 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 2 ] 00000.091 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 93 ] owner [6:580:2605] 00000.091 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 2 ] 00000.091 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 2 ] cookie 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.091 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{98, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.091 TT| TABLET_SAUSAGECACHE: Touch page collection 
[1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 2 117 111 ] 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.092 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [1 4] 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.092 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 1 ] 00000.092 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 92 ] owner [6:580:2605] 00000.092 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 1 ] 00000.092 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 1 ] cookie 1 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{99, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.092 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 1 117 111 ] 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 1 -> retry Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} touch new 796b, 102443b lo load (103239b in total), 0b requested for data (4194304b in total) 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} took 8388608b of static mem, Memory{8388608 dyn 0} 00000.092 D3| TABLET_EXECUTOR: Leader{1:3:2} requests PageCollection [1:2:103:1:12288:2976:0] 102443 bytes, 1 pages: [0 4] 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} postponed, 102443b, pages {1 wait, 1 load}, freshly touched 4 pages 00000.092 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] cookie 1 class Online from cache [ ] already requested [ ] to request [ 0 ] 00000.092 DD| TABLET_SAUSAGECACHE: Drop page collection [1:2:103:1:12288:2976:0] pages [ 91 ] owner [6:580:2605] 00000.092 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:103:1:12288:2976:0] status OK pages [ 0 ] 00000.092 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:103:1:12288:2976:0] owner [6:580:2605] class Online pages [ 0 ] cookie 1 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{1 pages [1:2:103:1:12288:2976:0] ok OK}, category 1 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} hope 2 -> done Change{103, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.092 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{100, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxReadRow} release 8388608b of static, Memory{0 dyn 0} 00000.092 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] pages [ 14 0 117 111 ] Counters: Active:8313958/8388608, Passive:0, MemLimit:-1 00000.092 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.092 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 10255801b +(0, 0b), 1 trc, -48685b acc} 00000.093 DD| TABLET_SAUSAGECACHE: Unregister owner [6:580:2605] 00000.093 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:103:1:12288:2976:0] owner [6:580:2605] 00000.093 DD| TABLET_SAUSAGECACHE: Remove owner [6:580:2605] 00000.093 NN| TABLET_SAUSAGECACHE: Poison cache serviced 138 reqs hit {0 0b} miss {139 12197190b} 00000.093 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.093 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {10191b, 107} 00000.093 II| FAKE_ENV: DS.1 gone, left {10257096b, 5}, put {10305919b, 107} 00000.093 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.093 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.093 II| FAKE_ENV: All BS storage groups are stopped 00000.093 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.093 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2741}, stopped >> TSharedPageCache::ZeroCache_FlatIndex [GOOD] >> TSharedPageCache_Actor::Request_Basics [GOOD] >> TSharedPageCache_Actor::Request_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] >> TCompactionMulti::MainPageCollectionEdge [GOOD] >> TCompactionMulti::MainPageCollectionEdgeMany >> TSharedPageCache_Actor::Request_Queue [GOOD] >> TSharedPageCache_Actor::Request_Queue_Failed [GOOD] >> TSharedPageCache_Actor::Request_Queue_Fast [GOOD] >> TSharedPageCache_Actor::Request_Sequential [GOOD] >> TSharedPageCache_Actor::Request_Cached [GOOD] >> TSharedPageCache_Actor::Request_Different_Collections >> TCompactionMulti::MainPageCollectionEdgeMany [GOOD] >> TCompactionMulti::MainPageCollectionOverflow [GOOD] >> TCompactionMulti::MainPageCollectionOverflowSmallRefs [GOOD] >> TCompactionMulti::MainPageCollectionOverflowLargeRefs [GOOD] >> TExecutorDb::RandomOps >> TSharedPageCache_Actor::Request_Different_Collections [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages [GOOD] >> TSharedPageCache_Actor::Request_Different_Pages_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Subset [GOOD] >> 
TSharedPageCache_Actor::Request_Subset_Shuffled [GOOD] >> TSharedPageCache_Actor::Request_Superset [GOOD] >> TSharedPageCache_Actor::Request_Superset_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Reversed >> TSharedPageCache_Actor::Request_Crossing_Reversed [GOOD] >> TSharedPageCache_Actor::Request_Crossing_Shuffled [GOOD] >> TSharedPageCache_Actor::Attach_Basics [GOOD] >> TSharedPageCache_Actor::Attach_Request [GOOD] >> TSharedPageCache_Actor::Detach_Basics [GOOD] >> TSharedPageCache_Actor::Detach_Cached [GOOD] >> TSharedPageCache_Actor::Detach_Expired [GOOD] >> TSharedPageCache_Actor::Detach_InFly [GOOD] >> TSharedPageCache_Actor::Detach_Queued >> BuildStatsHistogram::Three_Serial_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Serial_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Serial_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Serial_Small_0_Levels >> TSharedPageCache_Actor::Detach_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Basics [GOOD] >> TSharedPageCache_Actor::Unregister_Cached [GOOD] >> TSharedPageCache_Actor::Unregister_Expired [GOOD] >> TSharedPageCache_Actor::Unregister_InFly [GOOD] >> TSharedPageCache_Actor::Unregister_Queued [GOOD] >> TSharedPageCache_Actor::Unregister_Queued_Pending >> BuildStatsHistogram::Three_Serial_Small_0_Levels [GOOD] >> BuildStatsMixedIndex::Single [GOOD] >> BuildStatsMixedIndex::Single_Slices [GOOD] >> BuildStatsMixedIndex::Single_History >> TSharedPageCache_Actor::Unregister_Queued_Pending [GOOD] >> TSwitchableCache::Touch [GOOD] >> TSwitchableCache::Erase [GOOD] >> TSwitchableCache::EvictNext [GOOD] >> TSwitchableCache::UpdateLimit [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_All [GOOD] >> TSwitchableCache::Switch_Touch_RotatePages_Parts [GOOD] >> TSwitchableCache::Switch_RotatePages_Force [GOOD] >> TSwitchableCache::Switch_RotatePages_Evicts [GOOD] >> TSwitchableCache::Switch_Touch [GOOD] >> TSwitchableCache::Switch_Erase [GOOD] >> TSwitchableCache::Switch_EvictNext [GOOD] >> TSwitchableCache::Switch_UpdateLimit [GOOD] >> TVersions::WreckHead >> BuildStatsMixedIndex::Single_History [GOOD] >> BuildStatsMixedIndex::Single_History_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups [GOOD] >> BuildStatsMixedIndex::Single_Groups_Slices >> TFlatTableExecutor_VersionedRows::TestVersionedRowsLargeBlobs [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart |66.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr_reboots/unittest >> BuildStatsMixedIndex::Single_Groups_Slices [GOOD] >> BuildStatsMixedIndex::Single_Groups_History [GOOD] >> BuildStatsMixedIndex::Single_Groups_History_Slices >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2NoRestart [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1 [GOOD] >> TFlatTableRenameTableAndColumn::TestSchema1ToSchema2ToSchema1ToSchema2 [GOOD] >> TGenCompaction::OverloadFactorDuringForceCompaction >> BuildStatsMixedIndex::Single_Groups_History_Slices [GOOD] >> BuildStatsMixedIndex::Mixed [GOOD] >> BuildStatsMixedIndex::Mixed_Groups ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-03T10:28:47.308154Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668138087526586:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.308246Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:47.337889Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e2f/r3tmp/tmp4tkJqn/pdisk_1.dat 2025-06-03T10:28:47.373413Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:47.373869Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668138087526424:2079] 1748946527306810 != 1748946527306813 TServer::EnableGrpc on GrpcPort 18850, node 1 2025-06-03T10:28:47.391154Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000e2f/r3tmp/yandexqH0UnC.tmp 2025-06-03T10:28:47.391176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000e2f/r3tmp/yandexqH0UnC.tmp 2025-06-03T10:28:47.391270Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000e2f/r3tmp/yandexqH0UnC.tmp 2025-06-03T10:28:47.391328Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:47.396224Z INFO: TTestServer started on Port 5509 GrpcPort 18850 TClient is connected to server localhost:5509 2025-06-03T10:28:47.410660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.410693Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.411777Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:18850 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.430507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:28:47.441878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:47.755651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138087527234:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.755681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.756081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138087527246:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.756260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138087527251:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.756283Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.756992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.759589Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668138087527249:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:28:47.804333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.812629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.818390Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668138087527431:2505] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.837005Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668138087527448:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:28:47.837124Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NWM1OGNlNDEtNWQ5MjM1MDQtY2VkMjBjNDQtNWNlMGIyNjY=, ActorId: [1:7511668138087527231:2334], ActorState: ExecuteState, TraceId: 01jwtnc9g84zzn1jtvbjdr0k9w, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:28:47.837683Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:28:47.875422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668138087527610:2609] 2025-06-03T10:28:52.307732Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668138087526586:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.307783Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.053474Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.057210Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.057599Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668163857331567:2673], Recipient [1:7511668138087526892:2202]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.057610Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.057613Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.057622Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668163857331563:2670], Recipient [1:7511668138087526892:2202]: {TEvModifySchemeTransaction txid# 281474976710673 TabletId# 72057594046644480} 2025-06-03T10:28:53.057624Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.063866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresho ... 1], Cookie: 0 2025-06-03T10:30:05.063449Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188506, Sender [7:7511668465543846733:2794], Recipient [7:7511668465543846809:2801]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-06-03T10:30:05.063453Z node 7 :PERSQUEUE TRACE: partition.h:591: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-06-03T10:30:05.063461Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:30:05.063467Z node 7 :PERSQUEUE TRACE: partition_write.cpp:854: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 2025-06-03T10:30:05.063478Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.063500Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.063503Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.063510Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:05.063519Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668474133781704:3352], Recipient [7:7511668465543846737:2795]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063522Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063525Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037897] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063527Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037897] Destroy direct read session test-consumer_7_1_17188449387638599426_v1 2025-06-03T10:30:05.063530Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037897] server disconnected, pipe [7:7511668474133781702:2885] destroyed 2025-06-03T10:30:05.063536Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668474133781697:3350], Recipient [7:7511668435479074425:2455]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063537Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063539Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:05.063541Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_1_17188449387638599426_v1 2025-06-03T10:30:05.063543Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668474133781696:2884] destroyed 2025-06-03T10:30:05.063548Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_17188449387638599426_v1 2025-06-03T10:30:05.063551Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_1_17188449387638599426_v1 2025-06-03T10:30:05.065527Z :DEBUG: [/Root] TraceId [] SessionId [producer-1|e69ad6c7-a44468c2-862f283a-24abe512_0] PartitionId [2] Generation [1] Write session: OnReadDone gRpcStatusCode: 1, Msg: CANCELLED, Details: , InternalError: 0 2025-06-03T10:30:05.065548Z :TRACE: [/Root] TRACE_EVENT Error status=CLIENT_CANCELLED 2025-06-03T10:30:05.065552Z :DEBUG: [/Root] TraceId [] SessionId [producer-1|e69ad6c7-a44468c2-862f283a-24abe512_0] PartitionId [2] Generation [1] Write session is aborting and will not restart 2025-06-03T10:30:05.076526Z :DEBUG: [/Root] TraceId [] SessionId [producer-1|e69ad6c7-a44468c2-862f283a-24abe512_0] PartitionId [2] Generation [1] Write session: destroy 2025-06-03T10:30:05.107476Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668435479074425:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668435479074482:2458], Cookie: 0 2025-06-03T10:30:05.107504Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668435479074482:2458]: 
NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.107508Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.107522Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.107545Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.107548Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.107552Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:05.142269Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668465543846737:2795], Partition 1, Sender [0:0:0], Recipient [7:7511668465543846813:2804], Cookie: 0 2025-06-03T10:30:05.142292Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668465543846813:2804]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.142297Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.142313Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.142336Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.142339Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.142347Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:05.142360Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668465543846733:2794], Partition 2, Sender [0:0:0], Recipient [7:7511668465543846809:2801], Cookie: 0 2025-06-03T10:30:05.142368Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668465543846809:2801]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.142370Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.142375Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.142383Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.142385Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.142389Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:05.208015Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668435479074425:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668435479074482:2458], Cookie: 0 2025-06-03T10:30:05.208038Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668435479074482:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.208043Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.208062Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.208087Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.208090Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.208099Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:05.242816Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668465543846737:2795], Partition 1, Sender [0:0:0], Recipient [7:7511668465543846813:2804], Cookie: 0 2025-06-03T10:30:05.242839Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668465543846813:2804]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.242845Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.242863Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.242890Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:05.242893Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:05.242902Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:05.242914Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668465543846733:2794], Partition 2, Sender [0:0:0], Recipient [7:7511668465543846809:2801], Cookie: 0 2025-06-03T10:30:05.242918Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668465543846809:2801]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.242920Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:05.242925Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:05.242932Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest
2025-06-03T10:30:05.242935Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests.
2025-06-03T10:30:05.242939Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0
>> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD]
>> BuildStatsMixedIndex::Mixed_Groups [GOOD]
>> BuildStatsMixedIndex::Mixed_Groups_History [GOOD]
>> BuildStatsMixedIndex::Serial
>> BuildStatsHistogram::Single_History [GOOD]
>> BuildStatsHistogram::Single_History_Slices
|66.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut
|66.0%| [LD] {RESULT} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut
|66.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/proxy_service/ut/ydb-core-kqp-proxy_service-ut
>> TGenCompaction::OverloadFactorDuringForceCompaction [GOOD]
>> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD]
>> BuildStatsMixedIndex::Serial [GOOD]
>> TGenCompaction::ForcedCompactionNoGenerations [GOOD]
>> BuildStatsMixedIndex::Serial_Groups [GOOD]
>> BuildStatsMixedIndex::Serial_Groups_History
>> TGenCompaction::ForcedCompactionWithGenerations [GOOD]
>> TGenCompaction::ForcedCompactionWithFinalParts [GOOD]
>> TGenCompaction::ForcedCompactionByDeletedRows [GOOD]
>> TGenCompaction::ForcedCompactionByUnreachableMvccData [GOOD]
>> TGenCompaction::ForcedCompactionByUnreachableMvccDataRestart [GOOD]
>> TGenCompaction::ForcedCompactionByUnreachableMvccDataBorrowed [GOOD]
>> TIterator::Basics [GOOD]
>> TIterator::External [GOOD]
>> TIterator::Single
>> ReadIteratorExternalBlobs::ExtBlobsWithDeletesInTheMiddle [GOOD]
>> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded
>> TIterator::Single [GOOD]
>> TIterator::SingleReverse
>> BuildStatsMixedIndex::Serial_Groups_History [GOOD]
>> BuildStatsMixedIndex::Single_LowResolution [GOOD]
>> BuildStatsMixedIndex::Single_Slices_LowResolution
>> BuildStatsMixedIndex::Single_Slices_LowResolution [GOOD]
>> BuildStatsMixedIndex::Single_Groups_LowResolution [GOOD]
>> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution
>> IndexBuildTestReboots::DropIndexWithDataColumns [GOOD]
>> TIterator::SingleReverse [GOOD]
>> TIterator::Mixed
>> BuildStatsMixedIndex::Single_Groups_Slices_LowResolution [GOOD]
>> BuildStatsMixedIndex::Single_Groups_History_LowResolution
>> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD]
>> TPartBtreeIndexIteration::NoNodes_Groups [GOOD]
>> TPartBtreeIndexIteration::NoNodes_History
>> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD]
|66.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOkWithNewApi [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
IGNORE Leader for TabletID
72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:80:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:168:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:78:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:82:2057] recipient: [5:80:2110] Leader for TabletID 72057594037927937 is [5:83:2111] sender: [5:84:2057] recipient: [5:80:2110] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:83:2111] Leader for TabletID 72057594037927937 is [5:83:2111] sender: [5:169:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:172:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:82:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:85:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:86:2057] recipient: [8:84:2113] Leader for TabletID 72057594037927937 is [8:87:2114] sender: [8:88:2057] recipient: [8:84:2113] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:87:2114] Leader for TabletID 72057594037927937 is [8:87:2114] sender: [8:105:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:175:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:85:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:88:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:87:2115] Leader for TabletID 72057594037927937 is [11:90:2116] sender: [11:91:2057] recipient: [11:87:2115] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! 
new actor is[11:90:2116] Leader for TabletID 72057594037927937 is [11:90:2116] sender: [11:176:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:86:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:89:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:90:2057] recipient: [12:88:2116] Leader for TabletID 72057594037927937 is [12:91:2117] sender: [12:92:2057] recipient: [12:88:2116] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! new actor is[12:91:2117] Leader for TabletID 72057594037927937 is [12:91:2117] sender: [12:111:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:87:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:90:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:91:2057] recipient: [13:89:2117] Leader for TabletID 72057594037927937 is [13:92:2118] sender: [13:93:2057] recipient: [13:89:2117] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:92:2118] Leader for TabletID 72057594037927937 is [13:92:2118] sender: [13:112:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:90:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:93:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:94:2057] recipient: [14:92:2120] Leader for TabletID 72057594037927937 is [14:95:2121] sender: [14:96:2057] recipient: [14:92:2120] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! 
new actor is[14:95:2121]
Leader for TabletID 72057594037927937 is [14:95:2121] sender: [14:181:2057] recipient: [14:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095]
Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095]
Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061]
!Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvRead !
Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:90:2057] recipient: [15:36:2083]
Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:93:2057] recipient: [15:14:2061]
Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:94:2057] recipient: [15:92:2120]
Leader for TabletID 72057594037927937 is [15:95:2121] sender: [15:96:2057] recipient: [15:92:2120]
!Reboot 72057594037927937 (actor [15:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed!
new actor is[15:95:2121]
Leader for TabletID 72057594037927937 is [15:95:2121] sender: [15:181:2057] recipient: [15:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095]
Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095]
Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061]
>> BuildStatsMixedIndex::Single_Groups_History_LowResolution [GOOD]
>> BuildStatsMixedIndex::Single_Groups_History_Slices_LowResolution [GOOD]
>> Charge::Lookups [GOOD]
>> Charge::ByKeysBasics [GOOD]
>> Charge::ByKeysGroups [GOOD]
>> Charge::ByKeysGroupsLimits [GOOD]
>> Charge::ByKeysLimits [GOOD]
>> Charge::ByKeysReverse [GOOD]
>> Charge::ByKeysHistory [GOOD]
>> Charge::ByKeysIndex [GOOD]
>> Charge::ByRows [GOOD]
>> Charge::ByRowsReverse [GOOD]
>> Charge::ByRowsLimits [GOOD]
>> Charge::ByRowsLimitsReverse [GOOD]
>> DBase::Basics [GOOD]
>> DBase::Select [GOOD]
>> DBase::Defaults [GOOD]
>> DBase::Subsets [GOOD]
>> DBase::Garbage [GOOD]
>> DBase::Affects [GOOD]
>> DBase::Annex
>> TChargeBTreeIndex::NoNodes_Groups [GOOD]
>> TChargeBTreeIndex::NoNodes_History
>> DBase::Annex [GOOD]
>> DBase::AnnexRollbackChanges [GOOD]
>> DBase::Outer [GOOD]
>> DBase::VersionBasics [GOOD]
>> DBase::KIKIMR_15506_MissingSnapshotKeys [GOOD]
>> DBase::EraseCacheWithUncommittedChanges [GOOD]
>> DBase::EraseCacheWithUncommittedChangesCompacted [GOOD]
>> DBase::AlterAndUpsertChangesVisibility [GOOD]
>> DBase::UncommittedChangesVisibility [GOOD]
>> DBase::UncommittedChangesCommitWithUpdates [GOOD]
>> DBase::ReplayNewTable [GOOD]
>> DBase::SnapshotNewTable [GOOD]
>> DBase::DropModifiedTable [GOOD]
>> DBase::KIKIMR_15598_Many_MemTables
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::TwoServerlessTwoSharedDbs [GOOD]
Test command err:
2025-06-03T10:26:32.236023Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:332:2219], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:32.236095Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:32.236114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ba9/r3tmp/tmpuHHfX7/pdisk_1.dat 2025-06-03T10:26:32.358030Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20973, node 1 2025-06-03T10:26:32.452305Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:32.452325Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:32.452329Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:32.452419Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:32.452912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:32.540238Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:32.540292Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:32.551829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4174 2025-06-03T10:26:32.909141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:33.808825Z node 3 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 3 2025-06-03T10:26:33.825442Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:33.825486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:33.859266Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:26:33.860054Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:34.022691Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.022856Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023001Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:34.023032Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023079Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023093Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023107Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023122Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.023140Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:34.178433Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:34.178484Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:34.190550Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:34.245365Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:34.259659Z node 3 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:34.259691Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:34.268064Z node 3 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:34.268269Z node 3 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:34.268292Z node 3 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:34.268296Z node 3 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:34.268300Z node 3 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:34.268305Z node 3 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:34.268309Z node 3 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:34.268314Z node 3 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:34.268449Z node 3 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:34.285806Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:34.285857Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [3:1948:2601], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:34.287820Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1959:2610] 2025-06-03T10:26:34.289501Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [3:1994:2625] 
2025-06-03T10:26:34.289645Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [3:1994:2625], schemeshard id = 72075186224037897 2025-06-03T10:26:34.291700Z node 3 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Shared1 2025-06-03T10:26:34.299751Z node 3 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:34.299780Z node 3 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:34.299794Z node 3 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Shared1/.metadata/_statistics 2025-06-03T10:26:34.303987Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:34.306388Z node 3 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:34.306437Z node 3 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:34.430881Z node 3 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:34.562234Z node 3 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:34.631363Z node 3 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. 
Column diff is empty, finishing 2025-06-03T10:26:35.199960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:26:36.094422Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:36.106775Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:36.106837Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:36.162319Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:36.162963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:36.305093Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305421Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305630Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305700Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305777Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305803Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305857Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305892Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.305918Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:36.390254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:36.390303Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:36.403775Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:36.449347Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:36.459710Z node 2 :STA ... 
ain>: Error: Transaction 281474976730658 completed, doublechecking } 2025-06-03T10:30:00.917245Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:12680:5308] txid# 281474976730659, issues: { message: "Check failed: path: \'/Root/Shared2/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72075186224038898, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:00.924072Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [2:12709:5323]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:30:00.924187Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:30:00.924204Z node 2 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [2:12711:5325] 2025-06-03T10:30:00.924221Z node 2 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [2:12711:5325] 2025-06-03T10:30:00.924545Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224038895] EvServerConnected, pipe server id = [2:12712:5326] 2025-06-03T10:30:00.924627Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224038895] EvConnectNode, pipe server id = [2:12712:5326], node id = 2, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:30:00.924639Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1 2025-06-03T10:30:00.924737Z node 2 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 2, client id = [2:12711:5325], server id = [2:12712:5326], tablet id = 72075186224038895, status = OK 2025-06-03T10:30:00.924758Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:30:00.924776Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 1, ReplyToActorId = [2:12709:5323], StatRequests.size() = 1 2025-06-03T10:30:00.946081Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmVjYWJhNjUtMThmZTQ0NzctYjc3NDQ2LWFlYTEyMTI3, TxId: 2025-06-03T10:30:00.946110Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZmVjYWJhNjUtMThmZTQ0NzctYjc3NDQ2LWFlYTEyMTI3, TxId: 2025-06-03T10:30:00.946313Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-03T10:30:00.962696Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038898, LocalPathId: 3] 2025-06-03T10:30:00.962727Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:30:01.005263Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:217: [72075186224038895] EvFastPropagateCheck 2025-06-03T10:30:01.005327Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:357: [72075186224038895] PropagateFastStatistics(), node count = 0, schemeshard count = 0 2025-06-03T10:30:01.080500Z node 2 :STATISTICS DEBUG: service_impl.cpp:1189: EvRequestTimeout, pipe client id = [2:12711:5325], schemeshard count = 1 2025-06-03T10:30:01.491176Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224037899 2025-06-03T10:30:01.491215Z node 3 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 229.000000s, at schemeshard: 72075186224037899 2025-06-03T10:30:01.491334Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037899, stats size# 26 2025-06-03T10:30:01.503761Z node 3 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:30:01.784901Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:30:01.795442Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:30:01.795471Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:30:01.795480Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224037894] IsColumnTable. Path [OwnerId: 72075186224037899, LocalPathId: 2] is data table. 2025-06-03T10:30:01.795484Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224037894] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:30:01.795577Z node 3 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared1 2025-06-03T10:30:01.796733Z node 3 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:30:01.800905Z node 3 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=3&id=NWVmNTAyMWYtYjFlODI0OGUtYjdlZTI0ZDktMzg4MWE3ODY=, TxId: 2025-06-03T10:30:01.800932Z node 3 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=3&id=NWVmNTAyMWYtYjFlODI0OGUtYjdlZTI0ZDktMzg4MWE3ODY=, TxId: 2025-06-03T10:30:01.801144Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224037894] TTxFinishTraversal::Execute 2025-06-03T10:30:01.813483Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224037894] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224037899, LocalPathId: 2] 2025-06-03T10:30:01.813512Z node 3 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224037894] TTxFinishTraversal::Complete. No ActorId to send reply. 
2025-06-03T10:30:01.899600Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 119 ], ReplyToActorId[ [3:12808:5646]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:30:01.899692Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 119 ] 2025-06-03T10:30:01.899698Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 119, ReplyToActorId = [3:12808:5646], StatRequests.size() = 1 2025-06-03T10:30:03.548072Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 120 ], ReplyToActorId[ [3:12879:5674]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:30:03.548240Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 120 ] 2025-06-03T10:30:03.548252Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 120, ReplyToActorId = [3:12879:5674], StatRequests.size() = 1 2025-06-03T10:30:04.095352Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 1, at schemeshard: 72075186224038900 2025-06-03T10:30:04.095389Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 211.000000s, at schemeshard: 72075186224038900 2025-06-03T10:30:04.095500Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224038895] TTxSchemeShardStats::Execute: schemeshard id# 72075186224038900, stats size# 26 2025-06-03T10:30:04.115692Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224038895] TTxSchemeShardStats::Complete 2025-06-03T10:30:04.372936Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224038895] ScheduleNextTraversal 2025-06-03T10:30:04.372975Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224038895] ScheduleNextTraversal. No force traversals. 2025-06-03T10:30:04.372986Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:802: [72075186224038895] IsColumnTable. Path [OwnerId: 72075186224038900, LocalPathId: 2] is data table. 2025-06-03T10:30:04.372995Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:723: [72075186224038895] ScheduleNextTraversal. Skip traversal for datashard table [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-03T10:30:04.373180Z node 2 :STATISTICS DEBUG: query_actor.cpp:134: [TQueryBase] Bootstrap. Database: /Root/Shared2 2025-06-03T10:30:04.374061Z node 2 :STATISTICS DEBUG: query_actor.cpp:197: [TQueryBase] RunDataQuery: DECLARE $owner_id AS Uint64; DECLARE $local_path_id AS Uint64; DELETE FROM `.metadata/_statistics` WHERE owner_id = $owner_id AND local_path_id = $local_path_id; 2025-06-03T10:30:04.379422Z node 2 :STATISTICS DEBUG: query_actor.cpp:240: [TQueryBase] TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=OTdhZThlNjEtODJiNWFkZGItODVlYzM4NzMtODNjYjE0ODU=, TxId: 2025-06-03T10:30:04.379452Z node 2 :STATISTICS DEBUG: query_actor.cpp:367: [TQueryBase] Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=OTdhZThlNjEtODJiNWFkZGItODVlYzM4NzMtODNjYjE0ODU=, TxId: 2025-06-03T10:30:04.379774Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:26: [72075186224038895] TTxFinishTraversal::Execute 2025-06-03T10:30:04.394370Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:36: [72075186224038895] TTxFinishTraversal::Complete schedule traversal for path [OwnerId: 72075186224038900, LocalPathId: 2] 2025-06-03T10:30:04.394412Z node 2 :STATISTICS DEBUG: tx_finish_trasersal.cpp:39: [72075186224038895] TTxFinishTraversal::Complete. 
No ActorId to send reply.
2025-06-03T10:30:05.304591Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 3
2025-06-03T10:30:05.304728Z node 3 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 3
2025-06-03T10:30:05.304969Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1
2025-06-03T10:30:05.315784Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal
2025-06-03T10:30:05.315821Z node 3 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals.
2025-06-03T10:30:05.426759Z node 3 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 121 ], ReplyToActorId[ [3:12976:5694]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:30:05.426886Z node 3 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 121 ]
2025-06-03T10:30:05.426897Z node 3 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 121, ReplyToActorId = [3:12976:5694], StatRequests.size() = 1
2025-06-03T10:30:05.427101Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 2 ], ReplyToActorId[ [2:12978:5413]], StatType[ 0 ], StatRequestsCount[ 1 ]
2025-06-03T10:30:05.428220Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 2 ]
2025-06-03T10:30:05.428282Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:171: [72075186224038895] EvRequestStats, node id = 2, schemeshard count = 1, urgent = 0
2025-06-03T10:30:05.428287Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224038895] SendStatisticsToNode(), node id = 2, schemeshard count = 1
2025-06-03T10:30:05.428330Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2
2025-06-03T10:30:05.428341Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 2, ReplyToActorId = [2:12978:5413], StatRequests.size() = 1
>> IndexBuildTestReboots::DropIndex [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestSetExecutorFastLogPolicy [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095]
IGNORE Leader for
TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:77:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:80:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:79:2110] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:83:2057] recipient: [4:79:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:82:2111] Leader for TabletID 72057594037927937 is [4:82:2111] sender: [4:168:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:77:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:79:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:80:2110] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:83:2057] recipient: [5:80:2110] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:82:2111] Leader for TabletID 72057594037927937 is [5:82:2111] sender: [5:168:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:78:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:82:2057] recipient: [6:80:2110] Leader for TabletID 72057594037927937 is [6:83:2111] sender: [6:84:2057] recipient: [6:80:2110] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! 
new actor is[6:83:2111] Leader for TabletID 72057594037927937 is [6:83:2111] sender: [6:169:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:80:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:82:2112] Leader for TabletID 72057594037927937 is [7:85:2113] sender: [7:86:2057] recipient: [7:82:2112] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:85:2113] Leader for TabletID 72057594037927937 is [7:85:2113] sender: [7:171:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:80:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:83:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:82:2112] Leader for TabletID 72057594037927937 is [8:85:2113] sender: [8:86:2057] recipient: [8:82:2112] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:85:2113] Leader for TabletID 72057594037927937 is [8:85:2113] sender: [8:171:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:81:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:85:2057] recipient: [9:83:2112] Leader for TabletID 72057594037927937 is [9:86:2113] sender: [9:87:2057] recipient: [9:83:2112] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! 
new actor is[9:86:2113] Leader for TabletID 72057594037927937 is [9:86:2113] sender: [9:172:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:84:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:86:2115] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:90:2057] recipient: [10:86:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:89:2116] Leader for TabletID 72057594037927937 is [10:89:2116] sender: [10:175:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:84:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:88:2057] recipient: [11:86:2115] Leader for TabletID 72057594037927937 is [11:89:2116] sender: [11:90:2057] recipient: [11:86:2115] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:89:2116] Leader for TabletID 72057594037927937 is [11:89:2116] sender: [11:175:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:85:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:88:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:89:2057] recipient: [12:87:2115] Leader for TabletID 72057594037927937 is [12:90:2116] sender: [12:91:2057] recipient: [12:87:2115] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! 
new actor is[12:90:2116]
Leader for TabletID 72057594037927937 is [12:90:2116] sender: [12:176:2057] recipient: [12:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095]
Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095]
Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061]
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_FlatIndex [GOOD]
>> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex
>> KqpRm::DisonnectNodes
|66.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume/unittest
>> TIterator::Mixed [GOOD]
>> TIterator::MixedReverse
>> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD]
>> BuildStatsHistogram::Single_History_Slices [GOOD]
>> BuildStatsHistogram::Ten_Mixed
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::DropIndexWithDataColumns [GOOD]
Test command err:
==== RunWithTabletReboots
=========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142]
2025-06-03T10:29:37.659829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:29:37.659854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:29:37.659861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:29:37.659867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:29:37.659881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:29:37.659886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:29:37.659897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:29:37.659911Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:37.660014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:37.660096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:37.676612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:37.676640Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:37.676730Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:37.679601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:37.679735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:37.679767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:37.681416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:37.681467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:37.681597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.681674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:37.682151Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:37.682193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:37.682454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:37.682466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:37.682482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:37.682490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:37.682496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:37.682537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:37.684034Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:37.698304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:37.698369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.698428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:37.698464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:37.698474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.699343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.699382Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:37.699450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.699462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:37.699469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:37.699475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:37.700029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.700042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:37.700048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:37.700475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.700486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.700492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:37.700499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:37.701288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:37.701812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:37.701856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:37.702053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.702084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:37.702093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:37.702166Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.416244Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:30:06.416248Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:30:06.416253Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:30:06.416265Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:30:06.416435Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.416446Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.417070Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.417124Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.417134Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.417486Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 344 RawX2: 472446404887 } TabletId: 72075186233409546 State: 4 2025-06-03T10:30:06.417503Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:30:06.417755Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:06.417817Z node 110 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-03T10:30:06.417896Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:06.417955Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:30:06.418461Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths 
Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:06.418468Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:30:06.418480Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:30:06.418486Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:30:06.418490Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:06.419151Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:06.419165Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-03T10:30:06.419180Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-03T10:30:06.419246Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:30:06.419252Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:30:06.419300Z node 110 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419313Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:30:06.419317Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [110:576:2537] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:30:06.419375Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419413Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 49us result status StatusSuccess 2025-06-03T10:30:06.419499Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 1 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419562Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419579Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0" took 18us result status StatusPathDoesNotExist 2025-06-03T10:30:06.419593Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/UserDefinedIndexByValue0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419624Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.419635Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" took 13us result status StatusPathDoesNotExist 2025-06-03T10:30:06.419647Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |66.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service >> DBase::KIKIMR_15598_Many_MemTables [GOOD] |66.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service |66.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/service/ydb-core-kqp-ut-service ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_FromSession_ToNewChild_WithoutCommitToParent [GOOD] Test command err: 2025-06-03T10:28:47.284321Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668138134131468:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.284438Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:47.313255Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e46/r3tmp/tmpYmjFAW/pdisk_1.dat 2025-06-03T10:28:47.341863Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668138134131289:2079] 1748946527282223 != 1748946527282226 2025-06-03T10:28:47.345153Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19776, node 1 2025-06-03T10:28:47.355581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000e46/r3tmp/yandexaJ5KMw.tmp 2025-06-03T10:28:47.355592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000e46/r3tmp/yandexaJ5KMw.tmp 2025-06-03T10:28:47.355661Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000e46/r3tmp/yandexaJ5KMw.tmp 2025-06-03T10:28:47.355702Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration 2025-06-03T10:28:47.362289Z INFO: TTestServer started on Port 31715 GrpcPort 19776 TClient is connected to server localhost:31715 PQClient connected to localhost:19776 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:47.385351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.385382Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.386577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.416045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.430159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:47.658353Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138134132101:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.658386Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.658489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138134132113:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.659293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.661095Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668138134132115:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:47.712325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.720924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.724123Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668138134132301:2503] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.739709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.741717Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668138134132310:2358], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:28:47.742558Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=OWY0ZThhNGYtZWYzNjVjMjItNGJlZmU4MjctYmI3MWQ4ZWM=, ActorId: [1:7511668138134132098:2334], ActorState: ExecuteState, TraceId: 01jwtnc9d8epd4gxncs4r9rsdr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:28:47.743048Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668138134132472:2607] 2025-06-03T10:28:52.284066Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668138134131468:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.284114Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:52.931983Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:52.936235Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:52.936701Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668159608969134:2672], Recipient [1:7511668138134131768:2209]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:52.936713Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:52.936718Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:52.936733Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668159608969130:2669], Recipient [1:7511668138134131768:2209]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-03T10:28:52.936735Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:52.946463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 10 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715673 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:52.946585Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.946677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent ... 
est" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } 2025-06-03T10:30:06.359007Z node 8 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 8 DataSize: 0 UsedReserveSize: 0 2025-06-03T10:30:06.359031Z node 8 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][test-topic] ProcessPendingStats. PendingUpdates size 3 2025-06-03T10:30:06.359227Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271188001, Sender [8:7511668441204813846:2453], Recipient [8:7511668415435009051:2153]: NKikimrPQ.TEvPeriodicTopicStats PathId: 13 Generation: 1 Round: 8 DataSize: 0 UsedReserveSize: 0 SubDomainOutOfSpace: false 2025-06-03T10:30:06.359233Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4920: StateWork, processing event TEvPersQueue::TEvPeriodicTopicStats 2025-06-03T10:30:06.359239Z node 8 :FLAT_TX_SCHEMESHARD INFO: schemeshard__pq_stats.cpp:100: Got periodic topic stats at partition [OwnerId: 72057594046644480, LocalPathId: 13] DataSize 0 UsedReserveSize 0 2025-06-03T10:30:06.359246Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.099999s, queue# 1 2025-06-03T10:30:06.359573Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [8:7511668441204813847:2454], Partition 0, Sender [8:7511668441204813910:2459], Recipient [8:7511668441204813907:2457], Cookie: 0 2025-06-03T10:30:06.359585Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [8:7511668441204813910:2459], Recipient [8:7511668441204813907:2457]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:06.359588Z node 8 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:06.359644Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [8:7511668441204813846:2453], Recipient [8:7511668415435009051:2153]: NKikimrSchemeOp.TDescribePath PathId: 13 SchemeshardId: 72057594046644480 2025-06-03T10:30:06.359648Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:30:06.391954Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668449794749398:2720], Partition 2, Sender [0:0:0], Recipient [8:7511668449794749472:2726], Cookie: 0 2025-06-03T10:30:06.391989Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668449794749472:2726]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.391995Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.392012Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.392038Z node 8 :PERSQUEUE TRACE: 
partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.392041Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.392048Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:06.401705Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668449794749403:2721], Partition 1, Sender [0:0:0], Recipient [8:7511668449794749474:2728], Cookie: 0 2025-06-03T10:30:06.401738Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668449794749474:2728]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.401744Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.401763Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.401793Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.401796Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.401804Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:06.422736Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668441204813847:2454], Partition 0, Sender [0:0:0], Recipient [8:7511668441204813907:2457], Cookie: 0 2025-06-03T10:30:06.422769Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668441204813907:2457]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.422774Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.422791Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.422817Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.422820Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.422828Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:06.459783Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [8:7511668415435009051:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-03T10:30:06.459804Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5059: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-03T10:30:06.459807Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-03T10:30:06.459810Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:128: Will execute TTxStoreStats, queue# 1 2025-06-03T10:30:06.459836Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__pq_stats.cpp:141: Will delay TTxStoreTopicStats on# 0.000000s, queue# 1 2025-06-03T10:30:06.459987Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435095, Sender [0:0:0], Recipient [8:7511668415435009051:2153]: NKikimr::NSchemeShard::TEvPrivate::TEvPersistTopicStats 2025-06-03T10:30:06.459988Z node 8 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5059: StateWork, processing event TEvPrivate::TEvPersistTopicStats 2025-06-03T10:30:06.459989Z node 8 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__pq_stats.cpp:119: Started TEvPersistStats at tablet 72057594046644480, queue size# 0 2025-06-03T10:30:06.492284Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668449794749398:2720], Partition 2, Sender [0:0:0], Recipient [8:7511668449794749472:2726], Cookie: 0 2025-06-03T10:30:06.492311Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668449794749472:2726]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.492322Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.492334Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.492351Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.492353Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.492357Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:06.502428Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668449794749403:2721], Partition 1, Sender [0:0:0], Recipient [8:7511668449794749474:2728], Cookie: 0 2025-06-03T10:30:06.502469Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668449794749474:2728]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.502474Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.502494Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.502521Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.502527Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.502534Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:06.523045Z node 8 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [8:7511668441204813847:2454], Partition 0, Sender [0:0:0], Recipient [8:7511668441204813907:2457], Cookie: 0 2025-06-03T10:30:06.523076Z node 8 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [8:7511668441204813907:2457]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.523080Z node 8 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:06.523098Z node 8 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:06.523124Z node 8 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:06.523126Z node 8 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:06.523132Z node 8 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 >> KqpRm::NotEnoughMemory >> TPartBtreeIndexIteration::NoNodes_History [GOOD] >> TPartBtreeIndexIteration::OneNode >> KqpRm::ResourceBrokerNotEnoughResources >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestWriteReadWhileWriteWorks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::DropIndex [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:38.184350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:38.184379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:38.184386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:38.184392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:38.184413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:38.184418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:38.184430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:38.184446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:38.184554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:38.184640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:38.197321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { 
AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:38.197356Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:38.197475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:38.200115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:38.200201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:38.200232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:38.201718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:38.201791Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:38.201933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.202003Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:38.202511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:38.202556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:38.202794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:38.202801Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:38.202813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:38.202818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:38.202823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:38.202869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:38.204267Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:38.222251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:38.222327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.222395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:38.222438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:38.222447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.223297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.223329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:38.223402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.223414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:38.223420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:38.223427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:38.223865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.223876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:38.223881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:38.224181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.224189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:38.224193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:38.224199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:38.224708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:38.225117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:38.225153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:38.225356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:38.225381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:38.225388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:38.225455Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.960613Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:30:06.960620Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:30:06.960626Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:30:06.960644Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:30:06.960907Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.960927Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.962009Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.962095Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.962114Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:30:06.962678Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 344 RawX2: 472446404887 } TabletId: 72075186233409546 State: 4 2025-06-03T10:30:06.962706Z node 110 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:30:06.963223Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:06.963330Z node 110 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-03T10:30:06.963439Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:06.963521Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:30:06.964407Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths 
Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:06.964424Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:30:06.964445Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:30:06.964454Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:30:06.964462Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:06.965369Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:06.965393Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409546 2025-06-03T10:30:06.965418Z node 110 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-03T10:30:06.965520Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:30:06.965530Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:30:06.965605Z node 110 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:30:06.965627Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:30:06.965633Z node 110 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [110:576:2537] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:30:06.965723Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.965787Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 81us result status StatusSuccess 2025-06-03T10:30:06.965921Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 1 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value0" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value1" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 2 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:06.966025Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.966056Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0" took 34us result status StatusPathDoesNotExist 2025-06-03T10:30:06.966079Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/UserDefinedIndexByValue0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:06.966132Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false 
ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:06.966151Z node 110 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" took 20us result status StatusPathDoesNotExist 2025-06-03T10:30:06.966168Z node 110 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 3]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/UserDefinedIndexByValue0/indexImplTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 3 LastExistedPrefixDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TKeyValueTest::TestWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk >> TChargeBTreeIndex::NoNodes_History [GOOD] >> TChargeBTreeIndex::NoNodes_Groups_History >> TVersions::WreckHead [GOOD] >> TVersions::WreckHeadReverse ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_ManySession_existed_AutoscaleAwareSDK [GOOD] Test command err: 2025-06-03T10:28:47.221815Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668138431996713:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.222227Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e40/r3tmp/tmpGAouFa/pdisk_1.dat 2025-06-03T10:28:47.262350Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:28:47.276988Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668138431996689:2079] 1748946527221526 != 1748946527221529 2025-06-03T10:28:47.279753Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19191, node 1 2025-06-03T10:28:47.292076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000e40/r3tmp/yandexveyEFM.tmp 2025-06-03T10:28:47.292093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000e40/r3tmp/yandexveyEFM.tmp 2025-06-03T10:28:47.292188Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000e40/r3tmp/yandexveyEFM.tmp 2025-06-03T10:28:47.292247Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration 2025-06-03T10:28:47.297790Z INFO: TTestServer started on Port 8476 GrpcPort 19191 TClient is connected to server localhost:8476 PQClient connected to localhost:19191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:28:47.324145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.324176Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.325276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.356762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.364363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:47.597082Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138431997512:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.597111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138431997487:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.597211Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.597846Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138431997544:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.597862Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.598008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.599902Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668138431997515:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:47.638279Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.647547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.667921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:28:47.698432Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668138431997823:2581] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7511668138431997874:2609] 2025-06-03T10:28:52.222260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668138431996713:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.222313Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:52.937828Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:52.941387Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:52.941858Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668159906834549:2675], Recipient [1:7511668138431997154:2202]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:52.941869Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:52.941872Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:52.941881Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668159906834545:2672], Recipient [1:7511668138431997154:2202]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:52.941883Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:52.949874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:52.950016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:52.950098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:52.950118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 
2025-06-03T10:28:52.950124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 1 2025-06-03T10:28:52.950132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard creat ... EvPersQueue::TEvHasDataInfo), Tablet [7:7511668468536417830:2453], Partition 0, Sender [7:7511668468536417830:2453], Recipient [7:7511668468536417890:2457], Cookie: 0 2025-06-03T10:30:07.286370Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271187977, Sender [7:7511668468536417830:2453], Recipient [7:7511668468536417890:2457]: NKikimrPQ.THasDataInfo Partition: 0 Offset: 1 Deadline: 1748946616785 Sender { RawX1: 7511668481421320870 RawX2: 4503629692144322 } Cookie: 1 ClientId: "test-consumer" 2025-06-03T10:30:07.286373Z node 7 :PERSQUEUE TRACE: partition.h:584: StateIdle, processing event TEvPersQueue::TEvHasDataInfo 2025-06-03T10:30:07.286412Z node 7 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1301: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 TopicId: Topic /Root/test-topic in database: Root, partition 0(assignId:2) wait for data done: readOffset 1 EndOffset 1 newEndOffset 1 commitOffset 1 clientCommitOffset 1 cookie 1 readingFinished 1 firstRead 1 2025-06-03T10:30:07.286450Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:2448: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 sending to client end partition stream event >>>>> Session-2 Received TSessionClosedEvent message SessionClosed { Status: SUCCESS Issues: "
: Error: Session was gracefully closed " } 2025-06-03T10:30:07.293488Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 grpc read done: success# 0, data# { } 2025-06-03T10:30:07.293510Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 grpc read failed 2025-06-03T10:30:07.293520Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 grpc closed 2025-06-03T10:30:07.293538Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer test-consumer session test-consumer_7_2_14135459055395274942_v1 is DEAD 2025-06-03T10:30:07.294316Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668481421320872:3193], Recipient [7:7511668468536417830:2453]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.294323Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.294329Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.294337Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session test-consumer_7_2_14135459055395274942_v1 2025-06-03T10:30:07.294346Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668481421320871:2754] destroyed 2025-06-03T10:30:07.294371Z node 7 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_7_2_14135459055395274942_v1 2025-06-03T10:30:07.294386Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][test-topic] pipe [7:7511668472831385878:2657] disconnected; active server actors: 1 2025-06-03T10:30:07.294390Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][test-topic] pipe [7:7511668472831385878:2657] client test-consumer disconnected session test-consumer_7_2_14135459055395274942_v1 2025-06-03T10:30:07.305581Z :INFO: [/Root] TraceId [] SessionId [producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-03T10:30:07.305600Z :INFO: [/Root] TraceId [] SessionId [producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0] PartitionId [0] Generation [1] Write session will now close 2025-06-03T10:30:07.305612Z :DEBUG: [/Root] TraceId [] SessionId [producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-03T10:30:07.305890Z :INFO: [/Root] TraceId [] SessionId [producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-03T10:30:07.305897Z :DEBUG: [/Root] TraceId [] SessionId [producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-03T10:30:07.306662Z node 7 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 2 sessionId: producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0 grpc read done: success: 0 data: 2025-06-03T10:30:07.306683Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 2 sessionId: producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0 grpc read failed 2025-06-03T10:30:07.306695Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 2 sessionId: producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0 grpc closed 2025-06-03T10:30:07.306702Z node 7 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 2 sessionId: producer-1|d99e17fc-3c634965-5d321bd5-e108cc8d_0 is DEAD 2025-06-03T10:30:07.307003Z node 7 :PQ_PARTITION_CHOOSER TRACE: partition_chooser_impl__abstract_chooser_actor.h:278: StateIdle, received event# 65543, Sender [7:7511668472831385898:2664], Recipient [7:7511668472831385900:2664]: NActors::TEvents::TEvPoison 2025-06-03T10:30:07.307022Z node 7 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:30:07.309470Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 269877764, Sender [7:7511668472831385934:3037], Recipient [7:7511668468536417830:2453]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.309489Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5260: HandleHook, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.309498Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:2888: [PQ: 72075186224037892] Handle TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:07.309515Z node 7 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668472831385933:2664] destroyed 2025-06-03T10:30:07.309538Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188506 (NKikimr::TEvPQ::TEvPipeDisconnected), Tablet [7:7511668468536417830:2453], Partition 0, Sender [7:7511668468536417830:2453], Recipient [7:7511668468536417890:2457], Cookie: 0 2025-06-03T10:30:07.309544Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188506, Sender [7:7511668468536417830:2453], Recipient [7:7511668468536417890:2457]: NKikimr::TEvPQ::TEvPipeDisconnected 2025-06-03T10:30:07.309548Z node 7 :PERSQUEUE TRACE: partition.h:591: StateIdle, processing event TEvPQ::TEvPipeDisconnected 2025-06-03T10:30:07.309559Z node 7 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:30:07.309566Z node 7 :PERSQUEUE TRACE: partition_write.cpp:854: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessChangeOwnerRequests. 
2025-06-03T10:30:07.309577Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:07.309609Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:07.309612Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:07.309621Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:07.370574Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668477126353328:2699], Partition 2, Sender [0:0:0], Recipient [7:7511668477126353401:2705], Cookie: 0 2025-06-03T10:30:07.370613Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668477126353401:2705]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.370620Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.370637Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:07.370669Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:07.370672Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:07.370678Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:07.370699Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668477126353329:2700], Partition 1, Sender [0:0:0], Recipient [7:7511668477126353403:2707], Cookie: 0 2025-06-03T10:30:07.370703Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668477126353403:2707]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.370706Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.370711Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:07.370719Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:07.370722Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:07.370726Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:07.385380Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668468536417830:2453], Partition 0, Sender [0:0:0], Recipient [7:7511668468536417890:2457], Cookie: 0 2025-06-03T10:30:07.385419Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668468536417890:2457]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.385425Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:07.385443Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:07.385474Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:07.385477Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:07.385485Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> TIterator::MixedReverse [GOOD] >> TIterator::Serial |66.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/wrappers/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> DBase::KIKIMR_15598_Many_MemTables [GOOD] Test command err: 3 parts: [0:0:1:0:0:0:0] 167 rows, 7 pages, 1 levels: (91, 38) (166, 63) (325, 116) (394, 139) (481, 168) [0:0:2:0:0:0:0] 166 rows, 8 pages, 2 levels: (631, 218) (709, 244) (853, 292) (934, 319) (1087, 370) [0:0:3:0:0:0:0] 167 rows, 8 pages, 2 levels: (1156, 393) (1246, 423) (1396, 473) (1471, 498) (1633, 552) Checking BTree: adding part [0:0:1:0:0:0:0] data size (14.1KiB in total) adding group {0,0} PageId: 8 RowCount: 167 DataSize: 13685 ErasedRowCount: 0 LevelCount: 1 IndexSize: 371 added slice [0, 167) data size (13.4KiB - 0B) => 13.4KiB adding part [0:0:2:0:0:0:0] data size (14.6KiB in total) adding group {0,0} PageId: 11 RowCount: 166 DataSize: 14080 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 166) data size (13.8KiB - 0B) => 27.1KiB adding part [0:0:3:0:0:0:0] data size (14.8KiB in total) adding group {0,0} PageId: 11 RowCount: 167 DataSize: 14255 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 167) data size (13.9KiB - 0B) => 41KiB building histogram with row resolution 0, data size resolution 42B slicing part [0:0:1:0:0:0:0]: { {rows: [0, 166] keys: [{7, 10}, {553, 192}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 slicing part [0:0:2:0:0:0:0]: { {rows: [0, 165] keys: [{556, 193}, {1087, 370}]} } slicing node Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 => take adding node future events -1 Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 slicing 
part [0:0:3:0:0:0:0]: { {rows: [0, 166] keys: [{1090, 371}, {1645, 556}]} } slicing node Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 => take adding node future events -1 Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 adding part [0:0:1:0:0:0:0] data size (14.1KiB in total) adding group {0,0} PageId: 8 RowCount: 167 DataSize: 13685 ErasedRowCount: 0 LevelCount: 1 IndexSize: 371 added slice [0, 167) data size (13.4KiB - 0B) => 13.4KiB adding part [0:0:2:0:0:0:0] data size (14.6KiB in total) adding group {0,0} PageId: 11 RowCount: 166 DataSize: 14080 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 166) data size (13.8KiB - 0B) => 27.1KiB adding part [0:0:3:0:0:0:0] data size (14.8KiB in total) adding group {0,0} PageId: 11 RowCount: 167 DataSize: 14255 ErasedRowCount: 0 LevelCount: 2 IndexSize: 530 added slice [0, 167) data size (13.9KiB - 0B) => 41KiB building histogram with row resolution 0, data size resolution 42B slicing part [0:0:1:0:0:0:0]: { {rows: [0, 166] keys: [{7, 10}, {553, 192}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 slicing part [0:0:2:0:0:0:0]: { {rows: [0, 165] keys: [{556, 193}, {1087, 370}]} } slicing node Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 => take adding node future events -1 Part: [0:0:2:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 14080 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 slicing part [0:0:3:0:0:0:0]: { {rows: [0, 166] keys: [{1090, 371}, {1645, 556}]} } slicing node Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 => take adding node future events -1 Part: [0:0:3:0:0:0:0] PageId: 11 Level: 2 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14255 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 
openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 adding event 0 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 2 Level: 0 BeginRowId: 50 EndRowId: 74 BeginDataSize: 3992 EndDataSize: 5889 BeginKey: {166, 63} EndKey: {253, 92} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 2 Level: 0 BeginRowId: 50 EndRowId: 74 BeginDataSize: 3992 EndDataSize: 5889 BeginKey: {166, 63} EndKey: {253, 92} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 3 Level: 0 BeginRowId: 74 EndRowId: 96 BeginDataSize: 5889 EndDataSize: 7868 BeginKey: {253, 92} EndKey: {325, 116} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 3 Level: 0 BeginRowId: 74 EndRowId: 96 BeginDataSize: 5889 EndDataSize: 7868 BeginKey: {253, 92} EndKey: {325, 116} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 4 Level: 0 BeginRowId: 96 EndRowId: 119 BeginDataSize: 7868 EndDataSize: 9910 BeginKey: {325, 116} EndKey: {394, 139} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 4 Level: 0 BeginRowId: 96 EndRowId: 119 BeginDataSize: 7868 EndDataSize: 9910 BeginKey: {325, 116} EndKey: {394, 139} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 5 Level: 0 BeginRowId: 119 EndRowId: 144 BeginDataSize: 9910 EndDataSize: 11938 BeginKey: {394, 139} EndKey: {481, 168} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 5 Level: 0 BeginRowId: 119 EndRowId: 144 BeginDataSize: 9910 EndDataSize: 11938 BeginKey: {394, 139} EndKey: {481, 168} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 6 Level: 0 BeginRowId: 144 EndRowId: 167 BeginDataSize: 11938 EndDataSize: 13685 BeginKey: {481, 168} EndKey: {553, 192} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 6 Level: 0 BeginRowId: 144 EndRowId: 167 BeginDataSize: 11938 EndDataSize: 13685 BeginKey: {481, 168} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} 
State: 1 closedRowCount: 0 openedRowCount: 25 nextHistogramRowCount: 0 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 3 closedDataSize: 0 openedDataSize: 1974 nextHistogramDataSize: 42 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 closedDataSize: 0 openedDataSize: 1974 nextHistogramDataSize: 42 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 25 openedDataSize: 1974 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 18 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 8 Level: 1 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13685 BeginKey: {7, 10} EndKey: {553, 192} State: 3 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 0 closedDataSize: 0 openedRowCount: 25 openedDataSize: 1974 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 18 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 42 closedRowCount: 25 closedDataSize: 1974 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 17 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 25 BeginDataSize: 0 EndDataSize: 1974 BeginKey: {7, 10} EndKey: {91, 38} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 26 nextHistogramDataSize: 1975 closedRowCount: 25 closedDataSize: 1974 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 17 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 1 Level: 0 BeginRowId: 25 EndRowId: 50 BeginDataSize: 1974 EndDataSize: 3992 BeginKey: {91, 38} EndKey: {166, 63} State: 1 closedRowCount: 25 openedRowCount: 25 nextHistogramRowCount: 26 loading node by data size t ... 
: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 6 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedRowCount: 0 openedRowCount: 167 nextHistogramRowCount: 0 loading node by data size triggerPart: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 closedDataSize: 0 openedDataSize: 13565 nextHistogramDataSize: 41 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 167 openedDataSize: 13565 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 5 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 0 closedDataSize: 0 openedRowCount: 167 openedDataSize: 13565 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 5 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 0 nextHistogramDataSize: 41 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 4 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 13565 BeginKey: {7, 10} EndKey: {553, 192} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 4 currentKeyPointer: IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 processing event IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 0 loading node by row count triggerPart: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1 closedRowCount: 167 openedRowCount: 166 nextHistogramRowCount: 168 loading node by data size triggerPart: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} 
EndKey: {1087, 370} State: 1 closedDataSize: 13565 openedDataSize: 13940 nextHistogramDataSize: 13566
checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 166 openedDataSize: 13940 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 3 currentKeyPointer: IsBegin: 1 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1
iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 167 closedDataSize: 13565 openedRowCount: 166 openedDataSize: 13940 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 3 currentKeyPointer: IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1
processing event IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 1
checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 168 nextHistogramDataSize: 13566 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 0 Part: [0:0:2:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 166 BeginDataSize: 0 EndDataSize: 13940 BeginKey: {556, 193} EndKey: {1087, 370} State: 2
iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0
processing event IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 0
loading node by row count triggerPart: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 closedRowCount: 333 openedRowCount: 167 nextHistogramRowCount: 334
loading node by data size triggerPart: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1 closedDataSize: 27505 openedDataSize: 14115 nextHistogramDataSize: 27506
checking stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 167 openedDataSize: 14115 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 1 currentKeyPointer: IsBegin: 1 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1
iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 333 closedDataSize: 27505 openedRowCount: 167 openedDataSize: 14115 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 1 currentKeyPointer: IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1
processing event IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 1
checking stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 500 closedDataSize: 41620 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 0 currentKeyPointer: IsBegin: 0 Part: [0:0:3:0:0:0:0] PageId: 0 Level: 0 BeginRowId: 0 EndRowId: 167 BeginDataSize: 0 EndDataSize: 14115 BeginKey: {1090, 371} EndKey: {1645, 556} State: 2
finished stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 334 nextHistogramDataSize: 27506 closedRowCount: 500 closedDataSize: 41620 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 0
Touched 0% bytes, 0 pages
RowCountHistogram:
33% (actual 33%) key = (553, 192) value = 167 (actual 166 - 0% error)
33% (actual 33%) key = (1087, 370) value = 333 (actual 332 - 0% error)
33% (actual 33%)
DataSizeHistogram:
32% (actual 32%) key = (553, 192) value = 13565 (actual 13565 - 0% error)
33% (actual 33%) key = (1087, 370) value = 27505 (actual 27505 - 0% error)
33% (actual 33%)
Checking Flat: Touched 100% bytes, 3 pages
RowCountHistogram:
33% (actual 33%) key = (556, 193) value = 167 (actual 167 - 0% error)
33% (actual 33%) key = (1090, 371) value = 333 (actual 333 - 0% error)
33% (actual 33%)
DataSizeHistogram:
32% (actual 32%) key = (556, 193) value = 13565 (actual 13565 - 0% error)
33% (actual 33%) key = (1090, 371) value = 27505 (actual 27505 - 0% error)
33% (actual 33%)
Checking Mixed: Touched 0% bytes, 0 pages
RowCountHistogram: 100% (actual 100%)
DataSizeHistogram: 100% (actual 100%)
Got : 24000 2106439 49449 38 44
Expected: 24000 2106439 49449 38 44
{ [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) }
Got : 12816 1121048 49449 20 23
Expected: 12816 1121048 49449 20 23
Got : 24000 3547100 81694 64 44
Expected: 24000 3547100 81694 64 44
{ [1012, 1475), [1682, 1985), [2727, 3553), [3599, 3992), [5397, 7244), [9181, 9807), [9993, 10178), [12209, 14029), [15089, 15342), [16198, 16984), [17238, 18436), [21087, 21876), [23701, 23794) }
Got : 9582 1425198 81694 26 17
Expected: 9582 1425198 81694 26 17
Got : 24000 2460139 23760 42 41
Expected: 24000 2460139 23760 42 41
{ [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) }
Got : 10440 1060798 23760 18 18
Expected: 10440 1060798 23760 18 18
Got : 24000 4054050 46562 68 43
Expected: 24000 4054050 46562 68 43
{ [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) }
Got : 13570 2277890 46562 38 24
Expected: 13570 2277890 46562 38 24
Got : 24000 2106459 49449 38 44
Expected: 24000 2106459 49449 38 44
Got : 24000 2460219 23555 41 41
Expected: 24000 2460219 23555 41 41
Got : 24000 4054270 46543 66 43
Expected: 24000 4054270 46543 66 43
Got : 24000 2106479 49555 38 44
Expected: 24000 2106479 49555 38 44
Got : 24000 2460259 23628 41 41
Expected: 24000 2460259 23628 41 41
Got : 24000 4054290 46640 65 43
Expected: 24000 4054290 46640 65 43
Got : 24000 2106439 66674 3 4
Expected: 24000 2106439 66674 3 4
{ [2455, 2599), [2798, 3624), [4540, 4713), [5654, 7161), [8509, 8794), [8936, 9973), [11888, 14280), [14337, 14882), [15507, 16365), [17368, 19451), [19536, 20135), [20790, 21503), [21589, 23243) }
Got : 12816 1121048 66674 2 2
Expected: 12816 1121048 66674 2 2
Got : 24000 2460139 33541 4 4
Expected: 24000 2460139 33541 4 4
{ [1296, 2520), [3888, 4320), [5040, 6840), [6912, 7272), [10872, 11160), [11520, 12096), [12096, 13824), [15192, 15624), [17064, 17856), [18216, 19296), [19800, 20160), [20736, 21096), [21096, 22104) }
Got : 10440 1060798 33541 1 1
Expected: 10440 1060798 33541 1 1
Got : 24000 4054050 64742 7 4
Expected: 24000 4054050 64742 7 4
{ [460, 1518), [2300, 2484), [2760, 4002), [4600, 5842), [6302, 9752), [11178, 12328), [14582, 14858), [16790, 18032), [18216, 18446), [18722, 19504), [19504, 19964), [20378, 20470), [21344, 23506) }
Got : 13570 2234982 64742 4 2
Expected: 13570 2234982 64742 4 2
>> KqpRm::ResourceBrokerNotEnoughResources [GOOD]
>> KqpRm::NotEnoughMemory [GOOD]
>> TKeyValueTest::TestWriteReadDeleteWithRestartsThenResponseOk [GOOD]
>> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC
|66.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest
>> TIterator::Serial [GOOD]
>> TIterator::SerialReverse
>> TSequenceReboots::CreateSequence
>> BuildStatsHistogram::Ten_Mixed [GOOD]
>> BuildStatsHistogram::Ten_Serial
>> TPartBtreeIndexIteration::OneNode [GOOD]
>> TPartBtreeIndexIteration::OneNode_Groups
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::ResourceBrokerNotEnoughResources [GOOD]
Test command err:
2025-06-03T10:30:08.257212Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:180:2102] Bootstrap
2025-06-03T10:30:08.292067Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:180:2102] Become StateWork (SchemeCache [2:187:2105])
2025-06-03T10:30:08.292204Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:179:2152] Bootstrap
2025-06-03T10:30:08.293596Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:179:2152] Become StateWork (SchemeCache [1:190:2158])
2025-06-03T10:30:08.303402Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:30:08.305032Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:30:08.305064Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:30:08.305656Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:30:08.305874Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:30:08.306007Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:08.306013Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:08.306038Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:30:08.308589Z node 1 :BS_CONTROLLER
DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete 2025-06-03T10:30:08.308693Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx 2025-06-03T10:30:08.308706Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false 2025-06-03T10:30:08.308719Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:30:08.308729Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:30:08.308795Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:30:08.342518Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:30:08.342589Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:30:08.353746Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:30:08.353815Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:08.353835Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:08.353847Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:08.353877Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:08.353884Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:08.353889Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:08.353898Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:08.366234Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:08.366300Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:08.377586Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:08.377660Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:30:08.377871Z node 1 
:BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:30:08.377878Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:30:08.380608Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:30:08.380642Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:30:08.381206Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-03T10:30:08.381400Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-03T10:30:08.381755Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-03T10:30:08.381867Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat 2025-06-03T10:30:08.381876Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat 2025-06-03T10:30:08.382231Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-03T10:30:08.382279Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:08.382300Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-03T10:30:08.382371Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:30:08.382425Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:30:08.383650Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-03T10:30:08.383824Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:08.395130Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add 
devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-03T10:30:08.395254Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-03T10:30:08.398101Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-03T10:30:08.398323Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2856} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-03T10:30:08.398556Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/u93c/001274/r3tmp/tmp11epQz/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 7244646074476030015 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-03T10:30:08.398816Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:08.398910Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:08.398917Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:08.398965Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:08.398977Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:08.399013Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:08.399035Z node 2 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:08.399043Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:08.399050Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:08.399062Z node 1 :LOCAL 
DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:08.399175Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:08.399185Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:08.399190Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:08.399221Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:413:2304] 2025-06-03T10:30:08.399293Z node 2 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:08.399300Z node 2 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:08.399304Z node 2 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:08.399313Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:415:2115] 2025-06-03T10:30:08.400708Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:08.400745Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:402:2300] 2025-06-03T10:30:08.400794Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:08.400802Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:403:2111] 2025-06-03T10:30:08.401080Z node 1 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:413:2304]} 2025-06-03T10:30:08.401096Z node 1 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:08.401106Z node 1 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:08.401110Z node 1 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:08.401239Z node 2 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:415:2115]} 2025-06-03T10:30:08.401313Z node 2 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:08.401322Z node 2 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:08.401326Z node 2 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:08.422289Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:08.422312Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-03T10:30:08.422316Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:08.422321Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
|66.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/address_classification/ut/unittest
>> TSubscriberTest::InvalidNotification
>> TIterator::SerialReverse [GOOD]
>> TIterator::GetKey [GOOD]
>> TIterator::GetKeyWithEraseCache [GOOD]
>> TIterator::GetKeyWithVersionSkips [GOOD]
>> TLegacy::IndexIter
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::NotEnoughMemory [GOOD]
Test command err:
2025-06-03T10:30:08.231712Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:180:2102] Bootstrap
2025-06-03T10:30:08.269528Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:180:2102] Become StateWork (SchemeCache [2:187:2105])
2025-06-03T10:30:08.269693Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:179:2152] Bootstrap
2025-06-03T10:30:08.271984Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:179:2152] Become StateWork (SchemeCache [1:190:2158])
2025-06-03T10:30:08.285692Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:30:08.288253Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:30:08.288299Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:30:08.288918Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:30:08.289195Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:30:08.289441Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:08.289454Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:08.289490Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:30:08.293076Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2025-06-03T10:30:08.293163Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2025-06-03T10:30:08.293183Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2025-06-03T10:30:08.293220Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:30:08.293237Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots
2025-06-03T10:30:08.293253Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2025-06-03T10:30:08.327012Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion
2025-06-03T10:30:08.327071Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:30:08.339895Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId
2025-06-03T10:30:08.339983Z node 1
:BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:08.340003Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:08.340020Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:08.340052Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:08.340077Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:08.340086Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:08.340097Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:08.351208Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:08.351269Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:08.363730Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:08.363846Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:30:08.364281Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:30:08.364304Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:30:08.366350Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:30:08.366370Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:30:08.366749Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-03T10:30:08.366822Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-03T10:30:08.367025Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-03T10:30:08.367098Z node 1 :BS_CONTROLLER NOTICE: 
{BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat 2025-06-03T10:30:08.367103Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat 2025-06-03T10:30:08.367351Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-03T10:30:08.367378Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:08.367392Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-03T10:30:08.367437Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:30:08.367477Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:30:08.368066Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-03T10:30:08.368163Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:08.379581Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-03T10:30:08.379682Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-03T10:30:08.382924Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup PDiskId# 1000 2025-06-03T10:30:08.383236Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2856} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-03T10:30:08.383441Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat": unknown reason, errno# 0. 
Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/u93c/001273/r3tmp/tmpp0n44m/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 17736700568052314023 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-03T10:30:08.383763Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:08.383861Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:08.383874Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:08.383973Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:08.383987Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:08.384017Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:08.384042Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:08.384049Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:08.384057Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:08.384069Z node 2 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:08.384199Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:08.384212Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:08.384218Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:08.384253Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:416:2307] 2025-06-03T10:30:08.384323Z node 2 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:08.384332Z node 2 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:08.384336Z node 2 :LOCAL DEBUG: 
local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:08.384348Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:418:2115] 2025-06-03T10:30:08.385434Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:08.385457Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:406:2111] 2025-06-03T10:30:08.385504Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:08.385512Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:405:2303] 2025-06-03T10:30:08.385791Z node 1 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:416:2307]} 2025-06-03T10:30:08.385808Z node 1 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:08.385822Z node 1 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:08.385827Z node 1 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:08.385984Z node 2 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:418:2115]} 2025-06-03T10:30:08.386048Z node 2 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:08.386055Z node 2 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:08.386059Z node 2 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:08.404892Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:08.404920Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-03T10:30:08.404924Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:08.404929Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
>> TLegacy::IndexIter [GOOD]
>> TLegacy::ScreenedIndexIter [GOOD]
>> TLegacy::StatsIter [GOOD]
>> TPageHandleTest::Uninitialized [GOOD]
>> TPageHandleTest::NormalUse [GOOD]
>> TPageHandleTest::HandleRef [GOOD]
>> TPageHandleTest::PinnedRef [GOOD]
>> TPageHandleTest::PinnedRefPure [GOOD]
>> TPart::BasicColumnGroups [GOOD]
>> TSubscriberTest::SyncWithOutdatedReplica
>> KqpRm::DisonnectNodes [GOOD]
>> TSubscriberTest::ReconnectOnFailure
>> TSubscriberTest::InvalidNotification [GOOD]
>> TSubscriberTest::SyncWithOutdatedReplica [GOOD]
>> TVersions::WreckHeadReverse [GOOD]
>> TVersions::Wreck2
>> TExecutorDb::RandomOps [GOOD]
>> TExecutorDb::FullScan
>> KqpProxy::CalcPeerStats [GOOD]
>> KqpProxy::CreatesScriptExecutionsTable
>> BuildStatsHistogram::Ten_Serial [GOOD]
>> BuildStatsHistogram::Ten_Crossed
>> TSubscriberTest::Sync
>> TSubscriberTest::ReconnectOnFailure [GOOD]
>> TKeyValueTest::TestBasicWriteReadOverrun [GOOD]
>> TKeyValueTest::TestBlockedEvGetRequest
>> TSubscriberTest::Sync [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TPart::BasicColumnGroups [GOOD]
Test command err:
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.297354Z
00000.005 DD| RESOURCE_BROKER: TResourceBrokerActor bootstrap
00000.006 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144
00000.006 II| FAKE_ENV: Starting storage for BS group 0
00000.006 II| FAKE_ENV: Starting storage for BS group 1
00000.006 II| FAKE_ENV: Starting storage for BS group 2
00000.006 II| FAKE_ENV: Starting storage for BS group 3
00000.007 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor
00000.007 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc}
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 302b annex 0, ~{ } -{ }, 0 gb}
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0}
00000.008 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} took 4194304b of static mem, Memory{4194304 dyn 0}
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} hope 1 -> done Change{2, redo 0b alter 15b annex 0, ~{ } -{ }, 0 gb}
00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxSetResourceProfile} release 4194304b
of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 1, state Free, final id 0, final level 0 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{1 104857601b} type large_transaction 00000.008 DD| RESOURCE_BROKER: Submitted new unknown task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) priority=5 resources={0, 104857601} 00000.008 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.008 DD| RESOURCE_BROKER: Allocate resources {0, 104857601} for task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) from queue queue_default 00000.008 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.008 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 12.207031 (insert task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{1 104857601b}, Memory{0 dyn 104857601} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{1 104857601b}, Memory{0 dyn 0} 00000.008 DD| RESOURCE_BROKER: Finish task Tx{3, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062]) (release resources {0, 104857601}) 00000.008 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 12.207031 to 0.000000 (remove task Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (1 by [1:30:2062])) 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 104856577b requested for data (104857601b in total) 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 104857601b of static mem, Memory{104857601 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 104857601b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 209714177b requested for data (209715201b in total) 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{2 209715201b} type large_transaction 00000.009 DD| RESOURCE_BROKER: Submitted new unknown task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) priority=5 
resources={0, 209715201} 00000.009 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.009 DD| RESOURCE_BROKER: Allocate resources {0, 209715201} for task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) from queue queue_default 00000.009 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])' of unknown type 'large_transaction' to default queue 00000.009 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 23.193359 (insert task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{2 209715201b}, Memory{0 dyn 209715201} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{2 209715201b}, Memory{0 dyn 0} 00000.009 DD| RESOURCE_BROKER: Finish task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062]) (release resources {0, 209715201}) 00000.009 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 23.193359 to 0.000000 (remove task Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (2 by [1:30:2062])) 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.009 DD| TABLET_EXE ... 
76:97:0], [1:2:54:1:24576:97:0], [1:2:55:1:24576:97:0], [1:2:56:1:24576:97:0], [1:2:57:1:24576:97:0], [1:2:58:1:24576:97:0], [1:2:59:1:24576:97:0], [1:2:60:1:24576:97:0], [1:2:61:1:24576:97:0], [1:2:62:1:24576:97:0], [1:2:63:1:24576:97:0], [1:2:64:1:24576:97:0], [1:2:65:1:24576:97:0], [1:2:66:1:24576:97:0], [1:2:67:1:24576:97:0], [1:2:68:1:24576:97:0], [1:2:69:1:24576:97:0], [1:2:70:1:24576:97:0], [1:2:71:1:24576:97:0], [1:2:72:1:24576:97:0], [1:2:73:1:24576:101:0], [1:2:74:1:24576:102:0], [1:2:75:1:24576:101:0], [1:2:76:1:24576:102:0], [1:2:77:1:24576:104:0], [1:2:78:1:24576:104:0], [1:2:79:1:24576:104:0], [1:2:80:1:24576:104:0], [1:2:81:1:24576:103:0], [1:2:82:1:24576:101:0], [1:2:83:1:24576:104:0], [1:2:84:1:24576:104:0], [1:2:85:1:24576:104:0], [1:2:86:1:24576:104:0], [1:2:87:1:24576:104:0], [1:2:88:1:24576:104:0], [1:2:89:1:24576:104:0], [1:2:90:1:24576:101:0], [1:2:91:1:24576:104:0], [1:2:92:1:24576:104:0], [1:2:93:1:24576:98:0], [1:2:94:1:24576:104:0], [1:2:95:1:24576:104:0], [1:2:96:1:24576:104:0], [1:2:97:1:24576:104:0], [1:2:98:1:24576:104:0], [1:2:99:1:24576:104:0], [1:2:100:1:24576:104:0], [1:2:101:1:24576:97:0], [1:2:102:1:24576:100:0], [1:2:103:1:24576:104:0], [1:2:104:1:24576:104:0], [1:2:105:1:24576:104:0], [1:2:106:1:24576:104:0], [1:2:107:1:24576:104:0], [1:2:108:1:24576:104:0], [1:2:109:1:24576:104:0], [1:2:110:1:24576:104:0], [1:2:111:1:24576:104:0], [1:2:112:1:24576:104:0], [1:2:113:1:24576:104:0], [1:2:114:1:24576:104:0], [1:2:115:1:24576:104:0], [1:2:116:1:24576:104:0], [1:2:117:1:24576:104:0], [1:2:118:1:24576:104:0], [1:2:119:1:24576:104:0], [1:2:120:1:24576:104:0], [1:2:121:1:24576:104:0], [1:2:122:1:24576:104:0], [1:2:123:1:24576:104:0], [1:2:124:1:24576:104:0], [1:2:125:1:24576:104:0], [1:2:126:1:24576:104:0], [1:2:127:1:24576:104:0], [1:2:128:1:24576:104:0], [1:2:129:1:24576:104:0], [1:2:130:1:24576:104:0], [1:2:131:1:24576:104:0], [1:2:132:1:24576:104:0], [1:2:133:1:24576:104:0], [1:2:134:1:24576:104:0], [1:2:135:1:24576:104:0], [1:2:136:1:24576:104:0], [1:2:137:1:24576:104:0], [1:2:138:1:24576:104:0], [1:2:139:1:24576:104:0], [1:2:140:1:24576:104:0], [1:2:141:1:24576:104:0], [1:2:142:1:24576:104:0], [1:2:145:1:24576:60:0], [1:2:146:1:24576:60:0] } 00000.012 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] 00000.012 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.012 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 4 class Online from cache [ ] already requested [ ] to request [ 22 23 24 25 ] 00000.012 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 22 23 24 25 ] 00000.012 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class Online pages [ 22 23 24 25 ] cookie 4 00000.012 II| TABLET_EXECUTOR: Leader{1:3:0} activating executor 00000.013 II| TABLET_EXECUTOR: LSnap{1:3, on 3:1, 1880b, wait} done, Waste{2:0, 141856b +(140, 14018b), 146 trc} 00000.013 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.013 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] owner [20:212:2237] cookie 2 class AsyncLoad from cache [ 22 23 24 25 ] already requested [ ] to request [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.013 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:143:1:12288:758:0] async queue pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.013 DD| 
TABLET_EXECUTOR: Leader{1:3:2} commited cookie 2 for step 1 00000.013 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 22 23 24 25 ] 00000.013 TT| TABLET_SAUSAGECACHE: Receive page collection [1:2:143:1:12288:758:0] status OK pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 ] 00000.013 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:143:1:12288:758:0] owner [20:212:2237] class AsyncLoad pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] cookie 2 00000.013 DD| TABLET_EXECUTOR: Leader{1:3:2} got result TEvResult{26 pages [1:2:143:1:12288:758:0] ok OK}, category 2 00000.013 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:143:1:12288:758:0] owner [20:212:2237] pages [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 ] 00000.013 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan 00000.013 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.013 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} hope 1 -> done Change{145, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.013 DD| TABLET_EXECUTOR: Leader{1:3:2} Tx{1, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_StickyPages::TTxFullScan} release 4194304b of static, Memory{0 dyn 0} 00000.013 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.013 II| TABLET_EXECUTOR: Leader{1:3:2} suiciding, Waste{2:0, 141856b +(0, 0b), 1 trc, -14018b acc} 00000.013 DD| TABLET_SAUSAGECACHE: Unregister owner [20:212:2237] 00000.013 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:143:1:12288:758:0] owner [20:212:2237] 00000.013 DD| TABLET_SAUSAGECACHE: Remove owner [20:212:2237] 00000.013 DD| TABLET_SAUSAGECACHE: Drop expired page collection [1:2:143:1:12288:758:0] 00000.013 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {6 1077b} miss {50 281387b} 00000.013 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.013 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {14354b, 149} 00000.013 II| FAKE_ENV: DS.1 gone, left {143736b, 8}, put {157893b, 150} 00000.013 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.013 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.013 II| FAKE_ENV: All BS storage groups are stopped 00000.013 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.013 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 795}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:03.215292Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.003 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 3 actors 00000.003 NN| TABLET_SAUSAGECACHE: Poison cache serviced 3 reqs hit {3 512b} miss {0 0b} 00000.003 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.003 II| FAKE_ENV: DS.0 gone, left {1356b, 12}, put {1376b, 13} 00000.003 II| FAKE_ENV: DS.1 gone, left {6814b, 23}, 
put {6814b, 23} 00000.003 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.003 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:03.220159Z 00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.001 II| FAKE_ENV: Starting storage for BS group 0 00000.001 II| FAKE_ENV: Starting storage for BS group 1 00000.001 II| FAKE_ENV: Starting storage for BS group 2 00000.001 II| FAKE_ENV: Starting storage for BS group 3 00000.035 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.035 NN| TABLET_SAUSAGECACHE: Poison cache serviced 10 reqs hit {860 5551893b} miss {0 0b} 00000.036 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.036 II| FAKE_ENV: DS.0 gone, left {1201b, 13}, put {1221b, 14} 00000.036 II| FAKE_ENV: DS.1 gone, left {6751256b, 17}, put {6751256b, 17} 00000.036 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.036 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.036 II| FAKE_ENV: All BS storage groups are stopped 00000.036 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.036 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:03.258171Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00001.275 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00001.275 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4109 reqs hit {2091 2366986b} miss {6144 6340608b} 00001.278 II| FAKE_ENV: Shut order, stopping 4 BS groups 00001.278 II| FAKE_ENV: DS.0 gone, left {1761b, 14}, put {1781b, 15} 00001.278 II| FAKE_ENV: DS.1 gone, left {6927727b, 27}, put {6927727b, 27} 00001.279 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00001.279 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00001.279 II| FAKE_ENV: All BS storage groups are stopped 00001.279 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00001.279 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:04.543849Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00001.460 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00001.460 NN| TABLET_SAUSAGECACHE: Poison cache serviced 4106 reqs hit {43 253450b} miss {4096 4227072b} 00001.463 II| FAKE_ENV: Shut order, stopping 4 BS groups 00001.463 II| FAKE_ENV: DS.0 gone, left {44744b, 2}, put {164747b, 16} 00001.463 II| FAKE_ENV: DS.1 gone, left {2764621b, 2068}, put {2764621b, 2068} 00001.464 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00001.464 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 
00001.464 II| FAKE_ENV: All BS storage groups are stopped
00001.464 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s
00001.464 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:06.014387Z
00000.003 II| FAKE_ENV: Starting storage for BS group 0
00000.003 II| FAKE_ENV: Starting storage for BS group 1
00000.003 II| FAKE_ENV: Starting storage for BS group 2
00000.003 II| FAKE_ENV: Starting storage for BS group 3
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:06.031009Z
00000.002 II| FAKE_ENV: Starting storage for BS group 0
00000.002 II| FAKE_ENV: Starting storage for BS group 1
00000.002 II| FAKE_ENV: Starting storage for BS group 2
00000.002 II| FAKE_ENV: Starting storage for BS group 3
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:06.048817Z
00000.002 II| FAKE_ENV: Starting storage for BS group 0
00000.002 II| FAKE_ENV: Starting storage for BS group 1
00000.002 II| FAKE_ENV: Starting storage for BS group 2
00000.002 II| FAKE_ENV: Starting storage for BS group 3
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:06.066048Z
00000.002 II| FAKE_ENV: Starting storage for BS group 0
00000.002 II| FAKE_ENV: Starting storage for BS group 1
00000.002 II| FAKE_ENV: Starting storage for BS group 2
00000.002 II| FAKE_ENV: Starting storage for BS group 3
------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/rm_service/ut/unittest >> KqpRm::DisonnectNodes [GOOD]
Test command err:
2025-06-03T10:30:07.556266Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:180:2102] Bootstrap
2025-06-03T10:30:07.589433Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:180:2102] Become StateWork (SchemeCache [2:187:2105])
2025-06-03T10:30:07.589585Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:179:2152] Bootstrap
2025-06-03T10:30:07.590915Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:179:2152] Become StateWork (SchemeCache [1:190:2158])
2025-06-03T10:30:07.604793Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828672 Event# NKikimr::TEvTablet::TEvBoot
2025-06-03T10:30:07.606959Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828673 Event# NKikimr::TEvTablet::TEvRestored
2025-06-03T10:30:07.607000Z node 1 :BS_CONTROLLER DEBUG: {BSC22@console_interaction.cpp:14} Console interaction started
2025-06-03T10:30:07.607664Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268828684 Event# NKikimr::TEvTablet::TEvFollowerSyncComplete
2025-06-03T10:30:07.607949Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 268639244 Event# NKikimr::TEvNodeWardenStorageConfig
2025-06-03T10:30:07.610372Z node 1 :BS_CONTROLLER DEBUG: {BSC05@impl.h:2057} StateInit event Type# 131082 Event# NActors::TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:07.610402Z node 1 :BS_CONTROLLER DEBUG: {BSC01@bsc.cpp:521} Handle TEvInterconnect::TEvNodesInfo
2025-06-03T10:30:07.610456Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS01@init_scheme.cpp:17} TTxInitScheme Execute
2025-06-03T10:30:07.619413Z node 1 :BS_CONTROLLER DEBUG: {BSCTXIS03@init_scheme.cpp:44} TTxInitScheme Complete
2025-06-03T10:30:07.619504Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM01@migrate.cpp:190} Execute tx
2025-06-03T10:30:07.619518Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM02@migrate.cpp:251} Complete tx IncompatibleData# false
2025-06-03T10:30:07.619573Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type#
NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:30:07.619591Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxTrimUnusedSlots 2025-06-03T10:30:07.619604Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:30:07.656605Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateSchemaVersion 2025-06-03T10:30:07.656666Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:30:07.669888Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxGenerateInstanceId 2025-06-03T10:30:07.669950Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:07.669969Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateStaticPDiskInfo 2025-06-03T10:30:07.669983Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:07.670012Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxFillInNonNullConfigForPDisk 2025-06-03T10:30:07.670023Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:07.670030Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxDropDriveStatus 2025-06-03T10:30:07.670039Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:07.681070Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateCompatibilityInfo 2025-06-03T10:30:07.681125Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM03@migrate.cpp:37} Execute tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:07.692176Z node 1 :BS_CONTROLLER DEBUG: {BSCTXM04@migrate.cpp:43} Complete tx from queue Type# NKikimr::NBsController::TBlobStorageController::TTxMigrate::TTxUpdateEnableConfigV2 2025-06-03T10:30:07.692244Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE01@load_everything.cpp:19} TTxLoadEverything Execute 2025-06-03T10:30:07.692488Z node 1 :BS_CONTROLLER DEBUG: {BSCTXLE03@load_everything.cpp:557} TTxLoadEverything Complete 2025-06-03T10:30:07.692497Z node 1 :BS_CONTROLLER DEBUG: {BSC09@impl.h:2188} LoadFinished 2025-06-03T10:30:07.694961Z node 1 :BS_CONTROLLER DEBUG: {BSC18@console_interaction.cpp:31} Console connection service started 2025-06-03T10:30:07.694991Z node 1 :BS_CONTROLLER DEBUG: 
{BSCTXLE04@load_everything.cpp:562} TTxLoadEverything InitQueue processed 2025-06-03T10:30:07.695443Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 1 VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: INIT_PENDING OnlyPhantomsRemain: false } DeclarativePDiskManagement: true } 2025-06-03T10:30:07.695601Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true } 2025-06-03T10:30:07.695917Z node 1 :BS_CONTROLLER DEBUG: {BSCTXCC01@config_cmd.cpp:398} Execute TEvControllerConfigRequest Request# {Command { DefineHostConfig { HostConfigId: 1 Drive { Path: "/home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat" } } } Command { DefineBox { BoxId: 1 Host { Key { Fqdn: "::1" IcPort: 12001 } HostConfigId: 1 } Host { Key { Fqdn: "::1" IcPort: 12002 } HostConfigId: 1 } } } } 2025-06-03T10:30:07.696004Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 1:1 Path# /home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat 2025-06-03T10:30:07.696014Z node 1 :BS_CONTROLLER NOTICE: {BSCFP02@config_fit_pdisks.cpp:340} Create new pdisk PDiskId# 2:1000 Path# /home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat 2025-06-03T10:30:07.696270Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } State: Initial Replicated: false DiskSpace: Green } } 2025-06-03T10:30:07.696300Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:07.696317Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-03T10:30:07.696368Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: REPLICATING OnlyPhantomsRemain: false } } 2025-06-03T10:30:07.696410Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDiskStatus { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } NodeId: 1 PDiskId: 1 VSlotId: 0 PDiskGuid: 123 Status: READY OnlyPhantomsRemain: false } } 2025-06-03T10:30:07.697229Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-03T10:30:07.697356Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:07.708419Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 1 Devices# [] 2025-06-03T10:30:07.708484Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# [] 2025-06-03T10:30:07.711507Z node 2 :BS_PDISK WARN: {BPD01@blobstorage_pdisk_blockdevice_async.cpp:922} Warning# got EIoResult::FileOpenError from IoContext->Setup 
PDiskId# 1000 2025-06-03T10:30:07.711734Z node 2 :BS_PDISK CRIT: {BPD39@blobstorage_pdisk_impl.cpp:2856} BlockDevice initialization error Details# Can't open file "/home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat": unknown reason, errno# 0. PDiskId# 1000 2025-06-03T10:30:07.711895Z node 2 :BS_PDISK CRIT: {BPD01@blobstorage_pdisk_actor.cpp:290} PDiskId# 1000 bootstrapped to the StateError, reason# Can't open file "/home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat": unknown reason, errno# 0. Can not be initialized Config: {TPDiskConfg Path# "/home/runner/.ya/build/build_root/u93c/00127d/r3tmp/tmpdPEiLJ/pdisk_1.dat" ExpectedPath# "" ExpectedSerial# "" PDiskGuid# 5509297022568961204 PDiskId# 1000 PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} MetadataOnly# 0 StartOwnerRound# 2 SectorMap# true EnableSectorEncryption # 0 ChunkSize# 134217728 SectorSize# 4096 StatisticsUpdateIntervalMs# 1000 SchedulerCfg# {TPDiskSchedulerConfig BytesSchedulerWeight# 1 LogWeight# 1 FreshWeight# 2 CompWeight# 7 SyncLogWeight# 15 HugeWeight# 2 FastReadWeight# 1 OtherReadWeight# 1 LoadWeight# 2 LowReadWeight# 1 MaxChunkReadsPerCycle# 16 MaxChunkReadsDurationPerCycleMs# 0.25 MaxChunkWritesPerCycle# 8 MaxChunkWritesDurationPerCycleMs# 1} MinLogChunksTotal# 4 MaxLogChunksPerOwnerMultiplier# 5 MaxLogChunksPerOwnerDivisor# 4 SortFreeChunksPerItems# 100 GetDriveDataSwitch# DoNotTouch WriteCacheSwitch# DoNotTouch DriveModelSeekTimeNs# 8000000 DriveModelSpeedBps# 127000000 DriveModelSpeedBpsMin# 135000000 DriveModelSpeedBpsMax# 200000000 DriveModelBulkWrieBlockSize# 2097152 DriveModelTrimSpeedBps# 0 ReorderingMs# 50 DeviceInFlight# 4 CostLimitNs# 50000000 BufferPoolBufferSizeBytes# 524288 BufferPoolBufferCount# 256 MaxQueuedCompletionActions# 128 ExpectedSlotCount# 0 ReserveLogChunksMultiplier# 56 InsaneLogChunksMultiplier# 40 RedLogChunksMultiplier# 30 OrangeLogChunksMultiplier# 20 WarningLogChunksMultiplier# 4 YellowLogChunksMultiplier# 4 MaxMetadataMegabytes# 32 SpaceColorBorder# GREEN CompletionThreadsCount# 1 UseNoopScheduler# false PlainDataChunks# 0} PDiskId# 1000 2025-06-03T10:30:07.712130Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:07.712200Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:07.712209Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:826: TTenantPool::Bootstrap 2025-06-03T10:30:07.712264Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:07.712275Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:07.712305Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:07.712324Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:07.712329Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:412: TDomainTenantPool(dc-1) Bootstrap 2025-06-03T10:30:07.712338Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:286: TDomainTenantPool(dc-1) send request to add tenant /dc-1 with resources CPU: 1 Memory: 1 Network: 1 2025-06-03T10:30:07.712349Z node 2 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:07.712466Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:07.712475Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:07.712480Z 
node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:07.712506Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:416:2307] 2025-06-03T10:30:07.712587Z node 2 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594046578946 at domain dc-1 (allocated resources: CPU: 1 Memory: 1 Network: 1) 2025-06-03T10:30:07.712593Z node 2 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:07.712598Z node 2 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:07.712607Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:418:2115] 2025-06-03T10:30:07.713770Z node 2 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:07.713789Z node 2 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [2:406:2111] 2025-06-03T10:30:07.713828Z node 1 :TENANT_POOL NOTICE: tenant_pool.cpp:526: TDomainTenantPool(dc-1) started tenant /dc-1 2025-06-03T10:30:07.713835Z node 1 :TENANT_POOL DEBUG: tenant_pool.cpp:274: TDomainTenantPool(dc-1) send status update to [1:405:2303] 2025-06-03T10:30:07.714053Z node 1 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[1:416:2307]} 2025-06-03T10:30:07.714065Z node 1 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:07.714074Z node 1 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:07.714078Z node 1 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:07.714210Z node 2 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:418:2115]} 2025-06-03T10:30:07.714262Z node 2 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing 2025-06-03T10:30:07.714269Z node 2 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED 2025-06-03T10:30:07.714272Z node 2 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk 2025-06-03T10:30:07.736550Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:07.736581Z node 1 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 2025-06-03T10:30:07.736604Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_rm_service.cpp:796: Failed to deliver subscription request to config dispatcher 2025-06-03T10:30:07.736611Z node 2 :KQP_RESOURCE_MANAGER CRIT: kqp_resource_info_exchanger.cpp:411: Failed to deliver subscription request to config dispatcher. 
2025-06-03T10:30:07.750181Z node 1 :BS_CONTROLLER DEBUG: {BSC19@console_interaction.cpp:74} Console proposed config response Response# {Status: ReverseCommit ConsoleConfigVersion: 0 YAML: "" } 2025-06-03T10:30:07.791712Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:179:2152] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:30:07.792899Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976710656 RangeEnd# 281474976715656 txAllocator# 72057594046447617 2025-06-03T10:30:07.793042Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:180:2102] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:30:07.794024Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:30:07.827236Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1 AvailableSize: 34225520640 TotalSize: 34359738368 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 EnforcedDynamicSlotSize: 34158411776 State: Normal } } 2025-06-03T10:30:07.871695Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } } 2025-06-03T10:30:07.903331Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {VDisksMetrics { VDiskId { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } SatisfactionRank: 0 AvailableSize: 34158411776 AllocatedSize: 0 StatusFlags: 1 VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Occupancy: 0.00098231827111984276 State: OK Replicated: true DiskSpace: Green IsThrottling: false ThrottlingRate: 1000 } } 2025-06-03T10:30:08.265510Z node 1 :BS_CONTROLLER DEBUG: {BSC13@scrub.cpp:597} sending TEvControllerScrubStartQuantum Msg# NKikimrBlobStorage.TEvControllerScrubStartQuantum VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } 2025-06-03T10:30:08.266351Z node 1 :BS_CONTROLLER DEBUG: {BSC11@scrub.cpp:214} Handle(TEvControllerScrubQuantumFinished) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } Success: true } 2025-06-03T10:30:08.266450Z node 1 :BS_CONTROLLER DEBUG: {BSC10@scrub.cpp:187} Handle(TEvControllerScrubQueryStartQuantum) Msg# {VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 0 } } 2025-06-03T10:30:08.830208Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046447617] NodeDisconnected NodeId# 2 2025-06-03T10:30:08.830384Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037932033] NodeDisconnected NodeId# 2 2025-06-03T10:30:08.830570Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594046578946] NodeDisconnected NodeId# 2 2025-06-03T10:30:08.830708Z node 2 :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:89:2081] ServerId# [1:385:2290] TabletId# 72057594037932033 PipeClientId# [2:89:2081] 2025-06-03T10:30:08.830842Z node 2 :LOCAL DEBUG: local.cpp:274: TEvTabletPipe::TEvClientDestroyed {TabletId=72057594046578946 ClientId=[2:418:2115]} 2025-06-03T10:30:08.830851Z node 2 :LOCAL DEBUG: local.cpp:217: TLocalNodeRegistrar HandlePipeDestroyed - DISCONNECTED 2025-06-03T10:30:08.830860Z node 2 :LOCAL DEBUG: local.cpp:181: 
TLocalNodeRegistrar::TryToRegister
2025-06-03T10:30:08.830880Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:556:2115]
2025-06-03T10:30:08.830947Z node 2 :TX_PROXY WARN: proxy_impl.cpp:227: actor# [2:180:2102] HANDLE TEvClientDestroyed from tablet# 72057594046447617
2025-06-03T10:30:08.832705Z node 2 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594046578946 Status=OK ClientId=[2:556:2115]}
2025-06-03T10:30:08.832854Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN01@register_node.cpp:216} Handle TEvControllerRegisterNode Request# {NodeID: 2 DeclarativePDiskManagement: true }
2025-06-03T10:30:08.833045Z node 1 :BS_CONTROLLER DEBUG: {BSCTXUDM01@disk_metrics.cpp:68} Updating disk status Record# {PDisksMetrics { PDiskId: 1000 AvailableSize: 0 TotalSize: 0 MaxReadThroughput: 127000000 MaxWriteThroughput: 127000000 NonRealTimeMs: 0 SlowDeviceMs: 0 MaxIOPS: 125 State: OpenFileError } }
2025-06-03T10:30:08.833069Z node 2 :LOCAL DEBUG: local.cpp:324: TLocalNodeRegistrar::Handle TEvLocal::TEvPing
2025-06-03T10:30:08.833087Z node 2 :LOCAL DEBUG: local.cpp:380: TLocalNodeRegistrar TEvPing - CONNECTED
2025-06-03T10:30:08.833091Z node 2 :LOCAL DEBUG: local.cpp:297: TLocalNodeRegistrar SendStatusOk
2025-06-03T10:30:08.847464Z node 1 :BS_CONTROLLER DEBUG: {BSCTXRN05@register_node.cpp:34} Add devicesData from NodeWarden NodeId# 2 Devices# []
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::InvalidNotification [GOOD]
Test command err:
2025-06-03T10:30:09.373689Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList
2025-06-03T10:30:09.374247Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050]
2025-06-03T10:30:09.374269Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053]
2025-06-03T10:30:09.374279Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056]
2025-06-03T10:30:09.374294Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:35:2065]
2025-06-03T10:30:09.374339Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:36:2065]
2025-06-03T10:30:09.374354Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:34:2065][path] Set up state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.374371Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:37:2065]
2025-06-03T10:30:09.374381Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:34:2065][path] Ignore empty state: owner# [1:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.374463Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:33:2064]
2025-06-03T10:30:09.374470Z node 1 :SCHEME_BOARD_SUBSCRIBER ERROR: subscriber.cpp:811: [main][1:34:2065][path] Suspicious NKikimrSchemeBoard.TEvNotify { PathId: [OwnerId: 1, LocalPathId: 1] Version: 0 }: sender# [1:33:2064]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::ReconnectOnFailure [GOOD]
Test command err:
2025-06-03T10:30:09.794472Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][2:34:2065][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList
2025-06-03T10:30:09.795064Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:38:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050]
2025-06-03T10:30:09.795113Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:35:2065]
2025-06-03T10:30:09.795151Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:39:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053]
2025-06-03T10:30:09.795158Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:40:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056]
2025-06-03T10:30:09.795182Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:36:2065]
2025-06-03T10:30:09.795195Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][2:34:2065][path] Set up state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.795213Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2065]
2025-06-03T10:30:09.795222Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.795321Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:35:2065]
2025-06-03T10:30:09.795328Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.795334Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:36:2065]
2025-06-03T10:30:09.795339Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId:
AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:09.795347Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2065] 2025-06-03T10:30:09.795352Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:09.809600Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:45:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:3:2050] 2025-06-03T10:30:09.809645Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:35:2065] 2025-06-03T10:30:09.809668Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:09.809682Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:46:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:6:2053] 2025-06-03T10:30:09.809701Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:36:2065] 2025-06-03T10:30:09.809707Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:09.809723Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:47:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [1:9:2056] 2025-06-03T10:30:09.809732Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path Version: 0 }: sender# [2:37:2065] 2025-06-03T10:30:09.809738Z node 2 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][2:34:2065][path] Ignore empty state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:09.809893Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][2:45:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050] 2025-06-03T10:30:09.809906Z node 2 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][2:34:2065][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [2:35:2065] 2025-06-03T10:30:09.809915Z node 2 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:842: [main][2:34:2065][path] Update to strong state: owner# [2:33:2064], state# { Deleted: 1 Strong: 0 Version: DomainId: AbandonedSchemeShards: there are 0 elements }, new state# { Deleted: 0 Strong: 1 
Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }
>> TKeyValueTest::TestBlockedEvGetRequest [GOOD]
>> KqpProxy::PingNotExistedSession
>> TableCreation::ConcurrentTableCreationWithDifferentVersions
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::SyncWithOutdatedReplica [GOOD]
Test command err:
2025-06-03T10:30:09.506096Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:35:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList
2025-06-03T10:30:09.506533Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:3:2050]
2025-06-03T10:30:09.506550Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:6:2053]
2025-06-03T10:30:09.506556Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:9:2056]
2025-06-03T10:30:09.506565Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 2] Version: 2 }: sender# [1:36:2066]
2025-06-03T10:30:09.506572Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:37:2066]
2025-06-03T10:30:09.506584Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:35:2066][path] Set up state: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.506631Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 2, LocalPathId: 2] Version: 1 }: sender# [1:38:2066]
2025-06-03T10:30:09.506640Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:35:2066][path] Path was already updated: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 2, LocalPathId: 2], Version: 1) DomainId: [OwnerId: 2, LocalPathId: 1] AbandonedSchemeShards: there are 0 elements }
2025-06-03T10:30:09.506659Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:35:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 1
2025-06-03T10:30:09.506674Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:36:2066], cookie# 1
2025-06-03T10:30:09.506685Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1
2025-06-03T10:30:09.506694Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1
2025-06-03T10:30:09.506702Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:3:2050], cookie# 1
2025-06-03T10:30:09.506707Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1
2025-06-03T10:30:09.506710Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1
2025-06-03T10:30:09.506717Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 2 Partial: 0 }: sender# [1:36:2066], cookie# 1
2025-06-03T10:30:09.506724Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:35:2066][path] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0
2025-06-03T10:30:09.506729Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:37:2066], cookie# 1
2025-06-03T10:30:09.506733Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:35:2066][path] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0
2025-06-03T10:30:09.506738Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:38:2066], cookie# 1
2025-06-03T10:30:09.506741Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:35:2066][path] Unexpected sync response: sender# [1:38:2066], cookie# 1
>> TableCreation::SimpleTableCreation
>> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD]
>> BuildStatsHistogram::Ten_Crossed [GOOD]
>> BuildStatsHistogram::Ten_Mixed_Log
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_subscriber/unittest >> TSubscriberTest::Sync [GOOD]
Test command err:
2025-06-03T10:30:10.170390Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:960: [main][1:35:2066][path] Handle NKikimr::TEvStateStorage::TEvResolveReplicasList
2025-06-03T10:30:10.170874Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:3:2050]
2025-06-03T10:30:10.170905Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:6:2053]
2025-06-03T10:30:10.170915Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:365: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:9:2056]
2025-06-03T10:30:10.170927Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:36:2066]
2025-06-03T10:30:10.170938Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807:
[main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:37:2066] 2025-06-03T10:30:10.170953Z node 1 :SCHEME_BOARD_SUBSCRIBER NOTICE: subscriber.cpp:836: [main][1:35:2066][path] Set up state: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:10.170993Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:807: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvNotify { Path: path PathId: [OwnerId: 1, LocalPathId: 1] Version: 1 }: sender# [1:38:2066] 2025-06-03T10:30:10.171003Z node 1 :SCHEME_BOARD_SUBSCRIBER INFO: subscriber.cpp:854: [main][1:35:2066][path] Path was already updated: owner# [1:33:2064], state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements }, other state# { Deleted: 0 Strong: 1 Version: (PathId: [OwnerId: 1, LocalPathId: 1], Version: 1) DomainId: AbandonedSchemeShards: there are 0 elements } 2025-06-03T10:30:10.171027Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:871: [main][1:35:2066][path] Handle NKikimr::NSchemeBoard::NInternalEvents::TEvSyncRequest: sender# [1:33:2064], cookie# 1 2025-06-03T10:30:10.171045Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:36:2066], cookie# 1 2025-06-03T10:30:10.171056Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:37:2066], cookie# 1 2025-06-03T10:30:10.171064Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:381: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionRequest { Path: path }: sender# [1:38:2066], cookie# 1 2025-06-03T10:30:10.171073Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:39:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:3:2050], cookie# 1 2025-06-03T10:30:10.171079Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:40:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:6:2053], cookie# 1 2025-06-03T10:30:10.171085Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:390: [replica][1:41:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:9:2056], cookie# 1 2025-06-03T10:30:10.171095Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:36:2066], cookie# 1 2025-06-03T10:30:10.171103Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:932: [main][1:35:2066][path] Sync is in progress: cookie# 1, size# 3, half# 1, successes# 1, faulires# 0 2025-06-03T10:30:10.171113Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:37:2066], cookie# 1 2025-06-03T10:30:10.171119Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:946: [main][1:35:2066][path] Sync is done: cookie# 1, size# 3, half# 1, successes# 2, faulires# 0, partial# 0 2025-06-03T10:30:10.171126Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:892: [main][1:35:2066][path] Handle 
NKikimrSchemeBoard.TEvSyncVersionResponse { Version: 1 Partial: 0 }: sender# [1:38:2066], cookie# 1
2025-06-03T10:30:10.171131Z node 1 :SCHEME_BOARD_SUBSCRIBER DEBUG: subscriber.cpp:906: [main][1:35:2066][path] Unexpected sync response: sender# [1:38:2066], cookie# 1
>> ScriptExecutionsTest::RunCheckLeaseStatus
>> CommitOffset::Commit_WithSession_ParentNotFinished_OtherSession_ParentCommittedToEnd [GOOD]
>> CommitOffset::Commit_WithSession_ToPastParentPartition
>> TPartBtreeIndexIteration::OneNode_Groups [GOOD]
>> TPartBtreeIndexIteration::OneNode_History
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestBlockedEvGetRequest [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061]
!Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected !
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110]
!Reboot 72057594037927937 (actor [2:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed!
new actor is[2:82:2111]
Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061]
!Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction !
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061]
Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110]
!Reboot 72057594037927937 (actor [3:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed!
new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! 
new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:77:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:80:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:81:2057] recipient: [10:79:2110] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:83:2057] recipient: [10:79:2110] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:82:2111] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:168:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:77:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:80:2057] recipient: [11:79:2110] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:81:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:83:2057] recipient: [11:79:2110] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:82:2111] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:168:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:78:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:81:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:82:2057] recipient: [12:80:2110] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:84:2057] recipient: [12:80:2110] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! new actor is[12:83:2111] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:169:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:81:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:84:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:85:2057] recipient: [13:83:2113] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:87:2057] recipient: [13:83:2113] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:86:2114] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:172:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! 
Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:81:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:84:2057] recipient: [14:83:2113] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:85:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:87:2057] recipient: [14:83:2113] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! new actor is[14:86:2114] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:172:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:82:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:85:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:86:2057] recipient: [15:84:2113] Leader for TabletID 72057594037927937 is [15:87:2114] sender: [15:88:2057] recipient: [15:84:2113] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! new actor is[15:87:2114] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] 2025-06-03T10:30:10.567825Z node 17 :KEYVALUE ERROR: keyvalue_storage_read_request.cpp:254: {KV323@keyvalue_storage_read_request.cpp:254} Received BLOCKED EvGetResult. 
KeyValue# 72057594037927937 Status# BLOCKED Deadline# 18446744073709551 Now# 0 SentAt# 1970-01-01T00:00:00.000000Z GotAt# 0 ErrorReason# block race detected 2025-06-03T10:30:10.569017Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:934: Tablet: 72057594037927937 HandleBlockBlobStorageResult, msg->Status: ALREADY, not discovered Marker# TSYS21 2025-06-03T10:30:10.569046Z node 17 :TABLET_MAIN ERROR: tablet_sys.cpp:1849: Tablet: 72057594037927937 Type: KeyValue, EReason: ReasonBootBSError, SuggestedGeneration: 0, KnownGeneration: 3 Marker# TSYS31 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadDeleteWithRestartsAndCatchCollectGarbageEventsWithSlowInitialGC [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! 
new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! 
new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! 
new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:87:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:88:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:88:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:88:2116] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:92:2057] recipient: [11:88:2116] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:91:2117] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:111:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:87:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:90:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:91:2057] recipient: [12:89:2117] Leader for TabletID 72057594037927937 is [12:92:2118] sender: [12:93:2057] recipient: [12:89:2117] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! 
new actor is[12:92:2118] Leader for TabletID 72057594037927937 is [12:92:2118] sender: [12:112:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:90:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:93:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:94:2057] recipient: [13:92:2120] Leader for TabletID 72057594037927937 is [13:95:2121] sender: [13:96:2057] recipient: [13:92:2120] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:95:2121] Leader for TabletID 72057594037927937 is [13:95:2121] sender: [13:181:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:90:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:93:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:94:2057] recipient: [14:92:2120] Leader for TabletID 72057594037927937 is [14:95:2121] sender: [14:96:2057] recipient: [14:92:2120] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! new actor is[14:95:2121] Leader for TabletID 72057594037927937 is [14:95:2121] sender: [14:181:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:91:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:94:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:95:2057] recipient: [15:93:2120] Leader for TabletID 72057594037927937 is [15:96:2121] sender: [15:97:2057] recipient: [15:93:2120] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! 
new actor is[15:96:2121] Leader for TabletID 72057594037927937 is [15:96:2121] sender: [15:182:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] >> TableCreation::ConcurrentTableCreation >> TVersions::Wreck2 [GOOD] >> TVersions::Wreck2Reverse >> TableCreation::ConcurrentTableCreationWithDifferentVersions [GOOD] >> TableCreation::ConcurrentUpdateTable >> KqpProxy::PingNotExistedSession [GOOD] >> ScriptExecutionsTest::AttemptToUpdateDeletedLease >> TFlatTableExecutor_IndexLoading::PrechargeAndSeek_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex >> KqpQueryService::IssuesInCaseOfSuccess >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi >> TableCreation::SimpleTableCreation [GOOD] >> TableCreation::SimpleUpdateTable >> KqpQueryService::TableSink_OlapUpdate >> TFlatTableExecutor_IndexLoading::Scan_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex >> BuildStatsHistogram::Ten_Mixed_Log [GOOD] >> BuildStatsHistogram::Ten_Serial_Log >> KqpQueryService::Ddl >> TExecutorDb::FullScan [GOOD] >> TExecutorDb::CoordinatorSimulation >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorks [GOOD] >> KqpQueryService::ShowCreateTableOnView >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi |66.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |66.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut |66.2%| [LD] {RESULT} $(B)/ydb/core/fq/libs/common/ut/ydb-core-fq-libs-common-ut >> KqpQueryService::StreamExecuteQuery >> KqpProxy::CreatesScriptExecutionsTable [GOOD] >> KqpProxy::DatabasesCacheForServerless >> TFlatTableExecutor_IndexLoading::Scan_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex >> KqpQueryServiceScripts::ExecuteScriptStatsBasic |66.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |66.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |66.2%| [LD] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/ydb-core-fq-libs-result_formatter-ut |66.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build >> TFlatTableExecutor_IndexLoading::Scan_History_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex >> BuildStatsHistogram::Ten_Serial_Log [GOOD] >> BuildStatsHistogram::Ten_Crossed_Log >> TPartBtreeIndexIteration::OneNode_History [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices |66.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build |66.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/ydb-core-tx-schemeshard-ut_index_build >> TableCreation::ConcurrentTableCreation [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation >> KqpQueryService::ShowCreateTable >> TableCreation::ConcurrentUpdateTable [GOOD] >> 
TopicAutoscaling::PartitionSplit_AutosplitByLoad [GOOD] >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter >> ScriptExecutionsTest::RunCheckLeaseStatus [GOOD] >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring >> TableCreation::SimpleUpdateTable [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_History_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex >> TVersions::Wreck2Reverse [GOOD] >> TVersions::Wreck1 >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentUpdateTable [GOOD] Test command err: 2025-06-03T10:30:10.904702Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668492625826083:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.905067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002078/r3tmp/tmpvho62K/pdisk_1.dat 2025-06-03T10:30:10.994058Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:10.997131Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668492625825926:2079] 1748946610900593 != 1748946610900596 TClient is connected to server localhost:20864 TServer::EnableGrpc on GrpcPort 23850, node 1 2025-06-03T10:30:11.033585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:11.033604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:11.033606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:11.033668Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: 2025-06-03T10:30:11.057668Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:11.057701Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:11.064163Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:30:11.067966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:11.072785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:30:11.370751Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.371323Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.371605Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.371609Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:11.371616Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.371632Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.371648Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.371654Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.371735Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.371738Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.372047Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.372048Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.372049Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:11.372050Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:11.372057Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:11.372067Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:11.372079Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.372080Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:11.372082Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. 
Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:11.372957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:1, at schemeshard: 72057594046644480 2025-06-03T10:30:11.373409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.373907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.375558Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:11.375574Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710658 2025-06-03T10:30:11.376112Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-03T10:30:11.376131Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710659 2025-06-03T10:30:11.376147Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-03T10:30:11.376149Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710660 2025-06-03T10:30:11.446861Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-03T10:30:11.460575Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-03T10:30:11.468451Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-03T10:30:11.520304Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-03T10:30:11.526310Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-03T10:30:11.531012Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-03T10:30:11.534000Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 554ac376-f9cf18a8-7f5357fb-da72b984, Bootstrap. 
Database: /dc-1 2025-06-03T10:30:11.537424Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127098.014212s seconds to be completed 2025-06-03T10:30:11.538312Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=MmExNjBjODgtYzUwNTM3ZTctZjI0MDM4MDgtM2U0NTY4ODc=, workerId: [1:7511668496920794109:2330], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-03T10:30:11.538390Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:11.539431Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 554ac376-f9cf18a8-7f5357fb-da72b984, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-03T10:30:11.539693Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=MmExNjBjODgtYzUwNTM3ZTctZjI0MDM4MDgtM2U0NTY4ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7511668496920794109:2330] 2025-06-03T10:30:11.539717Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7511668496920794111:2452] 2025-06-03T10:30:11.543994Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_ac ... BUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-06-03T10:30:13.153481Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668505455272747:2634] txid# 281474976715675, issues: { message: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521" severity: 1 } 2025-06-03T10:30:13.153501Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. 
TEvProposeTransactionStatus: { Status: 52 TxId: 281474976715675 Issues { message: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521" severity: 1 } SchemeShardStatus: 8 SchemeShardReason: "Check failed: path: \'/dc-1/.test/test_table\', error: path is under operation (id: [OwnerId: 72057594046644480, LocalPathId: 10], type: EPathTypeTable, state: EPathStateAlter), source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_table.cpp:521" SchemeShardTabletId: 72057594046644480 } 2025-06-03T10:30:13.153502Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:249: Table test_table updater. Unable to subscribe to concurrent transaction, falling back 2025-06-03T10:30:13.157582Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 12, sender: [2:7511668505455272706:2380], selfId: [2:7511668501160304276:2065], source: [2:7511668505455272704:2379] 2025-06-03T10:30:13.157665Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715668. Doublechecking... 2025-06-03T10:30:13.157686Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjY0Y2FkNDEtNzk5ZjJlNTItNjQ3NDJiOWEtY2FhYTYwN2Y=, TxId: 2025-06-03T10:30:13.157691Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjY0Y2FkNDEtNzk5ZjJlNTItNjQ3NDJiOWEtY2FhYTYwN2Y=, TxId: 2025-06-03T10:30:13.157870Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, Bootstrap. 
Database: /dc-1 2025-06-03T10:30:13.157909Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MjY0Y2FkNDEtNzk5ZjJlNTItNjQ3NDJiOWEtY2FhYTYwN2Y=, workerId: [2:7511668505455272704:2379], local sessions count: 2 2025-06-03T10:30:13.157917Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127096.393700s seconds to be completed 2025-06-03T10:30:13.158405Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, workerId: [2:7511668505455272828:2389], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-03T10:30:13.158436Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:13.158495Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:13.158594Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7511668505455272828:2389] 2025-06-03T10:30:13.158600Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7511668505455272830:2687] 2025-06-03T10:30:13.207135Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.219920Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.220688Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.230169Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.237035Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.241401Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.241432Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.245416Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.245452Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.249351Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-03T10:30:13.254897Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 14, sender: [2:7511668505455272829:2390], selfId: [2:7511668501160304276:2065], source: [2:7511668505455272828:2389] 2025-06-03T10:30:13.255050Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, TxId: 01jwtnewzsawpg09xgr11vncac 2025-06-03T10:30:13.255186Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:13.255355Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 15, targetId: [2:7511668505455272828:2389] 2025-06-03T10:30:13.255363Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 15 timeout: 300.000000s actor id: [2:7511668505455272874:2709] 2025-06-03T10:30:13.274607Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZTIyMGExNzQtMTI0Mjk5MzktNzg0NTM3YjYtMTQ3MmIyMzQ=, workerId: [2:7511668505455272576:2363], local sessions count: 2 2025-06-03T10:30:13.342611Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 15, sender: [2:7511668505455272873:2402], selfId: [2:7511668501160304276:2065], source: [2:7511668505455272828:2389] 2025-06-03T10:30:13.342854Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, TxId: 2025-06-03T10:30:13.342887Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: c27535fa-91153e28-4dd503e8-6ba8e08, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, TxId: 2025-06-03T10:30:13.342892Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: c27535fa-91153e28-4dd503e8-6ba8e08. SUCCESS. Issues: 2025-06-03T10:30:13.343731Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=YjZlM2YwZDUtN2FkMjg3NmMtMTQ5NzE1MzctYTMyOGYzYTk=, workerId: [2:7511668505455272828:2389], local sessions count: 1 2025-06-03T10:30:13.344121Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZGNjOTY0NDctOTRhZGVkYzgtNzliMzljY2YtYWUzMWExMjU=, workerId: [2:7511668505455272544:2348], local sessions count: 0 >> TFlatTableExecutor_IndexLoading::Scan_Groups_FlatIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex >> TExecutorDb::CoordinatorSimulation [GOOD] >> TExecutorDb::RandomCoordinatorSimulation ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::SimpleUpdateTable [GOOD] Test command err: 2025-06-03T10:30:11.101638Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668499376582208:2218];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:11.104336Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002081/r3tmp/tmpbdgI7O/pdisk_1.dat 2025-06-03T10:30:11.154088Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:11.155302Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668499376582004:2079] 1748946611031617 != 1748946611031620 2025-06-03T10:30:11.205669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:11.205702Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:11.213773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2769 TServer::EnableGrpc on GrpcPort 32692, node 1 2025-06-03T10:30:11.229323Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:11.229344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:11.229346Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:11.229407Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:30:11.298450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:11.304896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:30:11.728712Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.730269Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.733607Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.733620Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 
2025-06-03T10:30:11.733632Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.733665Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.733723Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.733734Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.733860Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.733865Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.734174Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.734179Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:11.734189Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.734191Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:11.734195Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:11.734200Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:11.734234Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.734235Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:11.734250Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:11.735344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:1, at schemeshard: 72057594046644480 2025-06-03T10:30:11.736061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.736428Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.738811Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:11.738818Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. 
TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-03T10:30:11.738841Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976710660 2025-06-03T10:30:11.738842Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976710659 2025-06-03T10:30:11.738870Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-03T10:30:11.738873Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976710658 2025-06-03T10:30:11.774027Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976710660. Doublechecking... 2025-06-03T10:30:11.785942Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976710658. Doublechecking... 2025-06-03T10:30:11.830022Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976710659. Doublechecking... 2025-06-03T10:30:11.839566Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-03T10:30:11.841181Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-03T10:30:11.915594Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-03T10:30:11.915840Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 70b3c603-867ff65-bf1604c4-b9b36f9f, Bootstrap. 
Database: /dc-1 2025-06-03T10:30:11.918064Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127097.633560s seconds to be completed 2025-06-03T10:30:11.918800Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=ZTJiYzk3ZjgtNmE1NzAzMjktOGI4Mjk5OWItNTNjMTVlZjg=, workerId: [1:7511668499376582889:2330], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-03T10:30:11.918834Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:11.919096Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 70b3c603-867ff65-bf1604c4-b9b36f9f, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-03T10:30:11.919283Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZTJiYzk3ZjgtNmE1NzAzMjktOGI4Mjk5OWItNTNjMTVlZjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7511668499376582889:2330] 2025-06-03T10:30:11.919290Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7511668499376582891:2452] 2025-06-03T10:30:11.919786Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actor ... 
se: /dc-1, longSession: 1, local sessions count: 4 2025-06-03T10:30:13.394818Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:13.394833Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MWZiOGE3NzYtMTk0YTdjMDMtNzkzZGQwZDAtZDEzMjc5MzI=, workerId: [2:7511668507793197169:2372], local sessions count: 3 2025-06-03T10:30:13.394900Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, RunDataQuery: -- TSaveScriptExecutionResultMetaQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $result_set_metas AS JsonDocument; UPDATE `.metadata/script_executions` SET result_set_metas = $result_set_metas WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:13.395007Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=MmQ0MDBjNjYtMThhMTAyYWItNGU2MmVlMTItYjY0M2FlNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 12, targetId: [2:7511668507793197195:2379] 2025-06-03T10:30:13.395022Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 12 timeout: 300.000000s actor id: [2:7511668507793197197:2594] 2025-06-03T10:30:13.421420Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. Column diff is empty, finishing 2025-06-03T10:30:13.425558Z node 2 :KQP_PROXY NOTICE: table_creator.cpp:365: Table test_table updater. Adding columns. New columns: col4, col5. Existing columns: col1, col2, col3 2025-06-03T10:30:13.425576Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:100: Table test_table updater. Full table path:/dc-1/.test/test_table 2025-06-03T10:30:13.426058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.426352Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:190: Table test_table updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715669 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 } 2025-06-03T10:30:13.426357Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:261: Table test_table updater. 
Subscribe on create table tx: 281474976715669 2025-06-03T10:30:13.427187Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 12, sender: [2:7511668507793197196:2380], selfId: [2:7511668503498228915:2204], source: [2:7511668507793197195:2379] 2025-06-03T10:30:13.427465Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MmQ0MDBjNjYtMThhMTAyYWItNGU2MmVlMTItYjY0M2FlNDU=, TxId: 2025-06-03T10:30:13.427472Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MmQ0MDBjNjYtMThhMTAyYWItNGU2MmVlMTItYjY0M2FlNDU=, TxId: 2025-06-03T10:30:13.427737Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MmQ0MDBjNjYtMThhMTAyYWItNGU2MmVlMTItYjY0M2FlNDU=, workerId: [2:7511668507793197195:2379], local sessions count: 2 2025-06-03T10:30:13.427748Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, Bootstrap. Database: /dc-1 2025-06-03T10:30:13.427769Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127096.123849s seconds to be completed 2025-06-03T10:30:13.428209Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, workerId: [2:7511668507793197231:2388], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-03T10:30:13.428225Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:13.428286Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:13.428372Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7511668507793197231:2388] 2025-06-03T10:30:13.428379Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7511668507793197233:2610] 2025-06-03T10:30:13.436485Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:290: Table test_table updater. Request: alter. Transaction completed: 281474976715669. Doublechecking... 2025-06-03T10:30:13.517085Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table updater. 
Column diff is empty, finishing 2025-06-03T10:30:13.527376Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 14, sender: [2:7511668507793197232:2389], selfId: [2:7511668503498228915:2204], source: [2:7511668507793197231:2388] 2025-06-03T10:30:13.527535Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, TxId: 01jwtnex8hb7nhhryttgxk8s0g 2025-06-03T10:30:13.527676Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:13.527824Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 15, targetId: [2:7511668507793197231:2388] 2025-06-03T10:30:13.527831Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 15 timeout: 300.000000s actor id: [2:7511668507793197292:2639] 2025-06-03T10:30:13.543029Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MWM5MjcyY2QtZTBhZTZkOGQtYjQzY2ZlZjItNjM2NGQxZWY=, workerId: [2:7511668507793197058:2359], local sessions count: 2 2025-06-03T10:30:13.612324Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 15, sender: [2:7511668507793197291:2403], selfId: [2:7511668503498228915:2204], source: [2:7511668507793197231:2388] 2025-06-03T10:30:13.612585Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, TxId: 2025-06-03T10:30:13.612644Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: 3baeaf13-7c76ce6e-e88c965-42662d3b, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, TxId: 2025-06-03T10:30:13.612667Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: 3baeaf13-7c76ce6e-e88c965-42662d3b. SUCCESS. Issues: 2025-06-03T10:30:13.613142Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZTY4ZTI3ZWMtYTlkNDk2YTctYjA0MjQyNTgtOGVlNTZlMjE=, workerId: [2:7511668507793197231:2388], local sessions count: 1 2025-06-03T10:30:13.613410Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NjUwY2E4ZWQtODhmYzdmZjktMTQ2ZWU2ODAtZDE0YzA4NjM=, workerId: [2:7511668507793197032:2348], local sessions count: 0 >> KqpQueryService::ShowCreateTableOnView [GOOD] >> KqpQueryService::ShowCreateView >> BuildStatsHistogram::Ten_Crossed_Log [GOOD] >> BuildStatsHistogram::Five_Five_Mixed >> KqpQueryService::IssuesInCaseOfSuccess [GOOD] >> KqpQueryService::ForbidInteractiveTxOnImplicitSession |66.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |66.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence |66.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/ydb-core-tx-schemeshard-ut_sequence >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex [GOOD] >> TFlatTableExecutor_IndexLoading::Scan_Groups_BTreeIndex_Empty [GOOD] >> TFlatTableExecutor_KeepEraseMarkers::TestKeepEraseMarkers [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactUncommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactCommittedLongTx [GOOD] >> TFlatTableExecutor_LongTx::CompactedLongTxRestart [GOOD] >> TFlatTableExecutor_LongTx::CompactMultipleChanges [GOOD] >> TFlatTableExecutor_LongTx::LongTxBorrow >> KqpQueryService::Ddl [GOOD] >> KqpQueryService::DdlColumnTable >> TFlatTableExecutor_LongTx::LongTxBorrow [GOOD] >> TFlatTableExecutor_LongTx::MemTableLongTxRead [GOOD] >> TFlatTableExecutor_LongTx::CompactedTxIdReuse [GOOD] >> TFlatTableExecutor_LongTx::MergeSkewedCommitted [GOOD] >> 
TFlatTableExecutor_LongTxAndBlobs::SmallValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::OuterBlobValues [GOOD] >> TFlatTableExecutor_LongTxAndBlobs::ExternalBlobValues [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestEnqueueCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriority [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityCancel [GOOD] >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel >> KqpQueryService::StreamExecuteQuery [GOOD] >> KqpQueryService::StreamExecuteCollectMeta >> TFlatTableExecutor_LowPriorityTxs::TestLowPriorityAllocatingCancel [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshot [GOOD] >> TFlatTableExecutor_MoveTableData::TestMoveSnapshotFollower [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelFinishedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestCancelRunningPostponedScan [GOOD] >> TFlatTableExecutor_PostponedScan::TestPostponedScanSnapshotMVCC [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign >> CompressExecutor::TestExecutorMemUsage [GOOD] >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] >> TFlatTableExecutor_Reboot::TestSchemeGcAfterReassign [GOOD] >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] >> KqpQueryService::ShowCreateTable [GOOD] >> KqpQueryService::ShowCreateTableDisable >> TFlatTableExecutor_RejectProbability::MaxedOutRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability >> KqpQueryServiceScripts::ExecuteScriptStatsBasic [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsFull ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::ReadFromTimestamp_PQv1 [GOOD] Test command err: 2025-06-03T10:28:47.873631Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668137736942907:2266];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.873735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000806/r3tmp/tmpwxShiv/pdisk_1.dat 2025-06-03T10:28:47.935138Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:28:47.945787Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:47.945972Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668137736942676:2079] 1748946527871477 != 1748946527871480 TServer::EnableGrpc on GrpcPort 31515, node 1 2025-06-03T10:28:47.962458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000806/r3tmp/yandexGacnCs.tmp 2025-06-03T10:28:47.962479Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000806/r3tmp/yandexGacnCs.tmp 2025-06-03T10:28:47.962554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000806/r3tmp/yandexGacnCs.tmp 2025-06-03T10:28:47.962617Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: 
got bad distributable configuration 2025-06-03T10:28:47.967427Z INFO: TTestServer started on Port 17340 GrpcPort 31515 2025-06-03T10:28:47.974706Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.974746Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.975826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17340 PQClient connected to localhost:31515 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:48.018459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:48.021774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:48.029433Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:48.103485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:48.273447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668142031910769:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.273496Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.273522Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668142031910796:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.274350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668142031910826:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.274383Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.274510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.276755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668142031910798:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:48.308193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.316609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.337204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:28:48.364670Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668142031911106:2582] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7511668142031911148:2605] 2025-06-03T10:28:52.872818Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668137736942907:2266];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.872857Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.616189Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.620432Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.620960Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668163506747831:2675], Recipient [1:7511668137736943118:2193]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.620977Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.620981Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.620997Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668163506747827:2672], Recipient [1:7511668137736943118:2193]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:53.621004Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.631981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:53.632103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:53.632187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:53.632209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId ... ests. 
2025-06-03T10:30:13.743979Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.759969Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668468662960587:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668468662960644:2458], Cookie: 0 2025-06-03T10:30:13.760009Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668468662960644:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.760015Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.760036Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.760071Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.760075Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.760085Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.767434Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732905:2795], Partition 2, Sender [0:0:0], Recipient [7:7511668498727732981:2803], Cookie: 0 2025-06-03T10:30:13.767477Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732981:2803]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.767482Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.767504Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.767538Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.767542Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.767549Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:13.767567Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732904:2794], Partition 1, Sender [0:0:0], Recipient [7:7511668498727732978:2800], Cookie: 0 2025-06-03T10:30:13.767571Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732978:2800]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.767574Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.767578Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.767591Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.767594Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.767597Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.860533Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668468662960587:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668468662960644:2458], Cookie: 0 2025-06-03T10:30:13.860564Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668468662960644:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.860570Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.860598Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.860630Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.860634Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.860642Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:13.867646Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732905:2795], Partition 2, Sender [0:0:0], Recipient [7:7511668498727732981:2803], Cookie: 0 2025-06-03T10:30:13.867646Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732904:2794], Partition 1, Sender [0:0:0], Recipient [7:7511668498727732978:2800], Cookie: 0 2025-06-03T10:30:13.867661Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732978:2800]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.867668Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.867672Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732981:2803]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.867676Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.867680Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.867688Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.867702Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.867705Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.867705Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.867708Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.867710Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.867714Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.960850Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668468662960587:2455], Partition 0, Sender [0:0:0], Recipient [7:7511668468662960644:2458], Cookie: 0 2025-06-03T10:30:13.960881Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668468662960644:2458]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.960887Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.960908Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.960939Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.960942Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.960950Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.967976Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732904:2794], Partition 1, Sender [0:0:0], Recipient [7:7511668498727732978:2800], Cookie: 0 2025-06-03T10:30:13.967986Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668498727732905:2795], Partition 2, Sender [0:0:0], Recipient [7:7511668498727732981:2803], Cookie: 0 2025-06-03T10:30:13.968004Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732981:2803]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.968004Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668498727732978:2800]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.968009Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.968009Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:13.968027Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.968027Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:13.968057Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.968058Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:13.968061Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.968061Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:13.968068Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:13.968067Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> TableCreation::ConcurrentMultipleTablesCreation [GOOD] Test command err: 2025-06-03T10:30:12.186491Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668504169764214:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:12.186520Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00206a/r3tmp/tmpmwUpFx/pdisk_1.dat 2025-06-03T10:30:12.278505Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:26050 TServer::EnableGrpc on GrpcPort 15147, node 1 2025-06-03T10:30:12.334574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.334606Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.336838Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:12.341593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.341601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.341605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:12.341654Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:30:12.379846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:30:12.693576Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:12.694111Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:12.697644Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.697669Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.697675Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:12.697697Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:12.697705Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:12.698684Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.698696Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:12.698705Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:12.698729Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.698730Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:12.698733Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:12.698739Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.698740Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:12.698742Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:12.699724Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:30:12.700396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.700692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.701823Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.705398Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:12.705430Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. 
Subscribe on create table tx: 281474976715660 2025-06-03T10:30:12.705461Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-03T10:30:12.705465Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715659 2025-06-03T10:30:12.706989Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-03T10:30:12.706999Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715658 2025-06-03T10:30:12.733546Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-03T10:30:12.754118Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-03T10:30:12.754177Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-03T10:30:12.807836Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. Column diff is empty, finishing 2025-06-03T10:30:12.821829Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-03T10:30:12.834314Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-03T10:30:12.834543Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: fb2c4c8b-d3057b40-d50c4489-29f2dc5e, Bootstrap. 
Database: /dc-1 2025-06-03T10:30:12.841434Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127096.710198s seconds to be completed 2025-06-03T10:30:12.842367Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=MWE0YmYzOWQtODUxOThjMjUtN2NiMjNjYzQtOGY4OGI5NTE=, workerId: [1:7511668504169765064:2330], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-03T10:30:12.842398Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:12.842830Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: fb2c4c8b-d3057b40-d50c4489-29f2dc5e, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-03T10:30:12.843028Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=MWE0YmYzOWQtODUxOThjMjUtN2NiMjNjYzQtOGY4OGI5NTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7511668504169765064:2330] 2025-06-03T10:30:12.843038Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7511668504169765066:2448] 2025-06-03T10:30:12.843648Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668504169765067:2332], DatabaseId: /dc-1, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:12.843670Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /dc-1, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:12.843778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668504169765079: ... estId: 12, sender: [2:7511668511399802015:2383], selfId: [2:7511668507104833242:2202], source: [2:7511668511399802014:2382] 2025-06-03T10:30:14.336682Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MzIwYzY4OTEtNjdmMDcwYi1kZGVhYmE0NS05YjVjNGI5Mw==, TxId: 2025-06-03T10:30:14.336690Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptExecutionResultMetaQuery] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MzIwYzY4OTEtNjdmMDcwYi1kZGVhYmE0NS05YjVjNGI5Mw==, TxId: 2025-06-03T10:30:14.336864Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, Bootstrap. Database: /dc-1 2025-06-03T10:30:14.337031Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MzIwYzY4OTEtNjdmMDcwYi1kZGVhYmE0NS05YjVjNGI5Mw==, workerId: [2:7511668511399802014:2382], local sessions count: 2 2025-06-03T10:30:14.337053Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127095.214566s seconds to be completed 2025-06-03T10:30:14.337576Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=ZmIwNjNjZWYtMzFhM2E1ZmMtMjg3ZTU4YjgtMTE3ODVkNjg=, workerId: [2:7511668511399802041:2391], database: /dc-1, longSession: 1, local sessions count: 3 2025-06-03T10:30:14.337602Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:14.337697Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, RunDataQuery: -- TSaveScriptFinalStatusActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, finalization_status, meta, customer_supplied_id, user_token, script_sinks, script_secret_names FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:14.337810Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZmIwNjNjZWYtMzFhM2E1ZmMtMjg3ZTU4YjgtMTE3ODVkNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 14, targetId: [2:7511668511399802041:2391] 2025-06-03T10:30:14.337818Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 14 timeout: 300.000000s actor id: [2:7511668511399802043:3033] 2025-06-03T10:30:14.343771Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.347867Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. 
Column diff is empty, finishing 2025-06-03T10:30:14.347899Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.350949Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.350986Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.352166Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.352188Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.352201Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.353512Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.353533Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.354682Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.354774Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.358438Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.359931Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.359967Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.360722Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.364002Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.364052Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.367623Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.367661Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.373379Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.373417Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.373428Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.373439Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.373450Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.375583Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table0 updater. Column diff is empty, finishing 2025-06-03T10:30:14.376293Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.376798Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. 
Column diff is empty, finishing 2025-06-03T10:30:14.378322Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.380913Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.380934Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.381905Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.383458Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.389163Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.392739Z node 2 :KQP_PROXY DEBUG: table_creator.cpp:362: Table test_table1 updater. Column diff is empty, finishing 2025-06-03T10:30:14.413268Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MmIyMjMzODctN2YzNDRjNmUtNmFhOTdkZmUtMWU1ZTcwYjY=, workerId: [2:7511668511399801380:2358], local sessions count: 2 2025-06-03T10:30:14.487311Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 14, sender: [2:7511668511399802042:2392], selfId: [2:7511668507104833242:2202], source: [2:7511668511399802041:2391] 2025-06-03T10:30:14.487487Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=ZmIwNjNjZWYtMzFhM2E1ZmMtMjg3ZTU4YjgtMTE3ODVkNjg=, TxId: 01jwtney6fb0qjffjy68ksxfh2 2025-06-03T10:30:14.487628Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: e32c754c-d09f4e2b-90ed1113-817a7d3e, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE 
database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:14.487893Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=ZmIwNjNjZWYtMzFhM2E1ZmMtMjg3ZTU4YjgtMTE3ODVkNjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 15, targetId: [2:7511668511399802041:2391] 2025-06-03T10:30:14.487902Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 15 timeout: 300.000000s actor id: [2:7511668511399802120:3085] >> TFlatTableExecutor_RejectProbability::SomeRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbability [GOOD] >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables >> TFlatTableExecutor_RejectProbability::ZeroRejectProbabilityMultipleTables [GOOD] >> TFlatTableExecutor_Reschedule::TestExecuteReschedule [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorSetResourceProfile [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestTxData [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorReuseStaticMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages >> KqpQueryService::ShowCreateView [GOOD] >> KqpQueryService::ShowCreateViewOnTable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/with_offset_ranges_mode_ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-03T10:29:07.465777Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1748946547465765 2025-06-03T10:29:07.641871Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668224063888129:2140];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:07.641893Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:07.830524Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00201f/r3tmp/tmp6hvjG1/pdisk_1.dat 2025-06-03T10:29:07.845819Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:29:07.848262Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668225969435696:2286];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:07.848483Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:07.964724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:07.964761Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:07.970250Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:29:07.970637Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:07.986533Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5981, node 1 2025-06-03T10:29:08.014903Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-03T10:29:08.014918Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-03T10:29:08.041734Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00201f/r3tmp/yandexhBNxub.tmp 2025-06-03T10:29:08.041748Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00201f/r3tmp/yandexhBNxub.tmp 2025-06-03T10:29:08.041844Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00201f/r3tmp/yandexhBNxub.tmp 2025-06-03T10:29:08.041880Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:08.046149Z INFO: TTestServer started on Port 6970 GrpcPort 5981 TClient is connected to server localhost:6970 PQClient connected to localhost:5981 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:29:08.061791Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:08.061826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:08.066034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:08.103530Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:08.145445Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:08.156852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:29:08.452121Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668230264403089:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:08.452139Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668230264403078:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:08.452177Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:08.454809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-06-03T10:29:08.465841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:08.466016Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668230264403092:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:29:08.569374Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668230264403120:2128] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:08.575827Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668228358856386:2342], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:08.576424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.576614Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YjdmYjY5NjMtODg1YjdhMmMtN2IzY2RhOTMtZWMyN2M2Y2M=, ActorId: [1:7511668228358856330:2331], ActorState: ExecuteState, TraceId: 01jwtncxrv0qa65tgt3pvfw3ss, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:08.577341Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:08.577365Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511668230264403135:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:08.578102Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=YzdjYzMyYzYtMWRkMjQ0NTgtNGNjYmNlMzctNDQ2NzcyZDQ=, ActorId: [2:7511668230264403076:2305], ActorState: ExecuteState, TraceId: 01jwtncxq29chf6q413y9adhsp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:08.578260Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:08.661470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.751404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:5981", true, true, 1000); 2025-06-03T10:29:08.823717Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jwtncy1jc5tn23sh2s6er9q1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQzNTI1ZWItOTZkYWI4ZmYtMjY3MThlMmYtOGVlMDg3NTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511668228358856836:2971] 2025-06-03T10:29:12.64 ... 
Id AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-03T10:30:13.239661Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:30:13.239663Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-03T10:30:13.239670Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [13:7511668506860754335:2551] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:30:13.240304Z node 13 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [13:7511668506860754335:2551] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:30:13.437635Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715698. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-03T10:30:13.437688Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [13:7511668506860754347:2553] TxId: 281474976715698. Ctx: { TraceId: 01jwtnewzr3jcjzrn1ef7sznz1, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=ZDFlNDMxNzktYjM2OTBjMjAtM2ZmNjJlMzItNTBhZjY0M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-03T10:30:13.437780Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=13&id=ZDFlNDMxNzktYjM2OTBjMjAtM2ZmNjJlMzItNTBhZjY0M2U=, ActorId: [13:7511668506860754336:2553], ActorState: ExecuteState, TraceId: 01jwtnewzr3jcjzrn1ef7sznz1, Create QueryResponse for error on request, msg: 2025-06-03T10:30:13.438589Z node 13 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [13:7511668506860754335:2551] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ZDFlNDMxNzktYjM2OTBjMjAtM2ZmNjJlMzItNTBhZjY0M2U=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnewzr3jcjzrn1efffj9x1" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-03T10:30:13.438623Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ZDFlNDMxNzktYjM2OTBjMjAtM2ZmNjJlMzItNTBhZjY0M2U=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnewzr3jcjzrn1efffj9x1" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-03T10:30:13.438909Z node 13 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-06-03T10:30:13.440393Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=13&id=ZDFlNDMxNzktYjM2OTBjMjAtM2ZmNjJlMzItNTBhZjY0M2U=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnewzr3jcjzrn1efffj9x1" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-03T10:30:13.440406Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session will restart in 2.000000s 2025-06-03T10:30:13.440436Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session: Do CDS request 2025-06-03T10:30:13.440444Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Do schedule cds request after 2000 ms 2025-06-03T10:30:13.652619Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715700. Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:30:13.652688Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [13:7511668506860754403:2555] TxId: 281474976715700. Ctx: { TraceId: 01jwtnex536qvm619q34cmvtvw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGEyYjEzMzctZTUwYTczYS00MmE0M2VjNy00MzllOGJjZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:30:13.652777Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=13&id=OGEyYjEzMzctZTUwYTczYS00MmE0M2VjNy00MzllOGJjZA==, ActorId: [13:7511668506860754381:2555], ActorState: ExecuteState, TraceId: 01jwtnex536qvm619q34cmvtvw, Create QueryResponse for error on request, msg: 2025-06-03T10:30:13.653154Z node 13 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jwtnex704fswkqfm81jjnm4p" } } YdbStatus: UNAVAILABLE ConsumedRu: 39 } 2025-06-03T10:30:13.693678Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720684. Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:30:13.693745Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [14:7511668508069972353:2490] TxId: 281474976720684. Ctx: { TraceId: 01jwtnex4p1c57ccbvxyb2argt, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=OTI1ODY1ODktYTVmN2U0ZGUtMzU0OGQxYTItYTM1MTAzNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:30:13.693853Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=OTI1ODY1ODktYTVmN2U0ZGUtMzU0OGQxYTItYTM1MTAzNTc=, ActorId: [14:7511668508069972338:2490], ActorState: ExecuteState, TraceId: 01jwtnex4p1c57ccbvxyb2argt, Create QueryResponse for error on request, msg: 2025-06-03T10:30:13.694524Z node 14 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jwtnex888athmwmr3wdnstqn" } } YdbStatus: UNAVAILABLE ConsumedRu: 74 } 2025-06-03T10:30:14.007208Z node 13 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715702. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:30:14.007286Z node 13 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [13:7511668506860754489:2562] TxId: 281474976715702. Ctx: { TraceId: 01jwtnexjg6y9sf60jfnqc99ct, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OTU2ZWExYTUtZjQ4ODYwYjQtMTYzYzYwZmItNzU3MWZhYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:30:14.007390Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=13&id=OTU2ZWExYTUtZjQ4ODYwYjQtMTYzYzYwZmItNzU3MWZhYTQ=, ActorId: [13:7511668506860754486:2562], ActorState: ExecuteState, TraceId: 01jwtnexjg6y9sf60jfnqc99ct, Create QueryResponse for error on request, msg: 2025-06-03T10:30:14.007880Z node 13 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jwtnexjg6y9sf60jfpwdcmdh" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-03T10:30:14.100011Z node 14 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720686. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:30:14.100083Z node 14 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [14:7511668508069972434:2497] TxId: 281474976720686. Ctx: { TraceId: 01jwtnexn7b9d23d3bfqa59cgx, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=Mjk0OGU2NjQtYjRhM2ZhNGQtMWZmMTVjMi1hMWJlNTNhOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:30:14.100177Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=Mjk0OGU2NjQtYjRhM2ZhNGQtMWZmMTVjMi1hMWJlNTNhOQ==, ActorId: [14:7511668508069972431:2497], ActorState: ExecuteState, TraceId: 01jwtnexn7b9d23d3bfqa59cgx, Create QueryResponse for error on request, msg: 2025-06-03T10:30:14.101274Z node 14 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jwtnexn7b9d23d3bfrmcsre4" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-03T10:30:14.277400Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session: close. Timeout = 0 ms 2025-06-03T10:30:14.277426Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session will now close 2025-06-03T10:30:14.277441Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session: aborting 2025-06-03T10:30:14.277728Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-03T10:30:14.277738Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|a5b2f220-8d0f5fd1-424097a3-7473de4d_0] Write session: destroy ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::AttemptToUpdateDeletedLease [GOOD] Test command err: 2025-06-03T10:30:11.116380Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668498796411384:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:11.116485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002077/r3tmp/tmpFAUqIJ/pdisk_1.dat 2025-06-03T10:30:11.217509Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:11.219398Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:11.219430Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:11.239463Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 1651, node 1 2025-06-03T10:30:11.272058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:11.272075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:11.272078Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:11.272136Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:10244 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:11.320377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:11.624651Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.625206Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /Root, empty 2025-06-03T10:30:11.627763Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:881: Received ping session request, request_id: 2, sender: [1:7511668498796412138:2317], trace_id: 01jwtnev4e44a44r9wne56adzw 2025-06-03T10:30:11.627921Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 2 timeout: 5.000000s actor id: [0:0:0] 2025-06-03T10:30:11.627945Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.627964Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.627969Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.627994Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 
2025-06-03T10:30:11.628004Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.630115Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.630254Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:559: Session not found, targetId: [2:8678280833929343339:121] requestId: 2 2025-06-03T10:30:11.630791Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jwtnev4e44a44r9wne56adzw", Forwarded response to sender actor, requestId: 2, sender: [1:7511668498796412138:2317], selfId: [1:7511668498796411461:2280], source: [1:7511668498796411461:2280] test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002077/r3tmp/tmpMdPMaM/pdisk_1.dat 2025-06-03T10:30:12.300525Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:30:12.304079Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:12.304697Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511668504850317748:2079] 1748946612264691 != 1748946612264694 TClient is connected to server localhost:1185 TServer::EnableGrpc on GrpcPort 27442, node 4 2025-06-03T10:30:12.341725Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.341739Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.341740Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:12.341783Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:30:12.389681Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.389724Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.390568Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.391136Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:30:12.393986Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:12.734173Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:12.734595Z node 4 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:12.737766Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:12.737778Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.737789Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.737793Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:12.737801Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:12.738522Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.738524Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:12.738532Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:12.738548Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.738549Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:12.738553Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:12.738606Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:12.738607Z node 4 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:12.738610Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. 
Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:12.739595Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:30:12.739967Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.740180Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.741455Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:12.741722Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:12.741734Z node 4 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715660 2025-06-03T10:30:12.741746Z node 4 :KQP_PROXY DEBUG: table_c ... (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_generation FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:14.510913Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 22, targetId: [4:7511668513440253633:2445] 2025-06-03T10:30:14.510920Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [4:7511668513440253635:2610] 2025-06-03T10:30:14.521342Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 22, sender: [4:7511668513440253634:2446], selfId: [4:7511668504850317776:2068], source: [4:7511668513440253633:2445] 2025-06-03T10:30:14.521463Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get operation info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, TxId: 01jwtney7f42g771jdrpary14c 2025-06-03T10:30:14.521602Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get operation info, RunDataQuery: -- TSaveScriptFinalStatusActor::FinishScriptExecution DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $operation_status AS Int32; DECLARE $execution_status AS Int32; DECLARE $finalization_status AS Int32; DECLARE $issues AS JsonDocument; DECLARE $plan AS JsonDocument; DECLARE $stats AS JsonDocument; DECLARE $ast AS Optional; DECLARE $ast_compressed AS Optional; DECLARE $ast_compression_method AS Optional; DECLARE $operation_ttl AS Interval; DECLARE $customer_supplied_id AS Text; DECLARE $user_token AS Text; DECLARE $script_sinks AS Optional; DECLARE $script_secret_names AS Optional; DECLARE $applicate_script_external_effect_required AS Bool; UPDATE `.metadata/script_executions` SET operation_status = $operation_status, execution_status = $execution_status, finalization_status = IF($applicate_script_external_effect_required, $finalization_status, NULL), issues = $issues, plan = $plan, end_ts = CurrentUtcTimestamp(), stats = $stats, ast = $ast, ast_compressed = $ast_compressed, ast_compression_method = $ast_compression_method, expire_at = IF($operation_ttl > CAST(0 AS Interval), CurrentUtcTimestamp() + $operation_ttl, NULL), customer_supplied_id = IF($applicate_script_external_effect_required, $customer_supplied_id, NULL), user_token = IF($applicate_script_external_effect_required, $user_token, NULL), script_sinks = IF($applicate_script_external_effect_required, $script_sinks, NULL), script_secret_names = IF($applicate_script_external_effect_required, $script_secret_names, NULL) WHERE database = $database AND execution_id = $execution_id; DELETE FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:14.521774Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 23, targetId: [4:7511668513440253633:2445] 2025-06-03T10:30:14.521783Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 23 timeout: 300.000000s actor id: [4:7511668513440253656:2615] 2025-06-03T10:30:14.568182Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 23, sender: [4:7511668513440253655:2452], selfId: [4:7511668504850317776:2068], source: [4:7511668513440253633:2445] 2025-06-03T10:30:14.568376Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, TxId: 2025-06-03T10:30:14.568405Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, TxId: 2025-06-03T10:30:14.568424Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: f2762efc-2ec696ef-3667a05e-21c19b0. UNAVAILABLE. Issues: {
: Error: Lease expired } 2025-06-03T10:30:14.568853Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=ODEyZjg1YmYtYmYwZTBjZTUtOGIxNGYzMTgtY2IzOGNlOGE=, workerId: [4:7511668513440253633:2445], local sessions count: 1 2025-06-03T10:30:14.568953Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:633: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: f2762efc-2ec696ef-3667a05e-21c19b0, successfully finalized script execution operation 2025-06-03T10:30:14.568961Z node 4 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: f2762efc-2ec696ef-3667a05e-21c19b0, reply success 2025-06-03T10:30:14.573895Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jwtney9d6xv67cwapvny68x2, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=ODQ3YjA5ZWItY2IyMDlmZGQtZDA5NzZmNWItMTZlNTExOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 24, targetId: [4:7511668509145286072:2360] 2025-06-03T10:30:14.573921Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [4:7511668513440253681:2622] 2025-06-03T10:30:14.721326Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jwtney9d6xv67cwapvny68x2", Forwarded response to sender actor, requestId: 24, sender: [4:7511668513440253680:2457], selfId: [4:7511668504850317776:2068], source: [4:7511668509145286072:2360] 2025-06-03T10:30:14.727996Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, Bootstrap. Database: /dc-1 2025-06-03T10:30:14.728144Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127094.823478s seconds to be completed 2025-06-03T10:30:14.728754Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, workerId: [4:7511668513440253721:2469], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-03T10:30:14.728791Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:14.728953Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:14.729086Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 26, targetId: [4:7511668513440253721:2469] 2025-06-03T10:30:14.729093Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 26 timeout: 300.000000s actor id: [4:7511668513440253723:2638] 2025-06-03T10:30:14.776950Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 26, sender: [4:7511668513440253722:2470], selfId: [4:7511668504850317776:2068], source: [4:7511668513440253721:2469] 2025-06-03T10:30:14.777204Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, TxId: 01jwtneyfk73cmx1q1fhjq9sn5 2025-06-03T10:30:14.777233Z node 4 :KQP_PROXY WARN: query_actor.cpp:372: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get lease info, Finish with BAD_REQUEST, Issues: {
: Error: No such execution }, SessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, TxId: 01jwtneyfk73cmx1q1fhjq9sn5 2025-06-03T10:30:14.777239Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:428: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get lease info, Rollback transaction: 01jwtneyfk73cmx1q1fhjq9sn5 2025-06-03T10:30:14.777453Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 600.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 27, targetId: [4:7511668513440253721:2469] 2025-06-03T10:30:14.777460Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 27 timeout: 600.000000s actor id: [4:7511668513440253747:2647] 2025-06-03T10:30:14.777851Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 27, sender: [4:7511668513440253746:2478], selfId: [4:7511668504850317776:2068], source: [4:7511668513440253721:2469] 2025-06-03T10:30:14.777991Z node 4 :KQP_PROXY DEBUG: query_actor.cpp:437: [TQueryBase] [TScriptLeaseUpdater] TraceId: f2762efc-2ec696ef-3667a05e-21c19b0, State: Get lease info, RollbackTransactionResult: SUCCESS. Issues: 2025-06-03T10:30:14.779134Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=NzA5ZDliZjQtMjQ3NTU4MzAtZjk0YzVhOTItODU3YzE3YTg=, workerId: [4:7511668513440253721:2469], local sessions count: 1 2025-06-03T10:30:14.781842Z node 4 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=4&id=ODQ3YjA5ZWItY2IyMDlmZGQtZDA5NzZmNWItMTZlNTExOTA=, workerId: [4:7511668509145286072:2360], local sessions count: 0 >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestPages [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPageLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemory [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorRequestMemoryFollower [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded >> KqpQueryService::StreamExecuteQueryPure >> KqpQueryService::ForbidInteractiveTxOnImplicitSession [GOOD] >> KqpQueryService::MaterializeTxResults >> TFlatTableExecutor_ResourceProfile::TestExecutorMemoryLimitExceeded [GOOD] >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] >> TExecutorDb::RandomCoordinatorSimulation [GOOD] >> TExecutorDb::MultiPage >> KqpQueryService::AlterTempTable |66.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |66.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots |66.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/ydb-core-tx-schemeshard-ut_external_table_reboots >> KqpQueryService::StreamExecuteCollectMeta [GOOD] >> KqpQueryService::StreamExecuteQueryMultiResult >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] >> TExecutorDb::MultiPage [GOOD] >> TExecutorDb::EncodedPage >> BuildStatsHistogram::Five_Five_Mixed [GOOD] >> BuildStatsHistogram::Five_Five_Serial >> KqpQueryService::ShowCreateTableDisable [GOOD] >> 
KqpQueryService::ShowCreateTableNotSuccess >> KqpQueryService::TableSink_HtapInteractive-withOltpSink >> TExecutorDb::EncodedPage [GOOD] >> TFlatCxxDatabaseTest::BasicSchemaTest [GOOD] >> TFlatCxxDatabaseTest::RenameColumnSchemaTest [GOOD] >> TFlatCxxDatabaseTest::SchemaFillerTest [GOOD] >> TFlatDatabaseDecimal::UpdateRead [GOOD] >> TFlatEraseCacheTest::BasicUsage [GOOD] >> TFlatEraseCacheTest::BasicUsageReverse [GOOD] >> TFlatEraseCacheTest::CacheEviction [GOOD] >> TFlatEraseCacheTest::StressGarbageCollection [GOOD] >> TFlatEraseCacheTest::StressGarbageCollectionWithStrings [GOOD] >> TFlatExecutorLeases::Basics >> KqpQueryServiceScripts::ExecuteScriptStatsFull [GOOD] >> KqpQueryServiceScripts::ExecuteScriptStatsNone >> KqpQueryService::ShowCreateViewOnTable [GOOD] >> TPartBtreeIndexIteration::OneNode_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_Slices ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> ScriptExecutionsTest::UpdatesLeaseAfterExpiring [GOOD] Test command err: 2025-06-03T10:30:11.406452Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668497013971578:2220];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:11.460260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00206e/r3tmp/tmpBPg5GA/pdisk_1.dat 2025-06-03T10:30:11.509426Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668497013971369:2079] 1748946611396768 != 1748946611396771 2025-06-03T10:30:11.514358Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:26036 TServer::EnableGrpc on GrpcPort 5672, node 1 2025-06-03T10:30:11.565969Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:11.566004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:11.566916Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:11.581582Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:11.581600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:11.581603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:11.581660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:30:11.618966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:11.623240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:11.959052Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.963277Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.969611Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.969617Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:11.969629Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.969647Z node 1 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.969947Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.969956Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.969959Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.970497Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.970499Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:11.970508Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:11.970531Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.970532Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:11.970534Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. 
Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:11.970560Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.970561Z node 1 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:11.970564Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:11.971421Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.971639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-06-03T10:30:11.972045Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.972250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.974110Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:11.974128Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976715659 2025-06-03T10:30:11.974166Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-03T10:30:11.974169Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976715660 2025-06-03T10:30:11.974499Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-03T10:30:11.974514Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976715658 2025-06-03T10:30:12.019698Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_executions updater. Request: create. Transaction completed: 281474976715659. Doublechecking... 2025-06-03T10:30:12.038451Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table result_sets updater. Request: create. Transaction completed: 281474976715658. Doublechecking... 2025-06-03T10:30:12.049370Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:290: Table script_execution_leases updater. Request: create. Transaction completed: 281474976715660. Doublechecking... 2025-06-03T10:30:12.085670Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_executions updater. Column diff is empty, finishing 2025-06-03T10:30:12.098908Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table result_sets updater. 
Column diff is empty, finishing 2025-06-03T10:30:12.101724Z node 1 :KQP_PROXY DEBUG: table_creator.cpp:362: Table script_execution_leases updater. Column diff is empty, finishing 2025-06-03T10:30:12.105508Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 855fb342-a4f336b3-b320d835-8bed98a9, Bootstrap. Database: /dc-1 2025-06-03T10:30:12.109418Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127097.442215s seconds to be completed 2025-06-03T10:30:12.110325Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=1&id=ZmFmZDgwNTgtZjg0MTY2YTQtMTQ3MTRiYWUtOGNhYjViNTg=, workerId: [1:7511668501308939552:2330], database: /dc-1, longSession: 1, local sessions count: 1 2025-06-03T10:30:12.110379Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:12.111074Z node 1 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCreateScriptOperationQuery] TraceId: 855fb342-a4f336b3-b320d835-8bed98a9, RunDataQuery: -- TCreateScriptOperationQuery::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $run_script_actor_id AS Text; DECLARE $execution_status AS Int32; DECLARE $execution_mode AS Int32; DECLARE $query_text AS Text; DECLARE $syntax AS Int32; DECLARE $meta AS JsonDocument; DECLARE $lease_duration AS Interval; DECLARE $execution_meta_ttl AS Interval; UPSERT INTO `.metadata/script_executions` (database, execution_id, run_script_actor_id, execution_status, execution_mode, start_ts, query_text, syntax, meta, expire_at) VALUES ($database, $execution_id, $run_script_actor_id, $execution_status, $execution_mode, CurrentUtcTimestamp(), $query_text, $syntax, $meta, CurrentUtcTimestamp() + $execution_meta_ttl); UPSERT INTO `.metadata/script_execution_leases` (database, execution_id, lease_deadline, lease_generation, expire_at) VALUES ($database, $execution_id, CurrentUtcTimestamp() + $lease_duration, 1, CurrentUtcTimestamp() + $execution_meta_ttl); 2025-06-03T10:30:12.111291Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=1&id=ZmFmZDgwNTgtZjg0MTY2YTQtMTQ3MTRiYWUtOGNhYjViNTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 3, targetId: [1:7511668501308939552:2330] 2025-06-03T10:30:12.111301Z node 1 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 3 timeout: 300.000000s actor id: [1:7511668501308939554:2452] 2025-06-03T10:30:12.113461Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_act ... session/3?node_id=2&id=MjBlZDNjZTItYzQzYjdjNzYtOWM1YjNkMTUtZDc3ZDU0NTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 18, targetId: [2:7511668513384360432:2397] 2025-06-03T10:30:14.673207Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 18 timeout: 300.000000s actor id: [2:7511668513384360488:2574] 2025-06-03T10:30:14.750457Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 18, sender: [2:7511668513384360487:2414], selfId: [2:7511668509089392205:2245], source: [2:7511668513384360432:2397] 2025-06-03T10:30:14.750608Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: eef20cfd-58048061-bd917928-b1edcd19, State: Update final status, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=MjBlZDNjZTItYzQzYjdjNzYtOWM1YjNkMTUtZDc3ZDU0NTU=, TxId: 2025-06-03T10:30:14.750642Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TSaveScriptFinalStatusActor] TraceId: eef20cfd-58048061-bd917928-b1edcd19, State: Update final status, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=MjBlZDNjZTItYzQzYjdjNzYtOWM1YjNkMTUtZDc3ZDU0NTU=, TxId: 2025-06-03T10:30:14.750649Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:2658: [ScriptExecutions] Finish script execution operation. ExecutionId: eef20cfd-58048061-bd917928-b1edcd19. SUCCESS. Issues: 2025-06-03T10:30:14.750851Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=ZTA0OGU1ODctNjNkOTIwYTQtYTUyOTU2ZjMtZmUyNmVhZWU=, workerId: [2:7511668513384360289:2348], local sessions count: 2 2025-06-03T10:30:14.750866Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=MjBlZDNjZTItYzQzYjdjNzYtOWM1YjNkMTUtZDc3ZDU0NTU=, workerId: [2:7511668513384360432:2397], local sessions count: 1 2025-06-03T10:30:15.358204Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:15.521479Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, Bootstrap. Database: /dc-1 2025-06-03T10:30:15.521806Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127094.029828s seconds to be completed 2025-06-03T10:30:15.522414Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, workerId: [2:7511668517679327836:2427], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-03T10:30:15.522454Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:15.525488Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, RunDataQuery: -- TScriptLeaseUpdater::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:15.525733Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. 
TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 20, targetId: [2:7511668517679327836:2427] 2025-06-03T10:30:15.525764Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 20 timeout: 300.000000s actor id: [2:7511668517679327838:2594] 2025-06-03T10:30:15.574068Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 20, sender: [2:7511668517679327837:2428], selfId: [2:7511668509089392205:2245], source: [2:7511668517679327836:2427] 2025-06-03T10:30:15.574163Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, State: Get lease info, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, TxId: 01jwtnez8g421qz99phzxqmgaa 2025-06-03T10:30:15.574192Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, State: Get lease info, RunDataQuery: -- TScriptLeaseUpdater::OnGetLeaseInfo DECLARE $database AS Text; DECLARE $execution_id AS Text; DECLARE $lease_duration AS Interval; UPDATE `.metadata/script_execution_leases` SET lease_deadline=(CurrentUtcTimestamp() + $lease_duration) WHERE database = $database AND execution_id = $execution_id; 2025-06-03T10:30:15.574369Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. Send request to target, requestId: 21, targetId: [2:7511668517679327836:2427] 2025-06-03T10:30:15.574384Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 21 timeout: 300.000000s actor id: [2:7511668517679327861:2603] 2025-06-03T10:30:15.606875Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 21, sender: [2:7511668517679327860:2435], selfId: [2:7511668509089392205:2245], source: [2:7511668517679327836:2427] 2025-06-03T10:30:15.606973Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, TxId: 2025-06-03T10:30:15.606993Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TScriptLeaseUpdater] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, State: Update lease, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, TxId: 2025-06-03T10:30:15.607080Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NTg1N2VkMGUtMWVlMWM1NmQtMTBlZjJhYjYtY2JkOWMzNjM=, workerId: [2:7511668517679327836:2427], local sessions count: 1 2025-06-03T10:30:15.608940Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: 01jwtnez9rcyscx5kpz2wqht1h, Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NTRiYmE0ODEtNzZlNTY4NjEtNmFlMWMwOTItNDc2MmIxMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 22, targetId: [2:7511668513384360319:2362] 2025-06-03T10:30:15.608964Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 22 timeout: 300.000000s actor id: [2:7511668517679327885:2610] 2025-06-03T10:30:15.684155Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: TraceId: "01jwtnez9rcyscx5kpz2wqht1h", Forwarded response to sender actor, requestId: 22, sender: [2:7511668517679327884:2442], selfId: [2:7511668509089392205:2245], source: [2:7511668513384360319:2362] 2025-06-03T10:30:15.692318Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:791: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, Bootstrap. Start TCheckLeaseStatusQueryActor 2025-06-03T10:30:15.692368Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:134: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, Bootstrap. Database: /dc-1 2025-06-03T10:30:15.692466Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1468: Request has 18444995127093.859159s seconds to be completed 2025-06-03T10:30:15.693068Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1543: Created new session, sessionId: ydb://session/3?node_id=2&id=NDA1NjE4N2ItM2QzOTFhNTQtOTBmZjhjZmYtODZkMzU1MmQ=, workerId: [2:7511668517679327923:2454], database: /dc-1, longSession: 1, local sessions count: 2 2025-06-03T10:30:15.693123Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:649: Received create session request, trace_id: 2025-06-03T10:30:15.693218Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:197: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, RunDataQuery: -- TCheckLeaseStatusQueryActor::OnRunQuery DECLARE $database AS Text; DECLARE $execution_id AS Text; SELECT operation_status, execution_status, finalization_status, issues, run_script_actor_id FROM `.metadata/script_executions` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); SELECT lease_deadline FROM `.metadata/script_execution_leases` WHERE database = $database AND execution_id = $execution_id AND (expire_at > CurrentUtcTimestamp() OR expire_at IS NULL); 2025-06-03T10:30:15.693374Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:767: Ctx: { TraceId: , Database: /dc-1, DatabaseId: , SessionId: ydb://session/3?node_id=2&id=NDA1NjE4N2ItM2QzOTFhNTQtOTBmZjhjZmYtODZkMzU1MmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: }. TEvQueryRequest, set timer for: 300.000000s timeout: 0.000000s cancelAfter: 0.000000s. 
Send request to target, requestId: 24, targetId: [2:7511668517679327923:2454] 2025-06-03T10:30:15.693384Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1294: Scheduled timeout timer for requestId: 24 timeout: 300.000000s actor id: [2:7511668517679327925:2624] 2025-06-03T10:30:15.783821Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:953: Forwarded response to sender actor, requestId: 24, sender: [2:7511668517679327924:2455], selfId: [2:7511668509089392205:2245], source: [2:7511668517679327923:2454] 2025-06-03T10:30:15.784246Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:240: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=2&id=NDA1NjE4N2ItM2QzOTFhNTQtOTBmZjhjZmYtODZkMzU1MmQ=, TxId: 2025-06-03T10:30:15.784279Z node 2 :KQP_PROXY DEBUG: query_actor.cpp:367: [TQueryBase] [TCheckLeaseStatusQueryActor] TraceId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, Finish with SUCCESS, SessionId: ydb://session/3?node_id=2&id=NDA1NjE4N2ItM2QzOTFhNTQtOTBmZjhjZmYtODZkMzU1MmQ=, TxId: 2025-06-03T10:30:15.784315Z node 2 :KQP_PROXY DEBUG: kqp_script_executions.cpp:838: [ScriptExecutions] [TCheckLeaseStatusActor] ExecutionId: ea12bf88-9b4f5c89-c9bb19d2-6c992b6b, reply success 2025-06-03T10:30:15.784577Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NDA1NjE4N2ItM2QzOTFhNTQtOTBmZjhjZmYtODZkMzU1MmQ=, workerId: [2:7511668517679327923:2454], local sessions count: 1 2025-06-03T10:30:15.790135Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1353: Session closed, sessionId: ydb://session/3?node_id=2&id=NTRiYmE0ODEtNzZlNTY4NjEtNmFlMWMwOTItNDc2MmIxMmE=, workerId: [2:7511668513384360319:2362], local sessions count: 0 >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloaded [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot >> KqpQueryService::DdlGroup >> KqpQueryService::TableSink_OltpUpsert >> CommitOffset::DistributedTxCommit_CheckOffsetCommitForDifferentCases [GOOD] >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases |66.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |66.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build |66.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/ydb-core-tx-schemeshard-ut_column_build >> KqpQueryService::StreamExecuteQueryPure [GOOD] >> KqpQueryService::TableSink_BadTransactions >> KqpQueryService::MaterializeTxResults [GOOD] >> KqpQueryService::StreamExecuteQueryMultiResult [GOOD] >> BuildStatsHistogram::Five_Five_Serial [GOOD] >> BuildStatsHistogram::Five_Five_Crossed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutor_ResourceProfile::TestExecutorPreserveTxData [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.067630Z 00000.004 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.004 II| FAKE_ENV: Starting storage for BS group 0 00000.004 II| FAKE_ENV: Starting storage for BS group 1 00000.004 II| FAKE_ENV: Starting storage for BS group 2 00000.004 II| FAKE_ENV: Starting storage for BS group 3 00000.056 C1| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test 
??+0 (0xCD512F2) ??+0 (0xCD51297) NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+57 (0xC88B739) NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+1312 (0xF3276E0) NKikimr::NTabletFlatExecutor::TExecutor::StateWork(TAutoPtr&)+167 (0xF3124C7) NActors::IActor::Receive(TAutoPtr&)+85 (0xD5F11E5) 00000.056 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.056 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.056 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.056 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2} 00000.056 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1} 00000.056 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.056 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.056 II| FAKE_ENV: All BS storage groups are stopped 00000.056 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.056 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.125774Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.003 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.003 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.003 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.003 II| FAKE_ENV: DS.0 gone, left {111b, 2}, put {131b, 3} 00000.003 II| FAKE_ENV: DS.1 gone, left {42b, 2}, put {42b, 2} 00000.003 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.003 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.003 II| FAKE_ENV: All BS storage groups are stopped 00000.003 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.003 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.130724Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {561b, 14}, put {623b, 16} 00000.004 II| FAKE_ENV: DS.1 gone, left {693b, 8}, put {693b, 8} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 4 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.136904Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 
AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.004 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 4 actors 00000.004 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.004 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.004 II| FAKE_ENV: DS.0 gone, left {141b, 4}, put {669b, 13} 00000.004 II| FAKE_ENV: DS.1 gone, left {868b, 8}, put {987b, 10} 00000.004 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.004 II| FAKE_ENV: All BS storage groups are stopped 00000.004 II| FAKE_ENV: Model stopped, hosted 5 actors, spent 0.000s 00000.004 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:02.143159Z 00000.002 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.002 II| FAKE_ENV: Starting storage for BS group 0 00000.002 II| FAKE_ENV: Starting storage for BS group 1 00000.002 II| FAKE_ENV: Starting storage for BS group 2 00000.002 II| FAKE_ENV: Starting storage for BS group 3 00000.002 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.002 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.002 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 ... initializing schema 00000.002 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema 00000.002 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NTabletFlatExecutor::TRowsModel::TTxSchema} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 ... inserting rows 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} queued, type NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} hope 1 -> done Change{2, redo 512b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NTabletFlatExecutor::TRowsModel::TTxAddRows} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 ... starting follower ... waiting for follower attach ... blocking NKikimr::TEvTablet::TEvNewFollowerAttached from TABLET_ACTOR to NKikimr::NTabletFlatExecutor::TTestFlatTablet cookie 0 ... waiting for follower attach (done) ... 
spamming QueueScan transactions 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:5} starting Scan{2 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{3, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 8 for step 4 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:6} starting Scan{4 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{4, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 8 for step 5 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:7} starting Scan{6 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{5, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 8 for step 6 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:8} starting Scan{8 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 
0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{6, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} release 4194304b of static, Memory{0 dyn 0} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 8 for step 7 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.003 II| TABLET_EXECUTOR: Leader{1:2:9} starting Scan{10 on 101, TEmptyScan{}} 00000.003 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{7, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Follower::TTxQueueScan} hope 1 -> done Change{3, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.003 DD| TABLET_EXECUTOR: Leader{1 ... ange{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{10 20480b} 00000.005 DD| TABLET_EXECUTOR: release 10240b of static tx data due to attached res 10, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.005 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{24, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{10 20480b}, Memory{0 dyn 0} 00000.005 DD| RESOURCE_BROKER: Update cookie for task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062]) (release resources {0, 20480}) 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001311 to 0.000000 (remove task Tx{23, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (10 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 0} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 0} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{11 20480b} type small_transaction 00000.005 DD| RESOURCE_BROKER: Submitted new unknown task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) priority=5 resources={0, 20480} 00000.005 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) from queue queue_default 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{11 20480b}, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} update resource task 11 releasing 0b, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} captured Res{11 20480b} 00000.005 DD| RESOURCE_BROKER: Update task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 20480} resubmit=0) 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.000000 to 0.001192 (insert task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} queued, type NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, 
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} took 1024b of static mem, Memory{1024 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 1 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 19456b requested for data (20480b in total) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release 1024b of static, Memory{0 dyn 20480} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release tx data 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} request Res{12 20480b} type small_transaction 00000.005 DD| RESOURCE_BROKER: Submitted new unknown task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) priority=5 resources={0, 20480} 00000.005 EE| RESOURCE_BROKER: Assigning waiting task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Allocate resources {0, 20480} for task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) from queue queue_default 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.002384 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} acquired dyn mem Res{12 20480b}, Memory{0 dyn 40960} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} hope 2 -> retry Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} found attached Res{11 20480b} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} moving tx data from attached Res{11 20480b} to Res{12 ...} 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} touch new 0b, 0b lo load (0b in total), 524267520b requested for data (524288000b in total) 00000.005 EE| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} mem 524288000b terminated, limit 314572800b is exceeded 00000.005 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} release Res{12 40960b}, Memory{0 dyn 0} 
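The executor trace above illustrates the per-transaction memory protocol: a transaction starts on static memory (1024b here), re-enters with a dynamic small_transaction reservation when it asks for more, and is terminated outright once a request would push it past the hard cap (314572800b in this profile; the 524288000b request above exceeds it). A minimal C++ sketch of that admission check, using made-up names (kHardLimit, TryGrow) rather than anything from the YDB sources:

    #include <cstdint>
    #include <cstdio>

    // Limit as it appears in the trace above; purely illustrative.
    constexpr uint64_t kHardLimit = 314572800;   // "limit 314572800b is exceeded"

    // Returns true if the transaction may grow its reservation to
    // held + extra bytes, false if it must be terminated.
    bool TryGrow(uint64_t held, uint64_t extra) {
        const uint64_t total = held + extra;
        if (total > kHardLimit) {
            std::printf("mem %llub terminated, limit %llub is exceeded\n",
                        (unsigned long long)total,
                        (unsigned long long)kHardLimit);
            return false;
        }
        return true;
    }

    int main() {
        TryGrow(1024, 19456);        // 20480b total: granted, as in Res{11 20480b}
        TryGrow(20480, 524267520);   // 524288000b total: over the cap, terminated
        return 0;
    }

The broker records that follow show the matching bookkeeping: the failed transaction's reservations (Res{11}, Res{12}) are finished and the queue's planned resource usage drops back to zero.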
00000.005 DD| RESOURCE_BROKER: Update task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (priority=5 type=small_transaction resources={0, 40960} resubmit=0) 00000.005 EE| RESOURCE_BROKER: Assigning in-fly task 'Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])' of unknown type 'small_transaction' to default queue 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.001192 to 0.003576 (insert task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062]) (release resources {0, 20480}) 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.003576 to 0.002384 (remove task Tx{25, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (11 by [56:30:2062])) 00000.005 DD| RESOURCE_BROKER: Finish task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062]) (release resources {0, 40960}) 00000.005 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_default from 0.002384 to 0.000000 (remove task Tx{26, NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_ResourceProfile::TTxRequestMemory} at tablet 1 (12 by [56:30:2062])) 00000.005 II| TABLET_EXECUTOR: Leader{1:2:4} suiciding, Waste{2:0, 317b +(0, 0b), 3 trc, -0b acc} 00000.006 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors 00000.006 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b} 00000.006 II| FAKE_ENV: Shut order, stopping 4 BS groups 00000.006 II| FAKE_ENV: DS.0 gone, left {180b, 3}, put {200b, 4} 00000.006 II| FAKE_ENV: DS.1 gone, left {352b, 3}, put {352b, 3} 00000.006 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0} 00000.006 II| FAKE_ENV: All BS storage groups are stopped 00000.006 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s 00000.006 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 45 Left 401}, stopped ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateViewOnTable [GOOD] Test command err: Trying to start YDB, gRPC: 26080, MsgBus: 14897 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d7d/r3tmp/tmpX23L5d/pdisk_1.dat 2025-06-03T10:30:12.841482Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:30:12.873788Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26080, node 1 2025-06-03T10:30:12.905778Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.905792Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.905794Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize 
from file: (empty maybe) 2025-06-03T10:30:12.905852Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:30:12.933627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.933666Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.939626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14897 TClient is connected to server localhost:14897 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:13.061084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.064533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:13.082503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.142588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.204102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.269706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.362961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508891757571:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.363003Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.434124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.456957Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.470683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.485320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.496058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.509155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.569697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.594099Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508891758229:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.594142Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.594241Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508891758234:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.598076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:13.604880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:13.604997Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668508891758236:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:13.706324Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668508891758287:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:13.950018Z node 1 :SYSTEM_VIEWS ERROR: scan_actor_base_impl.h:96: Scan error, actor: [1:7511668508891758612:2522], owner: [1:7511668508891758608:2520], scan id: 0, table id: [1:0:0:show_create], error: Path type mismatch, expected: Table, found: View 2025-06-03T10:30:13.950522Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7511668508891758609:2521], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=YTI5NWM4MGYtM2Q5MjFhNmMtNzhiMDRhNjQtYzg4NWZjM2I=. TraceId : 01jwtnexna9ct1rav1gpv8pgqa. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7511668508891758605:2508], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-03T10:30:13.950677Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=YTI5NWM4MGYtM2Q5MjFhNmMtNzhiMDRhNjQtYzg4NWZjM2I=, ActorId: [1:7511668508891758559:2508], ActorState: ExecuteState, TraceId: 01jwtnexna9ct1rav1gpv8pgqa, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 65352, MsgBus: 3772 2025-06-03T10:30:14.389533Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668512532590429:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:14.389881Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d7d/r3tmp/tmpEiyN2z/pdisk_1.dat 2025-06-03T10:30:14.424898Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:14.428635Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668512532590352:2079] 1748946614380693 != 1748946614380696 TServer::EnableGrpc on GrpcPort 65352, node 2 2025-06-03T10:30:14.454121Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:14.454139Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:14.454143Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:14.454207Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:30:14.497856Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:14.497889Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:14.505778Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3772 TClient is connected to server local ... N: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.105788Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668516827559948:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.105834Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.105938Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668516827559953:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.106920Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:15.116510Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668516827559955:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:15.174019Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668516827560006:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 9266, MsgBus: 6549 2025-06-03T10:30:15.564122Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668517892031315:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:15.564431Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d7d/r3tmp/tmp7l0Fta/pdisk_1.dat 2025-06-03T10:30:15.584841Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9266, node 3 2025-06-03T10:30:15.597483Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:15.597501Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:15.597504Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:15.597571Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6549 TClient is connected to server localhost:6549 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:15.669145Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:15.669184Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
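Each of these test runs repeats the same workload-manager bootstrap: the default resource pool is fetched and reported NOT_FOUND, created via ESchemeOpCreateResourcePool, and re-fetched after a scheduled "doublechecking" retry; a creator that loses the race gets the benign "path exist, request accepts it" error from TX_PROXY. A rough, self-contained C++ sketch of that create-if-missing flow, with an in-memory set standing in for the scheme shard (FetchPool and CreatePool are invented names, not the actual actor APIs):

    #include <cstdio>
    #include <set>
    #include <string>

    enum class EStatus { Success, NotFound, AlreadyExists };

    std::set<std::string> g_scheme;   // toy stand-in for the scheme shard

    EStatus FetchPool(const std::string& path) {
        return g_scheme.count(path) ? EStatus::Success : EStatus::NotFound;
    }

    EStatus CreatePool(const std::string& path) {
        // A concurrent creator that finds the path already present gets
        // the "path exist, request accepts it" outcome seen in the log.
        return g_scheme.insert(path).second ? EStatus::Success
                                            : EStatus::AlreadyExists;
    }

    void EnsureDefaultPool() {
        const std::string path =
            "/Root/.metadata/workload_manager/pools/default";
        if (FetchPool(path) == EStatus::Success)
            return;                       // already provisioned
        CreatePool(path);                 // Success or AlreadyExists: both fine
        // "Scheduled retry for error: ... completed, doublechecking":
        // re-fetch after create to confirm the pool is visible.
        if (FetchPool(path) != EStatus::Success)
            std::puts("doublecheck failed");
    }

    int main() {
        EnsureDefaultPool();   // creates the pool
        EnsureDefaultPool();   // idempotent second call: fetch succeeds
        return 0;
    }

The node 3 run below goes through the same sequence.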
2025-06-03T10:30:15.669969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.670869Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:30:15.676268Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.699803Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:15.775089Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:15.799109Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.093414Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522187000214:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.093470Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.106942Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.143252Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.169659Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.183161Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.197095Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.210536Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.277435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.298019Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522187000873:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.298045Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.298185Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522187000878:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.299132Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.301673Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668522187000880:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:16.360283Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668522187000931:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:16.529274Z node 3 :SYSTEM_VIEWS ERROR: scan_actor_base_impl.h:96: Scan error, actor: [3:7511668522187001217:2518], owner: [3:7511668522187001214:2516], scan id: 0, table id: [1:0:0:show_create], error: Path type mismatch, expected: View, found: Table 2025-06-03T10:30:16.529442Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [3:7511668522187001215:2517], TxId: 281474976715672, task: 2. Ctx: { CustomerSuppliedId : . TraceId : 01jwtnf05xcdeq17xbwsw8q7qm. SessionId : ydb://session/3?node_id=3&id=NzI1ZTc3ZTgtNjRiNmYxNDYtNDlhNDBlZTAtNTg3NjlmOGI=. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7511668522187001211:2507], status: BAD_REQUEST, reason: {
: Error: Terminate execution } 2025-06-03T10:30:16.529590Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=NzI1ZTc3ZTgtNjRiNmYxNDYtNDlhNDBlZTAtNTg3NjlmOGI=, ActorId: [3:7511668522187001195:2507], ActorState: ExecuteState, TraceId: 01jwtnf05xcdeq17xbwsw8q7qm, Create QueryResponse for error on request, msg: >> KqpQueryService::AlterTempTable [GOOD] >> KqpQueryService::CTASWithoutPerStatement >> KqpQueryService::ShowCreateTableNotSuccess [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::MaterializeTxResults [GOOD] Test command err: Trying to start YDB, gRPC: 10513, MsgBus: 8569 2025-06-03T10:30:12.393893Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668504401391172:2154];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:12.396616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000da3/r3tmp/tmpgPbYsb/pdisk_1.dat 2025-06-03T10:30:12.449251Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:12.449962Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668504401391032:2079] 1748946612379015 != 1748946612379018 TServer::EnableGrpc on GrpcPort 10513, node 1 2025-06-03T10:30:12.474798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.474812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.474813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:12.474850Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8569 2025-06-03T10:30:12.525253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.525287Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.527723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8569 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:12.593982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.601746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:12.614398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.663167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.742542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.764399Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.890054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668504401392664:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:12.890115Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:12.956205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.965436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.974725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.998158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.022499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.043974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.075204Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.098856Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508696360613:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.098890Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.099095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508696360618:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.100196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:13.104878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:13.105002Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668508696360620:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:13.186925Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668508696360671:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:13.451385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.514548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.574984Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 25245, MsgBus: 26262 2025-06-03T10:30:14.430835Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668512455605527:2209];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000da3/r3tmp/tmpt1n9xw/pdisk_1.dat 2025-06-03T10:30:14.437582Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:14.445289Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25245, node 2 2025-06-03T10:30:14.455254Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:14.455270Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:14.455272Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:14.455328Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26262 TClient is connected to server localhost:26262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserA ... 
at schemeshard: 72057594046644480 2025-06-03T10:30:14.977985Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.991676Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.007020Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.019193Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.036744Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.050197Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668516750574909:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.050235Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.050255Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668516750574914:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.051161Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:15.060538Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668516750574916:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:15.146089Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668516750574967:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 4983, MsgBus: 7379 2025-06-03T10:30:15.646702Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668515358239173:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:15.646725Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000da3/r3tmp/tmpDEjLRc/pdisk_1.dat 2025-06-03T10:30:15.667373Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4983, node 3 2025-06-03T10:30:15.683118Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:15.683135Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:15.683137Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:15.683191Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7379 TClient is connected to server localhost:7379 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:30:15.749625Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:15.749661Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:15.753781Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:30:15.761812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.805257Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:15.858211Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:15.911960Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:15.982833Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.252063Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668519653208054:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.252093Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.261643Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.279835Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.296744Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.309912Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.321798Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.338324Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.349989Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.366060Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668519653208709:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.366086Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668519653208714:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.366098Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.367186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.376846Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668519653208716:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:16.445189Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668519653208767:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpQueryService::TableSink_HtapInteractive-withOltpSink [GOOD] >> KqpQueryService::TableSink_OlapInsert >> TFlatExecutorLeases::Basics [GOOD] >> TFlatExecutorLeases::BasicsLeaseTimeout ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::StreamExecuteQueryMultiResult [GOOD] Test command err: Trying to start YDB, gRPC: 64558, MsgBus: 65266 2025-06-03T10:30:12.806564Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668502299645156:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:12.807404Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d79/r3tmp/tmpEq47iW/pdisk_1.dat 2025-06-03T10:30:12.888939Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64558, node 1 2025-06-03T10:30:12.909661Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.909713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.910890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:12.925825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.925840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.925842Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:12.925891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65266 TClient is connected to server localhost:65266 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:13.014471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.020069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:13.028184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.063762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.140373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.165186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.661583Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668506594613974:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.661620Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.726180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.739919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.779027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.838669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.850632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.867182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.882931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.901138Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668506594614639:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.901175Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.901288Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668506594614644:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.902620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:13.905832Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668506594614646:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:14.001248Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668506594614697:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 3760, MsgBus: 17825 2025-06-03T10:30:14.765919Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668512966338751:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:14.773856Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d79/r3tmp/tmpTdbeqU/pdisk_1.dat 2025-06-03T10:30:14.803061Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3760, node 2 2025-06-03T10:30:14.821653Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:14.821672Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:14.821675Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:14.821730Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17825 2025-06-03T10:30:14.865396Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:14.865431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:14.866476Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17825 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:14.885475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type ... 
n part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.235898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.250375Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.264698Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.281044Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668517261308134:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.281078Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.281085Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668517261308139:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.282131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:15.291084Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668517261308141:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:15.351203Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668517261308192:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 7377, MsgBus: 14493 2025-06-03T10:30:16.071205Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668522355028713:2138];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d79/r3tmp/tmpeUFhFD/pdisk_1.dat 2025-06-03T10:30:16.078240Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:16.133827Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:16.134255Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668522355028606:2079] 1748946616070228 != 1748946616070231 TServer::EnableGrpc on GrpcPort 7377, node 3 2025-06-03T10:30:16.163187Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.163207Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.163209Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.163265Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:30:16.183044Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.183097Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.183831Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:14493 TClient is connected to server localhost:14493 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:30:16.278551Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.281860Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.284500Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.318370Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.383180Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.421460Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.586422Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522355030233:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.586456Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.596923Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.606694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.615262Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.629643Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.642941Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.657314Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.671102Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.687789Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522355030884:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.687810Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668522355030889:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.687814Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.688598Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.698173Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668522355030891:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:16.752621Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668522355030942:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TChargeBTreeIndex::NoNodes_Groups_History [GOOD] >> TChargeBTreeIndex::OneNode >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ShowCreateTableNotSuccess [GOOD] Test command err: Trying to start YDB, gRPC: 21236, MsgBus: 25082 2025-06-03T10:30:13.729606Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668508715419457:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:13.729633Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d70/r3tmp/tmpY5fVTv/pdisk_1.dat 2025-06-03T10:30:13.808823Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21236, node 1 2025-06-03T10:30:13.831989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:13.832026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:13.833022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:13.833503Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:13.833514Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:13.833517Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:13.833567Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25082 TClient is connected to server localhost:25082 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:13.909667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:30:13.915373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.939644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.971164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:14.030669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:14.269359Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668513010388362:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.269416Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.353053Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.368283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.390629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.405315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.418692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.480802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.496061Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.520844Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668513010389017:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.520865Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.520933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668513010389022:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.521748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:14.524966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:30:14.525017Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668513010389024:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:30:14.583971Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668513010389075:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:14.792820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 23779, MsgBus: 26225 2025-06-03T10:30:15.081747Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668514621103377:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:15.081769Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d70/r3tmp/tmpHZqSLj/pdisk_1.dat 2025-06-03T10:30:15.098288Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23779, node 2 2025-06-03T10:30:15.107152Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:15.107168Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:15.107170Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:15.107238Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26225 TClient is connected to server localhost:26225 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:15.182093Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:15.182131Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:15.183157Z node 2 :HIVE WARN: node_info.cpp ... 
or: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:15.682356Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668514621105674:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:15.902046Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:15.940218Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511668514621106023:2521], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiReadTable!
:2:35: Error: SHOW CREATE statement is not supported 2025-06-03T10:30:15.940807Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=ODFlMGM2YjAtMTAzOGQ0NzAtMTVlNTM5YTItMWYwZGU1MDc=, ActorId: [2:7511668514621105937:2507], ActorState: ExecuteState, TraceId: 01jwtnezkyetv0qmxgv7j6z99v, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 7482, MsgBus: 28464 2025-06-03T10:30:16.333749Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668521397227839:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.333776Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d70/r3tmp/tmpRWIRCu/pdisk_1.dat 2025-06-03T10:30:16.356698Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7482, node 3 2025-06-03T10:30:16.366792Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.366828Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.366831Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.366908Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28464 TClient is connected to server localhost:28464 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:16.442336Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.442376Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.442835Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:30:16.443666Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:16.444530Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:16.454970Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.468162Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.530517Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.543912Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.729112Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668521397229470:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.729171Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.738905Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.747171Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.754476Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.769233Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.783596Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.797172Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.811659Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.830020Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668521397230124:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.830061Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.830168Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668521397230129:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.831519Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.846927Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668521397230131:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:16.906475Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668521397230182:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:17.093745Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511668525692197750:2513], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiReadTable!
:2:35: Error: Cannot find table 'db.[/Root/test_show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:30:17.093886Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=MzEwMGRjMWYtZDVmZTY5MzMtODYzYjMzZjctNWEwM2FhOTM=, ActorId: [3:7511668525692197741:2507], ActorState: ExecuteState, TraceId: 01jwtnf0qybxw5n0q7qfqac3wk, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:30:17.102427Z node 3 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [3:7511668525692197763:2516], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:35: Error: At function: KiReadTable!
:2:35: Error: Cannot find table 'db.[/Root/.sys/show_create]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:30:17.102825Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=3&id=MzEwMGRjMWYtZDVmZTY5MzMtODYzYjMzZjctNWEwM2FhOTM=, ActorId: [3:7511668525692197741:2507], ActorState: ExecuteState, TraceId: 01jwtnf0r81fs2wrt1nd1aq6m2, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: >> TVersions::Wreck1 [GOOD] >> TVersions::Wreck1Reverse >> KqpQueryService::TableSink_BadTransactions [GOOD] >> KqpQueryService::TableSinkHtapInsert >> BuildStatsHistogram::Five_Five_Crossed [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels [GOOD] >> BuildStatsHistogram::Single_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Single_Small_1_Level [GOOD] >> BuildStatsHistogram::Single_Small_0_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels >> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD] >> KqpQueryService::CTASWithoutPerStatement [GOOD] >> KqpQueryService::CheckIsolationLevelFroPerStatementMode >> KqpQueryService::DdlGroup [GOOD] >> KqpQueryService::DdlPermission >> BuildStatsHistogram::Three_Mixed_Small_2_Levels [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_2_Levels_3_Buckets [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_1_Level [GOOD] >> BuildStatsHistogram::Three_Mixed_Small_0_Levels [GOOD] >> BuildStatsHistogram::Mixed_Groups_History >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex >> TPartBtreeIndexIteration::OneNode_Groups_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_History_Slices >> KqpQueryServiceScripts::ExecuteScriptStatsProfile >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptStatsNone [GOOD] Test command err: Trying to start YDB, gRPC: 62937, MsgBus: 19130 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d74/r3tmp/tmpvnn0fx/pdisk_1.dat 2025-06-03T10:30:13.446675Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668507577034209:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:13.450635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:13.497596Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:13.500314Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668507577034044:2079] 1748946613386971 != 1748946613386974 TServer::EnableGrpc on GrpcPort 62937, node 1 2025-06-03T10:30:13.550154Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:13.550195Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:13.553845Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:13.573591Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
2025-06-03T10:30:13.573606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:13.573608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:13.573681Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19130 TClient is connected to server localhost:19130 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:13.687892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.693043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:13.717607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.754474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.800049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.818773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.930476Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668507577035687:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.930506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.994958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.006896Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.020856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.038966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.052932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.065036Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.076429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.107407Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668511872003637:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.107471Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.107658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668511872003642:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:14.108857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:14.112704Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668511872003644:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:14.178054Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668511872003695:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:14.403797Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.404184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.404443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 25426, MsgBus: 15571 2025-06-03T10:30:15.149197Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668515337930223:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:15.149224Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d74/r3tmp/tmpAv9BN0/pdisk_1.dat 2025-06-03T10:30:15.164787Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25426, node 2 2025-06-03T10:30:15.174343Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:15.174360Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:15.174363Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:15.174423Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15571 TClient is connected to server localhost:15571 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 1844674407370 ... T_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.708024Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.708070Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668515337932464:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:15.709220Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:15.712621Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668515337932466:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:15.786597Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668515337932517:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:16.014492Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.015135Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.015401Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 15601, MsgBus: 61926 2025-06-03T10:30:16.724739Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668518903054591:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.724800Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d74/r3tmp/tmpAU1Cvh/pdisk_1.dat 2025-06-03T10:30:16.742461Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15601, node 3 2025-06-03T10:30:16.752578Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.752594Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.752596Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.752653Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61926 TClient is connected to server localhost:61926 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:16.825001Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.825032Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.826224Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:16.828665Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.832841Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.852076Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.883570Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:30:16.898608Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.179562Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668523198023490:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.179610Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.200891Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.257642Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.267655Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.280186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.294094Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.308474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.322565Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.343032Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668523198024147:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.343058Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668523198024152:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.343070Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.344141Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:17.349399Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668523198024154:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:17.410660Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668523198024205:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:17.571536Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.571905Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.572331Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 >> KqpService::CloseSessionsWithLoad >> TFlatExecutorLeases::BasicsLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLease >> TKeyValueTest::TestEmptyWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] >> TKeyValueTest::TestGetStatusWorks >> BuildStatsHistogram::Mixed_Groups_History [GOOD] >> BuildStatsHistogram::Serial_Groups_History >> IndexBuildTest::Lock >> BuildStatsHistogram::Serial_Groups_History [GOOD] >> BuildStatsHistogram::Benchmark >> KqpQueryService::TableSink_OlapUpdate [GOOD] >> KqpQueryService::TableSink_OlapOrder >> IndexBuildTest::WithFollowers >> BuildStatsHistogram::Benchmark [GOOD] >> BuildStatsHistogram::Many_Mixed >> KqpQueryService::ExecuteQueryUpsertDoesntChangeIndexedValuesIfNotChanged [GOOD] >> KqpQueryService::ExecuteQueryWithWorkloadManager ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/workload_service/ut/unittest >> KqpWorkloadServiceTables::TestLeaseUpdates [GOOD] Test command err: test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001b60/r3tmp/tmpHtabvW/pdisk_1.dat 2025-06-03T10:29:23.618809Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:23.661500Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668294362139059:2079] 1748946563560019 != 1748946563560022 2025-06-03T10:29:23.667939Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64803, node 1 2025-06-03T10:29:23.681527Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:29:23.681540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:29:23.681542Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:29:23.681592Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13034 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:23.713472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.713501Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.714202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:23.714520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:29:23.716391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:29:23.967110Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-03T10:29:23.967835Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MWVhZjIwOWEtNDUzNDRkNmQtY2U4ZWIyMmYtZmZhYzQyNmU=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MWVhZjIwOWEtNDUzNDRkNmQtY2U4ZWIyMmYtZmZhYzQyNmU= 2025-06-03T10:29:23.967988Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 1 2025-06-03T10:29:23.967999Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-03T10:29:23.968001Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-03T10:29:23.969798Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294362139701:2326], Start check tables existence, number paths: 2 2025-06-03T10:29:23.969864Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MWVhZjIwOWEtNDUzNDRkNmQtY2U4ZWIyMmYtZmZhYzQyNmU=, ActorId: [1:7511668294362139702:2327], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:29:23.970181Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-03T10:29:23.970190Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294362139701:2326], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-03T10:29:23.970208Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294362139701:2326], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-03T10:29:23.970214Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511668294362139701:2326], Successfully finished 2025-06-03T10:29:23.970246Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-03T10:29:23.971064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:29:23.971486Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:429: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Subscribe on create pool tx: 281474976715658 2025-06-03T10:29:23.971528Z node 1 :KQP_WORKLOAD_SERVICE TRACE: scheme_actors.cpp:352: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Tablet to pipe successfully connected 2025-06-03T10:29:23.973400Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:29:24.069147Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:387: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Start pool creating 2025-06-03T10:29:24.070407Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668298657107066:2325] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/sample_pool_id\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:24.070469Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:480: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668294362139719:2293], DatabaseId: Root, PoolId: sample_pool_id, Pool successfully created 2025-06-03T10:29:24.070626Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: Root, PoolId: sample_pool_id 2025-06-03T10:29:24.070640Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id Root 2025-06-03T10:29:24.070673Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668298657107073:2328], DatabaseId: Root, PoolId: sample_pool_id, Start pool fetching 2025-06-03T10:29:24.071150Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:223: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668298657107073:2328], DatabaseId: Root, PoolId: sample_pool_id, Pool info successfully fetched 2025-06-03T10:29:24.071185Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:253: [WorkloadService] [Service] Successfully fetched pool sample_pool_id, DatabaseId: Root 2025-06-03T10:29:24.071195Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:571: [WorkloadService] [Service] Creating new handler for pool /Root/sample_pool_id 2025-06-03T10:29:24.071256Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:466: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7511668298657107082:2329], DatabaseId: Root, PoolId: sample_pool_id, Subscribed on schemeboard notifications for path: [OwnerId: 72057594046644480, LocalPathId: 5] 2025-06-03T10:29:24.071463Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:274: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7511668298657107082:2329], DatabaseId: Root, PoolId: sample_pool_id, Got watch notification 2025-06-03T10:29:24.072486Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:157: [WorkloadService] [Service] Recieved subscription request, DatabaseId: /Root, PoolId: default 2025-06-03T10:29:24.072494Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:561: [WorkloadService] [Service] Creating new database state for id /Root 2025-06-03T10:29:24.072507Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:185: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668298657107094:2331], DatabaseId: /Root, PoolId: default, Start pool fetching 2025-06-03T10:29:24.072599Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=MWVhZjIwOWEtNDUzNDRkNmQtY2U4ZWIyMmYtZmZhYzQyNmU=, ActorId: [1:7511668294362139702:2327], ActorState: ReadyState, TraceId: 
01jwtndcz87b93jqqyvedtw2aa, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: DROP RESOURCE POOL sample_pool_id; rpcActor: [0:0:0] database: /Root databaseId: /Root pool id: default 2025-06-03T10:29:24.073530Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668298657107094:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:24.073568Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:24.113933Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: pool_handlers_actors.cpp:294: [WorkloadService] [TPoolHandlerActorBase] ActorId: [1:7511668298657107082:2329], DatabaseId: Root, PoolId: sample_pool_id, Got delete notification 2025-06-03T10:29:24.115069Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=1&id=MWVhZjIwOWEtNDUzNDRkNmQtY2U4ZWIyMmYtZmZhYzQyNmU=, ActorId: [1:7511668294362139702:2327], ActorState: ExecuteState, TraceId: 01jwtndcz87b93jqqyvedtw2aa, Cleanup start, isFinal: 0 CleanupCtx: 1 TransactionsToBeAborted.size(): 0 WorkerId: [1:7511668298657107103:2327] WorkloadServiceCleanup: 0 2025-06-03T10:29:24.115905Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=1&id=MWVhZjIwOWEtNDUzNDRkNmQtY2U4Z ... _SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27a53py6hrjsz6m8ftm, txInfo Status: Committed Kind: ReadWrite TotalDuration: 5.477 ServerDuration: 5.377 QueriesCount: 2 2025-06-03T10:30:18.608341Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27a53py6hrjsz6m8ftm, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:30:18.608364Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27a53py6hrjsz6m8ftm, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:18.608367Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27a53py6hrjsz6m8ftm, EndCleanup, isFinal: 0 2025-06-03T10:30:18.608381Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27a53py6hrjsz6m8ftm, Sent query response back to proxy, proxyRequestId: 28, proxyId: [10:7511668463279039798:2268] 2025-06-03T10:30:18.609108Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, TxId: 2025-06-03T10:30:18.609142Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:197: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Update lease, RunDataQuery: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= 
CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); 2025-06-03T10:30:18.609490Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ReadyState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, received request, proxyRequestId: 29 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: -- TRefreshPoolStateQuery::OnLeaseUpdated DECLARE $database_id AS Text; DECLARE $pool_id AS Text; SELECT COUNT(*) AS delayed_requests FROM `.metadata/workload_manager/delayed_requests` WHERE database = $database_id AND pool_id = $pool_id AND (wait_deadline IS NULL OR wait_deadline >= CurrentUtcTimestamp()) AND lease_deadline >= CurrentUtcTimestamp(); SELECT COUNT(*) AS running_requests FROM `.metadata/workload_manager/running_requests` WHERE database = $database_id AND pool_id = $pool_id AND lease_deadline >= CurrentUtcTimestamp(); rpcActor: [10:7511668527703550490:2529] database: /Root databaseId: /Root pool id: default 2025-06-03T10:30:18.609498Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ReadyState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, request placed into pool from cache: default 2025-06-03T10:30:18.609825Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1306: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, ExecutePhyTx, tx: 0x0000330E7CD8A998 literal: 0 commit: 0 txCtx.DeferredEffects.size(): 0 2025-06-03T10:30:18.609846Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1457: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, Sending to Executer TraceId: 0 8 2025-06-03T10:30:18.609872Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1515: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, Created new KQP executer: [10:7511668527703550493:2523] isRollback: 0 2025-06-03T10:30:18.613352Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, TEvTxResponse, CurrentTx: 1/2 response.status: SUCCESS 2025-06-03T10:30:18.613398Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1306: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, ExecutePhyTx, tx: 0x0000330E7CCBD498 literal: 1 commit: 1 txCtx.DeferredEffects.size(): 0 2025-06-03T10:30:18.613968Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:1707: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 
01jwtnf27hfncmwhw03aabe2tk, TEvTxResponse, CurrentTx: 2/2 response.status: SUCCESS 2025-06-03T10:30:18.614054Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:1966: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, txInfo Status: Committed Kind: ReadOnly TotalDuration: 4.295 ServerDuration: 4.236 QueriesCount: 2 2025-06-03T10:30:18.614109Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2121: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, Create QueryResponse for action: QUERY_ACTION_EXECUTE with SUCCESS status 2025-06-03T10:30:18.614131Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:18.614140Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, EndCleanup, isFinal: 0 2025-06-03T10:30:18.614154Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ExecuteState, TraceId: 01jwtnf27hfncmwhw03aabe2tk, Sent query response back to proxy, proxyRequestId: 29, proxyId: [10:7511668463279039798:2268] 2025-06-03T10:30:18.614341Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:240: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, TEvDataQueryResult SUCCESS, Issues: , SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, TxId: 2025-06-03T10:30:18.614374Z node 10 :KQP_WORKLOAD_SERVICE DEBUG: query_actor.cpp:367: [TQueryBase] [TRefreshPoolStateQuery] TraceId: sample_pool_id, RequestDatabase: /Root, RequestSessionId: , State: Describe pool, Finish with SUCCESS, SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, TxId: 2025-06-03T10:30:18.614520Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:30:18.614525Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:18.614526Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:30:18.614529Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: 
SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:30:18.614541Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=10&id=NDZiZmNmMmQtMjY1OTNhMWQtMjk1NjBlNzYtYWI0MjE3ZjY=, ActorId: [10:7511668527703550462:2523], ActorState: unknown state, Session actor destroyed 2025-06-03T10:30:18.615664Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=10&id=NmE2OGMxOTMtMzgxZDU0OWYtNDQwN2I5MWItYjcxOWZmOGQ=, ActorId: [10:7511668463279040196:2325], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:30:18.615681Z node 10 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=10&id=NmE2OGMxOTMtMzgxZDU0OWYtNDQwN2I5MWItYjcxOWZmOGQ=, ActorId: [10:7511668463279040196:2325], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:18.615684Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=10&id=NmE2OGMxOTMtMzgxZDU0OWYtNDQwN2I5MWItYjcxOWZmOGQ=, ActorId: [10:7511668463279040196:2325], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:30:18.615689Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=10&id=NmE2OGMxOTMtMzgxZDU0OWYtNDQwN2I5MWItYjcxOWZmOGQ=, ActorId: [10:7511668463279040196:2325], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:30:18.615713Z node 10 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=10&id=NmE2OGMxOTMtMzgxZDU0OWYtNDQwN2I5MWItYjcxOWZmOGQ=, ActorId: [10:7511668463279040196:2325], ActorState: unknown state, Session actor destroyed >> KqpQueryService::TableSink_OltpReplace+HasSecondaryIndex [GOOD] >> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex >> TChargeBTreeIndex::OneNode [GOOD] >> TChargeBTreeIndex::OneNode_Groups >> IndexBuildTest::Lock [GOOD] >> IndexBuildTest::IndexPartitioningIsPersisted >> KqpQueryService::TableSinkHtapInsert [GOOD] >> KqpQueryService::CheckIsolationLevelFroPerStatementMode [GOOD] >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid >> KqpQueryService::DdlPermission [GOOD] >> KqpQueryService::DdlSecret >> TFlatExecutorLeases::BasicsInitialLease [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseTimeout >> TKeyValueTest::TestCleanUpDataOnEmptyTablet [GOOD] >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration >> TPartBtreeIndexIteration::OneNode_History_Slices [GOOD] >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices >> KqpQueryServiceScripts::ExecuteScriptStatsProfile [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter >> TSequence::CreateSequenceParallel >> TKeyValueTest::TestCleanUpDataOnEmptyTabletResetGeneration [GOOD] >> TKeyValueTest::TestCleanUpDataWithMockDisk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSinkHtapInsert [GOOD] Test command err: Trying to start YDB, gRPC: 1833, MsgBus: 24998 2025-06-03T10:30:15.720155Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668518122140506:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:15.720260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect 
path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d6b/r3tmp/tmpiGgbx7/pdisk_1.dat 2025-06-03T10:30:15.809594Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:15.821386Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:15.821419Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 1833, node 1 2025-06-03T10:30:15.823906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:15.853537Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:15.853556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:15.853559Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:15.858194Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24998 TClient is connected to server localhost:24998 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:16.050599Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.069254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.075014Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
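The repeated "Resource pool default not found or you don't have access permissions" warnings in this output are expected during test startup: the workload service probes for the pool before the auto-created `.metadata/workload_manager/pools/default` entry exists, then retries once the CREATE RESOURCE POOL transaction completes (hence the later "path exist, request accepts it" messages). For reference, a minimal YQL sketch of declaring such a pool explicitly — the pool name matches the `sample_pool_id` used by these tests, but the limits are illustrative assumptions, not values taken from this run:

    -- Assumed example for illustration; not part of the captured log.
    CREATE RESOURCE POOL sample_pool_id WITH (
        CONCURRENT_QUERY_LIMIT = 10,  -- queries admitted to run at once
        QUEUE_SIZE = 100              -- requests allowed to wait for admission
    );

Requests that exceed CONCURRENT_QUERY_LIMIT are parked in the `.metadata/workload_manager/delayed_requests` table and re-admitted by the lease-refresh queries visible earlier in this log.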
2025-06-03T10:30:16.134397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.168838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.187054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.358429Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668522417109244:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.358459Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.426434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.438683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.450040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.461377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.475882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.489480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.545942Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.561714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668522417109901:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.561750Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.561784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668522417109906:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.562971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.572224Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668522417109908:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:16.654335Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668522417109959:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 63125, MsgBus: 22798 2025-06-03T10:30:17.087080Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668522629015170:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:17.088020Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d6b/r3tmp/tmpLCezNk/pdisk_1.dat 2025-06-03T10:30:17.108850Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 63125, node 2 2025-06-03T10:30:17.133913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:17.133928Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:17.133930Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:17.133996Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22798 TClient is connected to server localhost:22798 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:30:17.194217Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:17.194256Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:17.194659Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.197131Z node 2 :HIVE WARN: n ... 
rogressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.093075Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037908;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.094017Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037949;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.094705Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.095154Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037933;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.095973Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037943;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.096378Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037946;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.097087Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037935;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.097523Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037905;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.098216Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037903;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.098695Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037940;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.099215Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037951;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.099794Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037913;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.100316Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.100970Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037947;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.101447Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037911;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.102189Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037927;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.102586Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037909;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.103392Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037941;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.103769Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037929;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.104199Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037923;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.105052Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037945;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.105164Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037899;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.107529Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037939;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:30:19.113654Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.214205Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668533657150695:2917], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.214236Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668533657150700:2920], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.214242Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.215198Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:30:19.217870Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668533657150702:2921], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:30:19.274967Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668533657150755:5186] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:19.329383Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=3; 2025-06-03T10:30:19.330709Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 3 at tablet 72075186224037952 errors: Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:30:19.330784Z node 3 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 3 at tablet 72075186224037952 Status: STATUS_CONSTRAINT_VIOLATION Issues: { message: "Conflict with existing key." issue_code: 2012 severity: 1 } 2025-06-03T10:30:19.331062Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668533657151010:2930], Table: `/Root/DataShard` ([72057594046644480:3:1]), SessionActorId: [3:7511668533657150801:2930]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037952, Sink=[3:7511668533657151010:2930].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:19.331224Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668533657150823:2930], SessionActorId: [3:7511668533657150801:2930], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668533657150801:2930]. isRollback=0 2025-06-03T10:30:19.331314Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=MTdkNTJjZTgtYzNiNzNmZTMtNmI5NmI3NjYtYzRlZDFkZGY=, ActorId: [3:7511668533657150801:2930], ActorState: ExecuteState, TraceId: 01jwtnf2xkbces4wg4qxdxbt8b, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668533657151004:2930] from: [3:7511668533657150823:2930] 2025-06-03T10:30:19.331341Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668533657151004:2930] TxId: 281474976715664. Ctx: { TraceId: 01jwtnf2xkbces4wg4qxdxbt8b, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=MTdkNTJjZTgtYzNiNzNmZTMtNmI5NmI3NjYtYzRlZDFkZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:19.331386Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037942;self_id=[3:7511668529362178967:2345];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037942;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:30:19.331398Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=MTdkNTJjZTgtYzNiNzNmZTMtNmI5NmI3NjYtYzRlZDFkZGY=, ActorId: [3:7511668533657150801:2930], ActorState: ExecuteState, TraceId: 01jwtnf2xkbces4wg4qxdxbt8b, Create QueryResponse for error on request, msg: 2025-06-03T10:30:19.331606Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037949;self_id=[3:7511668529362178919:2333];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037949;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:30:19.331630Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7511668529362179063:2358];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:30:19.331648Z node 3 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037946;self_id=[3:7511668529362178970:2348];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037946;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; >> IndexBuildTest::WithFollowers [GOOD] >> VectorIndexBuildTest::BaseCase >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] >> KqpQueryService::TableSink_OltpReplace-HasSecondaryIndex [GOOD] >> KqpQueryService::TableSink_OltpOrder >> KqpQueryService::ClosedSessionRemovedWhileActiveWithQuery [GOOD] >> KqpQueryService::CloseSessionsWithLoad >> TSequence::CreateSequenceParallel [GOOD] >> TSequence::CreateSequenceSequential >> KqpQueryService::ExecuteQueryWithWorkloadManager [GOOD] >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier >> TSequence::CreateSequence >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::IndexPartitioningIsPersisted [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:19.246609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:19.246639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:19.246646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 
100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:19.246651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:19.246672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:19.246676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:19.246687Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:19.246706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:19.246827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:19.246891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:19.256840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:19.256865Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:19.260590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:19.260734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:19.260774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:19.263597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:19.263667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:19.263808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.263887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:19.264593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:19.264661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:19.265043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:19.265054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:19.265061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:19.265071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:19.265078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:19.265105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.266514Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:19.291090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:19.291197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.291274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:19.291324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:19.291339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.292597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.292655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:19.292748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.292760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:19.292768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:19.292774Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:19.293484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.293500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:19.293506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: 
Change state for txid 1:0 3 -> 128 2025-06-03T10:30:19.293950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.293965Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.293977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.294005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:19.294697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:19.295334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:19.295383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:19.295605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.295632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:19.295639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.295699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:19.295705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.295740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:19.295770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:19.296249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:19.296260Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:19.296314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 8 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:20.219534Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:20.219568Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 37us result status StatusSuccess 2025-06-03T10:30:20.219708Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 
TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Index" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:20.219767Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 
2025-06-03T10:30:20.219810Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 46us result status StatusSuccess 2025-06-03T10:30:20.219979Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { 
Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |66.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |66.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut |66.3%| [LD] {RESULT} $(B)/ydb/core/mind/hive/ut/ydb-core-mind-hive-ut >> TSequence::CreateSequenceSequential [GOOD] >> TSequence::CreateSequenceInsideTableThenDropSequence >> TFlatExecutorLeases::BasicsInitialLeaseTimeout [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleep >> TSequence::CreateSequence [GOOD] >> TSequence::CreateDropRecreate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::AlterTable_DropNotNull_WithSetFamily_Valid [GOOD] Test command err: Trying to start YDB, gRPC: 27913, MsgBus: 7019 2025-06-03T10:30:16.084221Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668519219993326:2204];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.084410Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d67/r3tmp/tmp978aFn/pdisk_1.dat 2025-06-03T10:30:16.237126Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:16.237355Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668519219993160:2079] 1748946616081599 != 1748946616081602 TServer::EnableGrpc on GrpcPort 27913, node 1 2025-06-03T10:30:16.257638Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.257672Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.258636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:16.271110Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.271128Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.271130Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.271181Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7019 TClient is connected to server localhost:7019 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:16.418892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.658051Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668519219993818:2329], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.658083Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668519219993826:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.658093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.658994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:30:16.660915Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668519219993832:2333], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:30:16.754456Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668519219993883:2324] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:16.803086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:1, at schemeshard: 72057594046644480 2025-06-03T10:30:16.885361Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.895263Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668519219994128:2469] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:16.897889Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668519219994135:2474] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWQ3YWYxZTktZjVjZjRiMGEtMmYyMDViMGQtODNlYmNlMWM=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:16.904313Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:30:16.911331Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668519219994195:2521] txid# 281474976715667, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:16.911727Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668519219994202:2526] txid# 281474976715668, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWQ3YWYxZTktZjVjZjRiMGEtMmYyMDViMGQtODNlYmNlMWM=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:16.912332Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.974404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 
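Note on the NOT_FOUND warnings and the "path exist, request accepts it" error above: this is the normal first-query bootstrap of the workload manager. A fresh database has no built-in `default` resource pool, several sessions race to create it, and the losers (after the "completed, doublechecking" retry) observe the path already present. A minimal sketch of what an explicitly declared pool would look like, assuming YDB's CREATE RESOURCE POOL syntax; the settings the bootstrap path actually uses are assumptions here:

    -- Hypothetical equivalent of the auto-created `default` pool;
    -- exact setting names and the -1 = "unlimited" convention are assumed.
    CREATE RESOURCE POOL default WITH (
        CONCURRENT_QUERY_LIMIT = -1,  -- no concurrency cap (assumed default)
        QUEUE_SIZE = -1               -- unbounded queue (assumed default)
    );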
2025-06-03T10:30:17.002431Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523514961676:2630] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.tmp/sessions\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:17.002931Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523514961683:2635] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/.tmp/sessions/ZWQ3YWYxZTktZjVjZjRiMGEtMmYyMDViMGQtODNlYmNlMWM=\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" severity: 1 } 2025-06-03T10:30:17.015944Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:30:17.017818Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668523514961729:2403], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:21: Error: At function: KiReadTable!
:3:21: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:30:17.017977Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=MjUzYTY2NjMtYWEyNDUyZC02ZDY4MzE1Ny1mYTcyYWFkOA==, ActorId: [1:7511668523514961727:2402], ActorState: ExecuteState, TraceId: 01jwtnf0nm5f760qjwtj9k6nm0, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:30:17.026847Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668523514961747:2408], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:17: Error: At function: KiReadTable!
:3:17: Error: Cannot find table 'db.[/Root/Temp]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:30:17.026980Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YzRkMzc2ODItYTIzZjdlOGItMzMwNjI4MDgtOGIzNzJlNjI=, ActorId: [1:7511668523514961745:2407], ActorState: ExecuteState, TraceId: 01jwtnf0nz253qvamgw0szc628, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 22633, MsgBus: 25526 2025-06-03T10:30:17.381209Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668523337336587:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:17.381243Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d67/r3tmp/tmpsdLprK/pdisk_1.dat 2025-06-03T10:30:17.393116Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22633, node 2 2025-06-03T10:30:17.408163Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (e ... 178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.939542Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.956110Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668530003201800:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.956134Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.956153Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668530003201805:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.956980Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:18.966270Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668530003201807:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:19.038216Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668534298169154:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:19.170793Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.189022Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.200176Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.262705Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.295181Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.315006Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715683:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.340366Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715685:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.378107Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715688:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.404507Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715690:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 61288, MsgBus: 28995 2025-06-03T10:30:19.752306Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668532514707989:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:19.752643Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d67/r3tmp/tmpA6nDen/pdisk_1.dat 2025-06-03T10:30:19.780981Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were 
not loaded TServer::EnableGrpc on GrpcPort 61288, node 4 2025-06-03T10:30:19.790333Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:19.790352Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:19.790355Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:19.790409Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28995 2025-06-03T10:30:19.860196Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:19.860242Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:19.861124Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28995 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:19.884436Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.891120Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:20.211142Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668536809675836:2326], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.211173Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.211338Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668536809675872:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.212237Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:30:20.214534Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:30:20.214652Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668536809675874:2331], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:30:20.307336Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668536809675925:2323] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:20.598698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:30:20.637956Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7511668536809676075:2352], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:2:30: Error: At function: KiWriteTable!
:2:84: Error: Failed to convert type: Struct<'id':Int32,'val1':Null,'val2':Int32> to Struct<'id':Int32,'val1':Int32,'val2':Int32?>
:2:84: Error: Failed to convert 'val1': Null to Int32
:2:84: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-03T10:30:20.638766Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=4&id=NDIyYmJiMTEtMzJiYzE5NGYtYTZiZWQyMzMtNzNlYzJjMTI=, ActorId: [4:7511668536809676073:2351], ActorState: ExecuteState, TraceId: 01jwtnf46r9syx4p53g5mtkg4d, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: 2025-06-03T10:30:20.645760Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:20.657669Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] >> TSequence::CreateSequenceInsideTableThenDropSequence [GOOD] >> TSequence::CreateSequenceInsideTableThenDropTable >> TVersions::Wreck1Reverse [GOOD] >> TVersions::Wreck0 >> TSequence::CreateDropRecreate [GOOD] >> TSequence::CreateSequenceInsideSequenceNotAllowed ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestLargeWriteAndDelete [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
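Note on the GENERIC_ERROR above (code 2031, "Failed to convert 'val1': Null to Int32"): this is the expected negative half of AlterTable_DropNotNull — a NULL literal cannot be converted into a NOT NULL Int32 column, so compilation fails before execution. A minimal sketch of the statement shape, reconstructed from the logged struct types Struct<'id':Int32,'val1':Int32,'val2':Int32?>; the table name `t` and the exact ALTER syntax are assumptions:

    -- Column names come from the logged struct; everything else is assumed.
    UPSERT INTO t (id, val1, val2) VALUES (1, NULL, 2);
    -- rejected: Failed to convert 'val1': Null to Int32, code 2031

    ALTER TABLE t ALTER COLUMN val1 DROP NOT NULL;  -- assumed syntax

    UPSERT INTO t (id, val1, val2) VALUES (1, NULL, 2);
    -- accepted: val1 is now Int32? and admits NULL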
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! 
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:77:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:80:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:81:2057] recipient: [10:79:2110] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:83:2057] recipient: [10:79:2110] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! 
new actor is[10:82:2111] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:168:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:77:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:80:2057] recipient: [11:79:2110] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:81:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:83:2057] recipient: [11:79:2110] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:82:2111] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:168:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:78:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:81:2057] recipient: [12:80:2110] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:82:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:84:2057] recipient: [12:80:2110] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! new actor is[12:83:2111] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:169:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:81:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:84:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:85:2057] recipient: [13:83:2113] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:87:2057] recipient: [13:83:2113] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! 
new actor is[13:86:2114] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:172:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:81:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:84:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:85:2057] recipient: [14:83:2113] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:87:2057] recipient: [14:83:2113] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! new actor is[14:86:2114] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:172:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:82:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:85:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:86:2057] recipient: [15:84:2113] Leader for TabletID 72057594037927937 is [15:87:2114] sender: [15:88:2057] recipient: [15:84:2113] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! new actor is[15:87:2114] Leader for TabletID 72057594037927937 is [15:87:2114] sender: [15:173:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:85:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:88:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:89:2057] recipient: [16:87:2116] Leader for TabletID 72057594037927937 is [16:90:2117] sender: [16:91:2057] recipient: [16:87:2116] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! 
new actor is[16:90:2117] Leader for TabletID 72057594037927937 is [16:90:2117] sender: [16:176:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:85:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:88:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:89:2057] recipient: [17:87:2116] Leader for TabletID 72057594037927937 is [17:90:2117] sender: [17:91:2057] recipient: [17:87:2116] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! new actor is[17:90:2117] Leader for TabletID 72057594037927937 is [17:90:2117] sender: [17:176:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:86:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:89:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:90:2057] recipient: [18:88:2116] Leader for TabletID 72057594037927937 is [18:91:2117] sender: [18:92:2057] recipient: [18:88:2116] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! 
new actor is[18:91:2117] Leader for TabletID 72057594037927937 is [18:91:2117] sender: [18:177:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] >> TSequence::CreateSequenceInsideTableThenDropTable [GOOD] >> TSequence::CreateSequencesWithIndexedTable >> TSequence::CreateSequenceInsideSequenceNotAllowed [GOOD] >> TSequence::CreateSequenceInsideIndexTableNotAllowed >> IndexBuildTest::CheckLimitWithDroppedIndex >> TSequence::CreateSequenceInsideIndexTableNotAllowed [GOOD] >> TSequence::CopyTableWithSequence >> TSequence::CreateSequencesWithIndexedTable [GOOD] >> TSequence::CreateTableWithDefaultFromSequence >> KqpQueryService::TableSink_OltpUpsert [GOOD] >> KqpQueryService::TableSink_OltpUpdate >> TSequence::CreateTableWithDefaultFromSequence [GOOD] >> TSequence::CreateTableWithDefaultFromSequenceAndIndex |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfter [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout >> TSequence::CopyTableWithSequence [GOOD] >> TSequence::AlterSequence >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] >> ColumnBuildTest::AlreadyExists >> IndexBuildTest::CheckLimitWithDroppedIndex [GOOD] >> IndexBuildTest::DropIndex >> TSequence::AlterSequence [GOOD] >> TSequence::AlterTableSetDefaultFromSequence >> KqpQueryService::ExecuteQueryWithResourcePoolClassifier [GOOD] >> KqpQueryService::ExecuteQueryScalar >> KqpQueryService::TableSink_OlapInsert [GOOD] >> KqpQueryService::TableSink_OlapDelete ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::CreateTableWithDefaultFromSequenceAndIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:20.279987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:20.280028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:20.280035Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:20.280042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:20.280058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:20.280062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:20.280072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:20.280089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:20.280215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:20.280307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:20.297436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:20.297467Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:20.302828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:20.303015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:20.303057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:20.306698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:20.306771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:20.306923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:20.306979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:20.307857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:20.307924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:20.308267Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:20.308282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:20.308295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:20.308305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:20.308312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:20.308336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-03T10:30:20.310089Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:20.329671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:20.329742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.329810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:20.329848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:20.329856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.330721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:20.330750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:20.330802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.330813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:20.330820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:20.330826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:20.331383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.331404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:20.331410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:20.332789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.332810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:20.332817Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:20.332826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:20.333693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:20.334270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:20.334323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:20.334539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:20.334586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:20.334596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:20.334668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:20.334678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:20.334717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:20.334732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:20.335336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:20.335346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:20.335405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:2 progress is 3/4 2025-06-03T10:30:23.290272Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 3/4 2025-06-03T10:30:23.290275Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 3/4, is published: true 2025-06-03T10:30:23.290465Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.290472Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 102:0 2025-06-03T10:30:23.290483Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:344:2320] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 102 at schemeshard: 72057594046678944 2025-06-03T10:30:23.290524Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [7:124:2149], Recipient [7:124:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:30:23.290527Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:30:23.290531Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.290535Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 102:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:23.290560Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:23.290573Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:23.290576Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 4/4 2025-06-03T10:30:23.290579Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-03T10:30:23.290586Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 4/4 2025-06-03T10:30:23.290589Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-03T10:30:23.290591Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 4/4, is published: true 2025-06-03T10:30:23.290598Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:414:2371] message: TxId: 102 2025-06-03T10:30:23.290602Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 4/4 2025-06-03T10:30:23.290607Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:30:23.290609Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:30:23.290625Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:23.290629Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:1 2025-06-03T10:30:23.290631Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:1 2025-06-03T10:30:23.290635Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:23.290638Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:2 2025-06-03T10:30:23.290640Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:2 2025-06-03T10:30:23.290645Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:30:23.290648Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:3 2025-06-03T10:30:23.290650Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:3 2025-06-03T10:30:23.290656Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:30:23.290722Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.290726Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.290741Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435084, Sender [7:124:2149], Recipient [7:124:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvCleanDroppedPaths 2025-06-03T10:30:23.290746Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5054: StateWork, processing event TEvPrivate::TEvCleanDroppedPaths 2025-06-03T10:30:23.290752Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:23.290758Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:30:23.290769Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:23.290804Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.290808Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.290829Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.290833Z node 7 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.290838Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.290841Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.290846Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.290849Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.291277Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:23.291287Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.291482Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.291607Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:23.291630Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:414:2371] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 102 at schemeshard: 72057594046678944 2025-06-03T10:30:23.291663Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:23.291669Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [7:521:2471] 2025-06-03T10:30:23.291689Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:23.291727Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:523:2473], Recipient [7:124:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:23.291735Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:23.291739Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:23.291819Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [7:599:2548], Recipient [7:124:2149]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:30:23.291824Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:30:23.291842Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:23.291885Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 37us result status StatusPathDoesNotExist 2025-06-03T10:30:23.291920Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeTable, state: EPathStateNotExist), drop stepId: 5000003, drop txId: 102, source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table" PathId: 2 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] >> KqpQueryService::DdlSecret [GOOD] >> KqpQueryService::DdlMixedDml >> ColumnBuildTest::BaseCase >> VectorIndexBuildTest::BaseCase [FAIL] >> IndexBuildTest::DropIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! 
new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! 
new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:77:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:80:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:81:2057] recipient: [10:79:2110] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:83:2057] recipient: [10:79:2110] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:82:2111] Leader for TabletID 72057594037927937 is [10:82:2111] sender: [10:168:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:77:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:80:2057] recipient: [11:79:2110] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:81:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:83:2057] recipient: [11:79:2110] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! 
new actor is[11:82:2111] Leader for TabletID 72057594037927937 is [11:82:2111] sender: [11:168:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:78:2057] recipient: [12:36:2083] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:81:2057] recipient: [12:80:2110] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:82:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:84:2057] recipient: [12:80:2110] !Reboot 72057594037927937 (actor [12:57:2097]) rebooted! !Reboot 72057594037927937 (actor [12:57:2097]) tablet resolver refreshed! new actor is[12:83:2111] Leader for TabletID 72057594037927937 is [12:83:2111] sender: [12:169:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:81:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:84:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:85:2057] recipient: [13:83:2113] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:87:2057] recipient: [13:83:2113] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:86:2114] Leader for TabletID 72057594037927937 is [13:86:2114] sender: [13:172:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:81:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:84:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:85:2057] recipient: [14:83:2113] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:87:2057] recipient: [14:83:2113] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! 
new actor is[14:86:2114] Leader for TabletID 72057594037927937 is [14:86:2114] sender: [14:172:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:82:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:85:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:86:2057] recipient: [15:84:2113] Leader for TabletID 72057594037927937 is [15:87:2114] sender: [15:88:2057] recipient: [15:84:2113] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! new actor is[15:87:2114] Leader for TabletID 72057594037927937 is [15:87:2114] sender: [15:173:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:85:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:88:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:89:2057] recipient: [16:87:2116] Leader for TabletID 72057594037927937 is [16:90:2117] sender: [16:91:2057] recipient: [16:87:2116] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:90:2117] Leader for TabletID 72057594037927937 is [16:90:2117] sender: [16:176:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:85:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:88:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:89:2057] recipient: [17:87:2116] Leader for TabletID 72057594037927937 is [17:90:2117] sender: [17:91:2057] recipient: [17:87:2116] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! 
new actor is[17:90:2117] Leader for TabletID 72057594037927937 is [17:90:2117] sender: [17:176:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:86:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:89:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:90:2057] recipient: [18:88:2116] Leader for TabletID 72057594037927937 is [18:91:2117] sender: [18:92:2057] recipient: [18:88:2116] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! new actor is[18:91:2117] Leader for TabletID 72057594037927937 is [18:91:2117] sender: [18:177:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:89:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:92:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:93:2057] recipient: [19:91:2119] Leader for TabletID 72057594037927937 is [19:94:2120] sender: [19:95:2057] recipient: [19:91:2119] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:94:2120] Leader for TabletID 72057594037927937 is [19:94:2120] sender: [19:180:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:89:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:92:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:93:2057] recipient: [20:91:2119] Leader for TabletID 72057594037927937 is [20:94:2120] sender: [20:95:2057] recipient: [20:91:2119] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! 
new actor is[20:94:2120] Leader for TabletID 72057594037927937 is [20:94:2120] sender: [20:180:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:90:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:93:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:94:2057] recipient: [21:92:2119] Leader for TabletID 72057594037927937 is [21:95:2120] sender: [21:96:2057] recipient: [21:92:2119] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! new actor is[21:95:2120] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] >> TFlatExecutorLeases::BasicsInitialLeaseSleep [GOOD] >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout >> TPartBtreeIndexIteration::OneNode_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes >> TSequence::AlterTableSetDefaultFromSequence [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::DropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:22.453546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:22.453581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:22.453587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:22.453592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:22.453608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:22.453612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:22.453628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: 
BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:22.453643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:22.453756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:22.453842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:22.468353Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:22.468390Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:22.477796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:22.477969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:22.478014Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:22.483214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:22.483311Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:22.483501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:22.483612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:22.486612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:22.486688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:22.487044Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:22.487053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:22.487061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:22.487070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:22.487076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:22.487098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.488983Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for 
TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:22.524970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:22.525079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.525164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:22.525225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:22.525241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.526951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:22.526999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:22.527089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.527104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:22.527112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:22.527119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:22.527848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.527869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:22.527876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:22.528420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.528436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:22.528443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:30:22.528469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:22.529464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:22.530106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:22.530151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:22.530396Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:22.530434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:22.530448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:22.530544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:22.530558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:22.530605Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:22.530621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:22.531196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:22.531210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:22.531273Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
5 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.186544Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.186550Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 7], version: 18446744073709551615 2025-06-03T10:30:24.186557Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 5 2025-06-03T10:30:24.186678Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.186690Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.186695Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.186700Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 8], version: 18446744073709551615 2025-06-03T10:30:24.186705Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 3 2025-06-03T10:30:24.187109Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187137Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.187143Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-03T10:30:24.187153Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:24.187498Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187520Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 7 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187526Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.187807Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187825Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 8 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.187830Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.188036Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.188049Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:24.188145Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 4 2025-06-03T10:30:24.188191Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 2/3 2025-06-03T10:30:24.188197Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-03T10:30:24.188203Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:0 progress is 2/3 2025-06-03T10:30:24.188207Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 2/3 2025-06-03T10:30:24.188213Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: false 2025-06-03T10:30:24.188584Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.188624Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 9 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 105 2025-06-03T10:30:24.188631Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 105 2025-06-03T10:30:24.188637Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 105, pathId: [OwnerId: 72057594046678944, LocalPathId: 9], version: 18446744073709551615 2025-06-03T10:30:24.188647Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId 
[OwnerId: 72057594046678944, LocalPathId: 9] was 4 2025-06-03T10:30:24.188670Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 105, ready parts: 2/3, is published: true 2025-06-03T10:30:24.189020Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.189132Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.189326Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 105:2, at schemeshard: 72057594046678944 2025-06-03T10:30:24.189337Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 105:2 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:24.189410Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 3 2025-06-03T10:30:24.189441Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:2 progress is 3/3 2025-06-03T10:30:24.189447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-03T10:30:24.189454Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#105:2 progress is 3/3 2025-06-03T10:30:24.189458Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-03T10:30:24.189464Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 105, ready parts: 3/3, is published: true 2025-06-03T10:30:24.189483Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:416:2372] message: TxId: 105 2025-06-03T10:30:24.189490Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 105 ready parts: 3/3 2025-06-03T10:30:24.189496Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:0 2025-06-03T10:30:24.189502Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 105:0 2025-06-03T10:30:24.189530Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 7] was 3 2025-06-03T10:30:24.189538Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:1 2025-06-03T10:30:24.189543Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 105:1 2025-06-03T10:30:24.189549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 8] was 2 2025-06-03T10:30:24.189554Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 105:2 2025-06-03T10:30:24.189558Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for 
txid 105:2 2025-06-03T10:30:24.189566Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 9] was 2 2025-06-03T10:30:24.189814Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.189830Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.189836Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.190500Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:24.190961Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:30:24.190981Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:947:2869] TestWaitNotification: OK eventTxId 105 >> KqpQueryService::ExecuteQueryScalar [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithFirstRowPreloadedWithReboot [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns >> KqpProxy::DatabasesCacheForServerless [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sequence/unittest >> TSequence::AlterTableSetDefaultFromSequence [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:21.035215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:21.035241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:21.035248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:21.035253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:21.035269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:21.035273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:21.035283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:21.035298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:21.035407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:21.035469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:21.050506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:21.050539Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:21.054996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:21.055134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:21.055170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:21.058843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:21.058914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:21.059047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.059096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:21.059749Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:21.059807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:21.060110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:21.060125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:21.060135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:21.060144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:21.060151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:21.060174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.061759Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:21.100196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:21.100280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.100351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:21.100401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:21.100413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.102445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.102507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:21.102598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.102616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:21.102625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:21.102633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:21.107620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.107661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:21.107672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:21.111067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.111112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.111123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.111137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:21.112211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:21.115909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:21.116006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:21.116319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.116385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:21.116404Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.116530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:21.116560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.116634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:21.116653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:21.118528Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:21.118551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:21.118648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
CHEME Origin: 72075186233409549 Status: COMPLETE TxId: 114 Step: 5000014 OrderId: 114 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 294 } } CommitVersion { Step: 5000014 TxId: 114 } 2025-06-03T10:30:24.555534Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:24.555857Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [7:1050:2986], Recipient [7:124:2149]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:30:24.555870Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:30:24.555875Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:30:24.556192Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269551620, Sender [7:988:2932], Recipient [7:124:2149]: NKikimrTxDataShard.TEvSchemaChanged Source { RawX1: 988 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-03T10:30:24.556203Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4914: StateWork, processing event TEvDataShard::TEvSchemaChanged 2025-06-03T10:30:24.556213Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 988 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-03T10:30:24.556219Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 114, tablet: 72075186233409549, partId: 0 2025-06-03T10:30:24.556241Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 114:0, at schemeshard: 72057594046678944, message: Source { RawX1: 988 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-03T10:30:24.556249Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:30:24.556259Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 114:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 988 RawX2: 30064774004 } Origin: 72075186233409549 State: 2 TxId: 114 Step: 0 Generation: 2 2025-06-03T10:30:24.556274Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 114:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:24.556279Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.556285Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 114:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:30:24.556292Z node 7 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 114:0 129 -> 240 2025-06-03T10:30:24.556321Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:24.556556Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.556616Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-03T10:30:24.556621Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.556638Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 114 2025-06-03T10:30:24.556642Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.557221Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.557235Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.557278Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.557283Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.557289Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 114:0 2025-06-03T10:30:24.557325Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:988:2932] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 114 at schemeshard: 72057594046678944 2025-06-03T10:30:24.557394Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [7:124:2149], Recipient [7:124:2149]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:30:24.557402Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:30:24.557410Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 114:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.557418Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 114:0 ProgressState 2025-06-03T10:30:24.557436Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:24.557441Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#114:0 progress is 1/1 2025-06-03T10:30:24.557446Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-03T10:30:24.557451Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#114:0 progress is 1/1 
2025-06-03T10:30:24.557455Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-03T10:30:24.557461Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 114, ready parts: 1/1, is published: true 2025-06-03T10:30:24.557472Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:390:2357] message: TxId: 114 2025-06-03T10:30:24.557480Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 114 ready parts: 1/1 2025-06-03T10:30:24.557485Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 114:0 2025-06-03T10:30:24.557490Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 114:0 2025-06-03T10:30:24.557520Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-03T10:30:24.558271Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:24.558298Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:390:2357] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 114 at schemeshard: 72057594046678944 2025-06-03T10:30:24.558337Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 114: got EvNotifyTxCompletionResult 2025-06-03T10:30:24.558344Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 114: satisfy waiter [7:1017:2953] 2025-06-03T10:30:24.558417Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:1019:2955], Recipient [7:124:2149]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:24.558424Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:24.558428Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 114 TestModificationResults wait txId: 115 2025-06-03T10:30:24.558650Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [7:1058:2994], Recipient [7:124:2149]: {TEvModifySchemeTransaction txid# 115 TabletId# 72057594046678944} 2025-06-03T10:30:24.558656Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:30:24.559323Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table3" Columns { Name: "value" DefaultFromSequence: "/MyRoot/seq1" } } } TxId: 115 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:24.559386Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/Table3, pathId: , opId: 115:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.559479Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation.cpp:130: IgniteOperation, opId: 115:1, propose status:StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, at schemeshard: 72057594046678944 2025-06-03T10:30:24.559536Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:24.560792Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 115, response: Status: StatusInvalidParameter Reason: "Column \'value\' is of type Bool but default expression is of type Int64" TxId: 115 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:24.560835Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 115, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Column 'value' is of type Bool but default expression is of type Int64, operation: ALTER TABLE, path: /MyRoot/Table3 2025-06-03T10:30:24.560841Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 115, wait until txId: 115 |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> KqpQueryService::DdlMixedDml [GOOD] >> TSequenceReboots::CreateSequence [GOOD] >> TPartBtreeIndexIteration::FewNodes [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::ExecuteQueryScalar [GOOD] Test command err: Trying to start YDB, gRPC: 2335, MsgBus: 8051 2025-06-03T10:30:16.910015Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668519891891544:2151];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d54/r3tmp/tmp8xBx7u/pdisk_1.dat 2025-06-03T10:30:16.974525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:16.992817Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:16.995151Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668519891891404:2079] 1748946616905549 != 1748946616905552 TServer::EnableGrpc on GrpcPort 2335, node 1 2025-06-03T10:30:17.009829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:17.009899Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:17.011420Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:17.021489Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:17.021508Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:17.021509Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:17.021561Z node 1 :NET_CLASSIFIER ERROR: 
net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8051 TClient is connected to server localhost:8051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:30:17.106365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.108950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:17.119085Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:30:17.184966Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.219985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.237167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.365479Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668524186860336:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.365511Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.423385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.434403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.444717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.455804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.514903Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.572537Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.587955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.654426Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668524186860998:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.654453Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.654562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668524186861003:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.655481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:17.659235Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:17.659313Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668524186861005:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:17.750074Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668524186861059:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:17.937901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 27958, MsgBus: 2787 2025-06-03T10:30:19.529730Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668532500858845:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:19.529752Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d54/r3tmp/tmpq3J60n/pdisk_1.dat 2025-06-03T10:30:19.545423Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27958, node 2 2025-06-03T10:30:19.553007Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:19.553024Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:19.553027Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:19.553078Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2787 TClient is connected to server localhost:2787 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) ... eId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:21.541663Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668543099526832:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:21.711056Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:21.860894Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:1, at schemeshard: 72057594046644480 2025-06-03T10:30:21.950771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:30:22.007741Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:1, at schemeshard: 72057594046644480 2025-06-03T10:30:22.084941Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715682:0, at schemeshard: 72057594046644480 2025-06-03T10:30:22.171215Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715685:0, at schemeshard: 72057594046644480 2025-06-03T10:30:22.241329Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715688:0, at schemeshard: 72057594046644480 2025-06-03T10:30:22.468263Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715697:0, at schemeshard: 72057594046644480 Wait resource pool classifier 0.020163s: status = SUCCESS, issues = 2025-06-03T10:30:23.493882Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=MTBjZjJlN2UtNzM1ZThmNTQtM2MzNmMwZjctMjdjZGQyNTc=, ActorId: [3:7511668551689462542:2734], ActorState: ExecuteState, TraceId: 01jwtnf704dzgpj9j7f11th9hf, Create QueryResponse for error on request, msg: Query failed during adding/waiting in workload pool MyPool Trying to start YDB, gRPC: 5959, MsgBus: 4278 2025-06-03T10:30:23.865823Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668549151090390:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:23.866117Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d54/r3tmp/tmpkfCAUZ/pdisk_1.dat 2025-06-03T10:30:23.886052Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5959, node 4 2025-06-03T10:30:23.897941Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:23.897956Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:23.897958Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:23.898010Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4278 TClient is connected to server localhost:4278 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:30:23.965809Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:23.965841Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:23.967068Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:23.971558Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:23.973126Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:23.974312Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:23.987079Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.009225Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:30:24.021174Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.239745Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668553446059251:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.239771Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.248042Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.257067Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.265164Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.280318Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.294375Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.308082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.322359Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.338430Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668553446059904:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.338473Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.338501Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668553446059909:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.339439Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:24.341746Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668553446059911:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:24.414257Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668553446059962:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TVersions::Wreck0 [GOOD] >> TVersions::Wreck0Reverse >> ColumnBuildTest::ValidDefaultValue ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/proxy_service/ut/unittest >> KqpProxy::DatabasesCacheForServerless [GOOD] Test command err: 2025-06-03T10:30:10.366455Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668495334849059:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.369385Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:10.381257Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668495724934908:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.386936Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668494960651293:2172];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.390645Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511668493599706238:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.381894Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668494444123369:2158];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:10.398891Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002083/r3tmp/tmpWZS6Zu/pdisk_1.dat 2025-06-03T10:30:10.488530Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:10.491748Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:10.494448Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:10.710629Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:10.718631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:10.718663Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> 
Connecting 2025-06-03T10:30:10.718995Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:10.719011Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:10.719065Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:10.719072Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:10.719128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:10.719137Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:10.721856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:10.721881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:10.801820Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:30:10.801844Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 4 Cookie 4 2025-06-03T10:30:10.801848Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:30:10.802046Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 5 Cookie 5 2025-06-03T10:30:10.802152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:10.802477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:10.803239Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:10.803281Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:10.805678Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:7408 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:30:10.878504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:11.102899Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.103560Z node 5 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.105819Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.105844Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.106016Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.106019Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:11.106029Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.109876Z node 5 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool size: 0, user pool id: 1 2025-06-03T10:30:11.114048Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:147: Table result_sets updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.114067Z node 5 :KQP_PROXY NOTICE: table_creator.cpp:167: Table result_sets updater. Creating table 2025-06-03T10:30:11.114085Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:100: Table result_sets updater. Full table path:/dc-1/.metadata/result_sets 2025-06-03T10:30:11.114195Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_executions updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.114196Z node 5 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_executions updater. Creating table 2025-06-03T10:30:11.114201Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_executions updater. Full table path:/dc-1/.metadata/script_executions 2025-06-03T10:30:11.114245Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:147: Table script_execution_leases updater. Describe result: PathErrorUnknown 2025-06-03T10:30:11.114246Z node 5 :KQP_PROXY NOTICE: table_creator.cpp:167: Table script_execution_leases updater. Creating table 2025-06-03T10:30:11.114249Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:100: Table script_execution_leases updater. 
Full table path:/dc-1/.metadata/script_execution_leases 2025-06-03T10:30:11.116922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725659:1, at schemeshard: 72057594046644480 2025-06-03T10:30:11.117595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.117899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976725657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:11.122699Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_execution_leases updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725659 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 } 2025-06-03T10:30:11.122736Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_execution_leases updater. Subscribe on create table tx: 281474976725659 2025-06-03T10:30:11.122756Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:190: Table script_executions updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 4 } 2025-06-03T10:30:11.122760Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:261: Table script_executions updater. Subscribe on create table tx: 281474976725658 2025-06-03T10:30:11.122769Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:190: Table result_sets updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976725657 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 5 } 2025-06-03T10:30:11.122774Z node 5 :KQP_PROXY DEBUG: table_creator.cpp:261: Table result_sets updater. Subscribe on create table tx: 281474976725657 2025-06-03T10:30:11.132050Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.133649Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.133796Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:512: Subscribed for config changes. 2025-06-03T10:30:11.133802Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:519: Updated table service config. 2025-06-03T10:30:11.133809Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:1665: Updated YQL logs priority to current level: 4 2025-06-03T10:30:11.133823Z node 2 :KQP_PROXY INFO: kqp_proxy_service.cpp:442: Cannot start publishing usage, tenants: /dc-1, empty 2025-06-03T10:30:11.133842Z node 2 :KQP_PROXY DEBUG: kqp_proxy_service.cpp:425: Unexpected whiteboard info: pool size is smaller than user pool id, pool siz ... 
::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:13.702797Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:13.702805Z node 8 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:13.767796Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:13.767848Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:13.772057Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:13.796382Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:13.801319Z node 8 :STATISTICS WARN: tx_init.cpp:287: [72075186224037894] TTxInit::Complete. EnableColumnStatistics=false 2025-06-03T10:30:13.969042Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.977738Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511668505632077488:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:13.977793Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:13.983879Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:13.983907Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:13.985175Z node 6 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 7 Cookie 7 2025-06-03T10:30:13.986348Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:14.031278Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031350Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031364Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031385Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031399Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031416Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031431Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.031472Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute 
CreateTablet Postponed 2025-06-03T10:30:14.031493Z node 7 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:30:14.091942Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-03T10:30:14.092038Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7511668512073176546:2340], Start check tables existence, number paths: 2 2025-06-03T10:30:14.092350Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-03T10:30:14.092354Z node 8 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-06-03T10:30:14.092879Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:14.092920Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:14.093649Z node 8 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 3 2025-06-03T10:30:14.093685Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7511668512073176546:2340], Describe table /Root/test-dedicated/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-03T10:30:14.093702Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7511668512073176546:2340], Describe table /Root/test-dedicated/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-03T10:30:14.093712Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [8:7511668512073176546:2340], Successfully finished 2025-06-03T10:30:14.093743Z node 8 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-03T10:30:14.099770Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:14.119556Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:14.129693Z node 7 :STATISTICS WARN: tx_init.cpp:287: [72075186224038895] TTxInit::Complete.
EnableColumnStatistics=false 2025-06-03T10:30:14.330158Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.372006Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:14.444525Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:574: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7511668509927045698:2496], Database: /Root/test-serverless, Start database fetching 2025-06-03T10:30:14.444599Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: scheme_actors.cpp:600: [WorkloadService] [TDatabaseFetcherActor] ActorId: [7:7511668509927045698:2496], Database: /Root/test-serverless, Database info successfully fetched, serverless: 1 2025-06-03T10:30:14.445526Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-03T10:30:14.445634Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7511668509927045719:2367], Start check tables existence, number paths: 2 2025-06-03T10:30:14.445756Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-03T10:30:14.445759Z node 7 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools were enabled 2025-06-03T10:30:14.446200Z node 7 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, node count: 3 2025-06-03T10:30:14.446455Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7511668509927045719:2367], Describe table /Root/test-shared/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-03T10:30:14.446471Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7511668509927045719:2367], Describe table /Root/test-shared/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-03T10:30:14.446478Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [7:7511668509927045719:2367], Successfully finished 2025-06-03T10:30:14.446498Z node 7 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-03T10:30:17.857451Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[6:7511668501823320700:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:17.857508Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:30:18.652148Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[8:7511668507778208589:2094];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.652207Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:30:18.978237Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784:
fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7511668505632077488:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.978287Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:30:24.447073Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=6&id=MjhhOTNmNGYtZTFkYjM5MTAtOGE2NDBiOWMtYTAyMDgzNDE=, ActorId: [6:7511668506118288645:2331], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:30:24.447097Z node 6 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=6&id=MjhhOTNmNGYtZTFkYjM5MTAtOGE2NDBiOWMtYTAyMDgzNDE=, ActorId: [6:7511668506118288645:2331], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:24.447101Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=6&id=MjhhOTNmNGYtZTFkYjM5MTAtOGE2NDBiOWMtYTAyMDgzNDE=, ActorId: [6:7511668506118288645:2331], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:30:24.447105Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=6&id=MjhhOTNmNGYtZTFkYjM5MTAtOGE2NDBiOWMtYTAyMDgzNDE=, ActorId: [6:7511668506118288645:2331], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:30:24.447127Z node 6 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=6&id=MjhhOTNmNGYtZTFkYjM5MTAtOGE2NDBiOWMtYTAyMDgzNDE=, ActorId: [6:7511668506118288645:2331], ActorState: unknown state, Session actor destroyed 2025-06-03T10:30:24.447792Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 8 2025-06-03T10:30:24.447949Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connected -> Disconnected 2025-06-03T10:30:24.447982Z node 6 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 7 2025-06-03T10:30:24.448004Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connected -> Disconnected >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] >> TChargeBTreeIndex::OneNode_Groups [GOOD] >> TChargeBTreeIndex::OneNode_History |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sequence_reboots/unittest >> TSequenceReboots::CreateSequence [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is 
[1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:09.225022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:09.225061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:09.225069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:09.225076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:09.225084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:09.225089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:09.225103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:09.225131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:09.225273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:09.225412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:09.244469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:09.244506Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:09.244621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:09.248401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:09.248582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:09.248628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:09.250849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:09.250926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:09.251101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:09.251202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:09.251774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:09.251830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:09.252220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:09.252236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:09.252256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:09.252267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:09.252275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:09.252332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:09.254385Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:09.283151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:09.283281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.283382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:09.283446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:09.283462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.284610Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:09.284656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:09.284778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.284794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:09.284802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:09.284810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:09.285601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.285624Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:09.285633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:09.286449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.286468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:09.286476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:09.286487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:09.287426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:09.288036Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:09.288095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:09.288368Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, 
transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:09.288407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:09.288418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:09.288498Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:25.167843Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:25.167849Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [52:208:2209], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-03T10:30:25.167859Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [52:208:2209], at schemeshard: 72057594046678944, txId: 1002, path id: 3 2025-06-03T10:30:25.167956Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1002:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.167966Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1002:0 ProgressState 2025-06-03T10:30:25.167979Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:25.167986Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:30:25.167992Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:30:25.167999Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1002:0 progress is 1/1 2025-06-03T10:30:25.168004Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:30:25.168011Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1002, ready parts: 1/1, is published: false 2025-06-03T10:30:25.168018Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1002 ready parts: 1/1 2025-06-03T10:30:25.168025Z node 52 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1002:0 2025-06-03T10:30:25.168031Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1002:0 2025-06-03T10:30:25.168062Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:25.168071Z node 52 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, 
publications: 2, subscribers: 1 2025-06-03T10:30:25.168077Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:25.168082Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:30:25.168225Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274137603, Sender [52:208:2209], Recipient [52:126:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 7 } 2025-06-03T10:30:25.168234Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-03T10:30:25.168253Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.168267Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.168273Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:30:25.168280Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:25.168286Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:30:25.168304Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:25.168667Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 274137603, Sender [52:208:2209], Recipient [52:126:2151]: NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 3] Version: 2 } 2025-06-03T10:30:25.168677Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4966: StateWork, processing event NSchemeBoard::NSchemeshardEvents::TEvUpdateAck 2025-06-03T10:30:25.168690Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.168705Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.168712Z node 52 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:30:25.168719Z node 52 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:30:25.168725Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:25.168740Z node 52 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 1 2025-06-03T10:30:25.168747Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [52:363:2342] 2025-06-03T10:30:25.168754Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:30:25.169157Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:25.169361Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.169370Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:25.169583Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:30:25.169590Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:30:25.169608Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [52:363:2342] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 1002 at schemeshard: 72057594046678944 2025-06-03T10:30:25.169625Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:30:25.169631Z node 52 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [52:364:2343] 2025-06-03T10:30:25.169672Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [52:366:2345], Recipient [52:126:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:25.169679Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:30:25.169688Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1002 2025-06-03T10:30:25.169771Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [52:388:2366], Recipient [52:126:2151]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/seq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false } 2025-06-03T10:30:25.169777Z node 52 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 
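Editor's note: the block above shows the schemeshard's publication bookkeeping for txId 1002: two path versions are published, each TEvUpdateAck decrements an in-flight counter, and when the counter hits zero the waiting subscriber receives its TEvNotifyTxCompletionResult. A compact sketch of that countdown follows; the struct and function names are illustrative, not the actual schemeshard classes.

    #include <cstdint>
    #include <iostream>
    #include <map>

    struct TPublication {
        int InFlight = 0;     // path versions still awaiting an ack
        int Subscribers = 0;  // waiters to notify on completion
    };

    std::map<uint64_t, TPublication> Publications;

    // One call per TEvUpdateAck, as in "TTxAckPublishToSchemeBoard Execute".
    void AckPublish(uint64_t txId) {
        auto& p = Publications[txId];
        std::cout << "Publication in-flight, count: " << p.InFlight
                  << ", txId: " << txId << "\n";
        if (--p.InFlight == 0) {
            std::cout << "Publication complete, notify & remove, subscribers: "
                      << p.Subscribers << "\n";
            Publications.erase(txId);
        }
    }

    int main() {
        Publications[1002] = {2, 1};  // two paths published, one waiter
        AckPublish(1002);             // ack for LocalPathId 1, version 7
        AckPublish(1002);             // ack for LocalPathId 3, version 2
    }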
2025-06-03T10:30:25.169791Z node 52 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/seq" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:25.169837Z node 52 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/seq" took 40us result status StatusSuccess 2025-06-03T10:30:25.169920Z node 52 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/seq" PathDescription { Self { Name: "seq" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSequence CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SequenceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } SequenceDescription { Name: "seq" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SequenceShard: 72075186233409546 MinValue: 1 MaxValue: 9223372036854775807 StartValue: 1 Cache: 1 Increment: 1 Cycle: false DataType: "Int64" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlMixedDml [GOOD] Test command err: Trying to start YDB, gRPC: 26365, MsgBus: 28994 2025-06-03T10:30:16.848910Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668519594212604:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.848968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d5e/r3tmp/tmpFL4XU2/pdisk_1.dat 2025-06-03T10:30:16.943544Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26365, node 1 2025-06-03T10:30:16.951233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.951271Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.952319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:16.960293Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 
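Editor's note: the HIVE warnings that recur through these traces all describe one small per-node state machine: a node starts Unknown, is marked Disconnected, moves through Connecting to Connected once the interconnect session is up, and drops back to Disconnected when the node is killed. The sketch below reads that machine off the log messages; the enum is inferred from the warnings, not taken from node_info.cpp.

    #include <iostream>

    enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

    const char* Name(EVolatileState s) {
        switch (s) {
            case EVolatileState::Unknown:      return "Unknown";
            case EVolatileState::Disconnected: return "Disconnected";
            case EVolatileState::Connecting:   return "Connecting";
            case EVolatileState::Connected:    return "Connected";
        }
        return "?";
    }

    // Logs each transition in the same shape as the HIVE warnings above.
    void Transition(EVolatileState& state, EVolatileState next) {
        std::cout << "VolatileState: " << Name(state) << " -> " << Name(next) << "\n";
        state = next;
    }

    int main() {
        EVolatileState node = EVolatileState::Unknown;
        Transition(node, EVolatileState::Disconnected);  // registered, not yet reachable
        Transition(node, EVolatileState::Connecting);    // interconnect session opening
        Transition(node, EVolatileState::Connected);     // node now serving tablets
        Transition(node, EVolatileState::Disconnected);  // the "killing node" path at shutdown
    }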
2025-06-03T10:30:16.960310Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.960313Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.960370Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28994 TClient is connected to server localhost:28994 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:17.054595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.058479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.123476Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.190308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.206041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.359991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668523889181513:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.360036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.419397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.430331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.490802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.506719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.519915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.534171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.548524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.570640Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668523889182167:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.570674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.570951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668523889182172:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.571989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:17.575548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:17.575936Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668523889182174:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:17.628606Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182225:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:17.846882Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182514:3581] txid# 281474976715673, issues: { message: "Group already exists" severity: 1 } 2025-06-03T10:30:17.848376Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=ZmIxZGZhZjUtNTQwYmNjMzctYmExMjM2Y2EtYzA3NDI0YjE=, ActorId: [1:7511668523889182508:2513], ActorState: ExecuteState, TraceId: 01jwtnf1fj4b3vqdjnxc9kzqrm, Create QueryResponse for error on request, msg: 2025-06-03T10:30:17.868831Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182567:3604] txid# 281474976715677, issues: { message: "Group not found" severity: 1 } 2025-06-03T10:30:17.868983Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=NGU1NTQxNjEtNmY2ZjRkZS1kYzJlZDMyMS1iNmM3YWE5Yw==, ActorId: [1:7511668523889182561:2523], ActorState: ExecuteState, TraceId: 01jwtnf1g89szcr56fbdntbrz9, Create QueryResponse for error on request, msg: 2025-06-03T10:30:17.906339Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182626:3633] txid# 281474976715682, issues: { message: "Group already exists" severity: 1 } 2025-06-03T10:30:17.906709Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=OTlkYzkzMzMtM2VhZGU2YzktYTE0ZGI2YmUtODgzZDVjYjU=, ActorId: [1:7511668523889182620:2535], ActorState: ExecuteState, TraceId: 01jwtnf1h86snyqs9q0r0sj3ap, Create QueryResponse for error on request, msg: 2025-06-03T10:30:17.945047Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182684:3661] txid# 281474976715687, issues: { message: "Role \"user1\" is already a member of role \"group1\"" issue_code: 2 severity: 3 } 2025-06-03T10:30:17.950718Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182698:3668] txid# 281474976715688, issues: { message: "Member account not found" severity: 1 } 2025-06-03T10:30:17.950893Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=MjRiOTZlNWQtOGYzNzNlNmQtM2IxZTRiZWUtZmZjMWY5NjY=, ActorId: [1:7511668523889182692:2550], ActorState: ExecuteState, TraceId: 01jwtnf1jtf2jcj0kyxazx4h1x, Create QueryResponse for error on request, msg: 2025-06-03T10:30:17.965049Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182729:3683] txid# 281474976715691, issues: { message: "Role \"user1\" is not a member of role \"group1\"" issue_code: 3 severity: 2 } 2025-06-03T10:30:17.970849Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182743:3690] txid# 281474976715692, issues: { message: "Role \"user3\" is not a member of role \"group1\"" issue_code: 3 severity: 2 } 2025-06-03T10:30:17.981915Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668523889182769:3704] txid# 281474976715694, issues: { message: "Member account not found" severity: 1 } 2025-06-03T10:30:17.982030Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: 
ydb://session/3?node_id=1&id=YmY4MWY5ZDgtYzc4YmUxNGEtZDlkYjQ0N2ItZmFlMjEwMDc=, ActorId: [1:75116685238891 ... kOTI=. TraceId : 01jwtnf72yd1fwemd1t306hm1q. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [3:7511668550217526254:3479], status: PRECONDITION_FAILED, reason: {
: Error: Terminate execution } 2025-06-03T10:30:23.656747Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=NDIzYmY2NzMtZjU0NmZiOGYtYzViNmVlZjYtNmFjZmRkOTI=, ActorId: [3:7511668550217526026:3479], ActorState: ExecuteState, TraceId: 01jwtnf72yd1fwemd1t306hm1q, Create QueryResponse for error on request, msg: 2025-06-03T10:30:23.657328Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor.h:64;event=unexpected reply;error_message=operation { ready: true status: PRECONDITION_FAILED issues { message: "Conflict with existing key." issue_code: 2012 severity: 1 } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { id: "01jwtnf70nd7qwmxyv1qx9n4m4" } } } } ;request=session_id: "ydb://session/3?node_id=3&id=NDIzYmY2NzMtZjU0NmZiOGYtYzViNmVlZjYtNmFjZmRkOTI=" tx_control { tx_id: "01jwtnf70nd7qwmxyv1qx9n4m4" } query { yql_text: "DECLARE $objects AS List>;\nINSERT INTO `//Root/.metadata/secrets/values`\nSELECT ownerUserId,secretId,value FROM AS_TABLE($objects)\n" } parameters { key: "$objects" value { type { list_type { item { struct_type { members { name: "ownerUserId" type { type_id: UTF8 } } members { name: "secretId" type { type_id: UTF8 } } members { name: "value" type { type_id: UTF8 } } } } } } value { items { items { text_value: "" } items { text_value: "my_secret_2" } items { text_value: "qwerty" } } } } } ; 2025-06-03T10:30:23.657436Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=OTZlNDRiNWQtZDFkMjAzNmMtNzhhYTIyMzEtYjc1ODllMzk=, ActorId: [3:7511668550217526018:3474], ActorState: ExecuteState, TraceId: 01jwtnf6ygaw1xgwrq1da6wj1b, Create QueryResponse for error on request, msg: Execute SQL: UPSERT OBJECT my_secret_2 (TYPE SECRET) WITH value = "edcba"; Trying to start YDB, gRPC: 32662, MsgBus: 30962 2025-06-03T10:30:24.173811Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668555462066151:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:24.173840Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d5e/r3tmp/tmpkWiSsw/pdisk_1.dat 2025-06-03T10:30:24.188584Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32662, node 4 2025-06-03T10:30:24.197148Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:24.197164Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:24.197166Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:24.197207Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30962 TClient is connected to server localhost:30962 WaitRootIsUp 'Root'... 
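Editor's note: the METADATA_PROVIDER lines here and earlier come from one probe: TTableExistsActor asks the scheme cache whether the migrations path exists, reports scheme_cache_undelivered_message when the request bounces during startup, and error=timeout when no answer arrives in time. A toy model of the three failure-aware outcomes, with an illustrative enum and helper rather than the actual actor:

    #include <iostream>
    #include <string>

    // Outcomes matching the messages in the traces: a normal answer, an
    // undelivered request during startup, and a timed-out probe.
    enum class EProbeResult { Exists, Missing, Undelivered, Timeout };

    void ReportProbe(const std::string& path, EProbeResult result) {
        switch (result) {
            case EProbeResult::Exists:
                std::cout << path << ": exists\n";
                break;
            case EProbeResult::Missing:
                std::cout << path << ": missing\n";
                break;
            case EProbeResult::Undelivered:
                std::cout << "cannot detect path existence; path=" << path
                          << "; error=scheme_cache_undelivered_message\n";
                break;
            case EProbeResult::Timeout:
                std::cout << "cannot detect path existence; path=" << path
                          << "; error=timeout\n";
                break;
        }
    }

    int main() {
        // During startup the scheme cache is not up yet, so the first probe
        // bounces; later probes can still time out if initialization is slow.
        ReportProbe("//Root/.metadata/initialization/migrations", EProbeResult::Undelivered);
        ReportProbe("//Root/.metadata/initialization/migrations", EProbeResult::Timeout);
    }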
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:24.277318Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:24.277356Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:24.278135Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.278352Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:30:24.287296Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.300200Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.326994Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.339224Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.555519Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668555462067738:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.555546Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.564832Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.574246Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.590149Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.647194Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.707481Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.722964Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.738039Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.822785Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668555462068396:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.822823Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.825053Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668555462068401:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.826235Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:24.828602Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668555462068403:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:24.906467Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668555462068454:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:25.032453Z node 4 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [4:7511668559757036022:2511], status: GENERIC_ERROR, issues:
: Error: Optimization, code: 1070
:8:25: Error: Queries with mixed data and scheme operations are not supported. Use separate queries for different types of operations., code: 2009 2025-06-03T10:30:25.032561Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=4&id=ZTE5MmZmOWMtNTNiMDVhODUtY2JjM2VmY2ItN2VkZGJjNmE=, ActorId: [4:7511668559757036015:2507], ActorState: ExecuteState, TraceId: 01jwtnf8g33hrj3vj8q6zdvd9p, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryServiceScripts::ExecuteScriptWithCancelAfterAndTimeout [GOOD] Test command err: Trying to start YDB, gRPC: 20663, MsgBus: 13570 2025-06-03T10:30:18.700207Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668528902630346:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.700233Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d47/r3tmp/tmpV7gERc/pdisk_1.dat 2025-06-03T10:30:18.781798Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20663, node 1 2025-06-03T10:30:18.803918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:18.803953Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:18.805121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:18.817532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:18.817551Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:18.817553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:18.817605Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13570 TClient is connected to server localhost:13570 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
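The compile failure above (code 2009) is the query processor rejecting a single query text that mixes a scheme operation with DML; the fix the message itself suggests is mechanical: submit the scheme change and the data change as two separate queries. A minimal YQL sketch of that split, using a hypothetical table that is not part of the test:

-- Query 1: scheme operation only (hypothetical table).
CREATE TABLE example_table (
    id Uint64,
    payload Utf8,
    PRIMARY KEY (id)
);

-- Query 2: data operation only, sent as a separate request
-- after the scheme query has completed.
UPSERT INTO example_table (id, payload) VALUES (1ul, "hello");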
2025-06-03T10:30:18.894267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.897328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:30:18.906720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.926759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.963338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.979404Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.135778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668533197599238:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.135809Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.183570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.191710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.204354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.219013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.233071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.246823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.260840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.276724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668533197599890:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.276744Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.276762Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668533197599895:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.277536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:19.280529Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668533197599897:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:30:19.337280Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668533197599948:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:19.476213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.476482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.476833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 24789, MsgBus: 4262 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d47/r3tmp/tmpPLRAIe/pdisk_1.dat 2025-06-03T10:30:20.088015Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668537795498421:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:20.088281Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:30:20.111456Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24789, node 2 2025-06-03T10:30:20.122160Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:20.122173Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:20.122176Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:20.122228Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4262 TClient is connected to server localhost:4262 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { Sche ... 
posed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:23.674594Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:23.674712Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668551180640848:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:23.769621Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668551180640899:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:23.940621Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:23.941082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:23.941369Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.087375Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=4&id=YTMzNjM4YjYtMWJiODMxYy04NDQyNmQ0Yy05MDdhMmZjYQ==, ActorId: [4:7511668555475608723:2531], ActorState: ExecuteState, TraceId: 01jwtnf7e13t8v6xqqfbhhhe75, Create QueryResponse for error on request, msg: Trying to start YDB, gRPC: 30146, MsgBus: 10109 2025-06-03T10:30:24.396153Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511668554758062358:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:24.396175Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d47/r3tmp/tmprOJ15z/pdisk_1.dat 2025-06-03T10:30:24.413196Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30146, node 5 2025-06-03T10:30:24.423529Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:24.423545Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:24.423547Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:24.423614Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10109 TClient is connected to server localhost:10109 WaitRootIsUp 'Root'... 
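The repeating NOT_FOUND warnings followed by the "path exist, request accepts it" issue above are the workload service lazily creating /Root/.metadata/workload_manager/pools/default on first use: several TPoolCreatorActor instances race, one wins, and the losers accept the already-existing path after the "doublechecking" retry, so the errors are benign. Pools can also be declared explicitly; a hedged sketch assuming the CREATE RESOURCE POOL syntax of recent YDB releases (pool name and limits are invented for illustration, not taken from the test):

-- Hypothetical pool; settings shown are illustrative only.
CREATE RESOURCE POOL example_pool WITH (
    CONCURRENT_QUERY_LIMIT = 10,
    QUEUE_SIZE = 100,
    DATABASE_LOAD_CPU_THRESHOLD = 80
);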
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:24.500795Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:24.500831Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:24.501336Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.501856Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:24.510626Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.527422Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.594658Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.610222Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:24.839522Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511668554758063978:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.839547Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.848961Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.857771Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.867966Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.882237Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.895809Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.911739Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.923902Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:24.940225Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511668554758064630:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.940257Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511668554758064635:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.940257Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.941069Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:24.951561Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511668554758064637:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:25.002686Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511668559053031984:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:25.110687Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:25.111055Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:30:25.111219Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:30:25.241451Z node 5 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=5&id=OTc1YmE2Y2YtYjI1NWQ2MzItNDYxNTM0YzItMjI0OTgzOQ==, ActorId: [5:7511668559053032499:2530], ActorState: ExecuteState, TraceId: 01jwtnf8jn7vqj80gew2ds3f9x, Create QueryResponse for error on request, msg: |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::CancelBuild >> ColumnBuildTest::AlreadyExists [GOOD] |66.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::AlreadyExists [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:23.503547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:23.503582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:23.503589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:23.503595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:23.503611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:23.503616Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:23.503632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:23.503650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:23.503785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:23.503968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:23.523969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:23.524006Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:23.529037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:23.529201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:23.529254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:23.531460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:23.531544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:23.531689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:23.531751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:23.532403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:23.532453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:23.532839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:23.532854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:23.532865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:23.532875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:23.532882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:23.532904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.534461Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:23.562717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:23.562838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.562970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:23.563033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:23.563050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.564224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:23.564265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:23.564348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.564363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:23.564370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:23.564377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:23.565156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.565177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:23.565185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:23.565701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:23.565718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
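For orientation: the trace above shows the schemeshard proposing and planning the initial ALTER of MyRoot through the fake coordinator before the column-build scenario starts. The user-facing operation the ColumnBuildTest family covers is adding a column to an existing table and backfilling it with a default literal (the default_from_literal { ... uint64_value: 10 } seen further below). The plain add-column form, as a hedged YQL sketch with a hypothetical table (the default backfill itself is driven through the internal index-builder API in the test, not through YQL):

-- Hypothetical table; plain column addition in YQL.
ALTER TABLE example_table ADD COLUMN value Uint64;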
2025-06-03T10:30:23.565725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:23.565734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:23.566637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:23.567165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:23.567218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:23.567456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:23.567494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:23.625396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:23.625548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:23.625561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:23.625612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:23.625636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:23.629998Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:23.630054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:23.630130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 2 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-03T10:30:26.563883Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 106 DatabaseName: "/MyRoot/ServerLessDB" Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } 2025-06-03T10:30:26.564667Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-03T10:30:26.564727Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1151:3020], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:26.564739Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.cpp:186: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: AllocateTxId 106 2025-06-03T10:30:26.564792Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 106, at schemeshard: 72075186233409549 2025-06-03T10:30:26.564806Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2616: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, BuildIndexId: 106, txId# 281474976725757 2025-06-03T10:30:26.564818Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2623: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvAllocateResult, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1151:3020], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:26.565621Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable 2025-06-03T10:30:26.565652Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 AlterMainTable TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1151:3020], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:26.565743Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:429: AlterMainTablePropose 106 AlterMainTable Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 FailOnExist: true 2025-06-03T10:30:26.580087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/ServerLessDB" OperationType: ESchemeOpAlterTable AlterTable { Name: "Table" Columns { Name: "value" Type: "Uint64" DefaultFromLiteral { type { type_id: UINT64 } value { uint64_value: 10 } } IsBuildInProgress: true } } Internal: true } TxId: 281474976725757 TabletId: 72075186233409549 
FailOnExist: true , at schemeshard: 72075186233409549 2025-06-03T10:30:26.580159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /MyRoot/ServerLessDB/Table, pathId: , opId: 281474976725757:0, at schemeshard: 72075186233409549 2025-06-03T10:30:26.580225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976725757:1, propose status:StatusInvalidParameter, reason: Cannot alter type for column 'value', at schemeshard: 72075186233409549 2025-06-03T10:30:26.580952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 281474976725757, response: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-03T10:30:26.580987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976725757, database: /MyRoot/ServerLessDB, subject: , status: StatusInvalidParameter, reason: Cannot alter type for column 'value', operation: ALTER TABLE, path: /MyRoot/ServerLessDB/Table 2025-06-03T10:30:26.581016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6702: Handle: TEvModifySchemeTransactionResult: txId# 281474976725757, status# StatusInvalidParameter 2025-06-03T10:30:26.581023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6704: Message: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-06-03T10:30:26.581039Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2460: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, BuildIndexId: 106, cookie: 106, txId: 281474976725757, status: StatusInvalidParameter 2025-06-03T10:30:26.581063Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2464: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvModifySchemeTransactionResult, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: AlterMainTable, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [1:1151:3020], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 0, LockTxStatus: StatusSuccess, LockTxDone: 0, InitiateTxId: 0, InitiateTxStatus: StatusSuccess, InitiateTxDone: 0, SnapshotStepId: 0, ApplyTxId: 0, ApplyTxStatus: StatusSuccess, ApplyTxDone: 0, UnlockTxId: 0, UnlockTxStatus: StatusSuccess, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }}, record: Status: StatusInvalidParameter Reason: "Cannot alter type for column \'value\'" TxId: 281474976725757 SchemeshardId: 72075186233409549 2025-06-03T10:30:26.581397Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:2430: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuilder::TTxReply: ReplyOnCreation, BuildIndexId: 106, status: BAD_REQUEST, error: At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column 'value', replyTo: [1:1151:3020], message: TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got 
unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } BUILDCOLUMN RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 106 Status: BAD_REQUEST Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } IndexBuild { Id: 106 Issues { message: "At AlterMainTable state got unsuccess propose result, status: StatusInvalidParameter, reason: Cannot alter type for column \'value\'" severity: 1 } State: STATE_PREPARING Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "value" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } } >> ColumnBuildTest::BaseCase [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> CommitOffset::Commit_WithSession_ToPastParentPartition [GOOD] Test command err: 2025-06-03T10:28:47.329271Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668138759235713:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.329328Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:47.367739Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e35/r3tmp/tmpxQgh8S/pdisk_1.dat 2025-06-03T10:28:47.402774Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:47.402926Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668138759235692:2079] 1748946527329009 != 1748946527329012 TServer::EnableGrpc on GrpcPort 19891, node 1 2025-06-03T10:28:47.417094Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000e35/r3tmp/yandexRrBrYR.tmp 2025-06-03T10:28:47.417105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000e35/r3tmp/yandexRrBrYR.tmp 2025-06-03T10:28:47.417178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000e35/r3tmp/yandexRrBrYR.tmp 2025-06-03T10:28:47.417232Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:47.424231Z INFO: TTestServer started on Port 15274 GrpcPort 19891 2025-06-03T10:28:47.431822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.431854Z 
node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.433059Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15274 PQClient connected to localhost:19891 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.477608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.491437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:47.753234Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138759236490:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.753274Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.753506Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138759236517:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.754368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.754649Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668138759236548:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.754667Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.756350Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668138759236519:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:47.797689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.807029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.817627Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668138759236703:2505] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.833527Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668138759236719:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:28:47.833657Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NDBiNGZkZGQtNWY3YTZlZDgtYjIxMzhkODYtZThjNTUyOGM=, ActorId: [1:7511668138759236487:2334], ActorState: ExecuteState, TraceId: 01jwtnc9g8c14rc4ncyad1ryd9, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:28:47.834236Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:28:47.872806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668138759236879:2608] 2025-06-03T10:28:52.329509Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668138759235713:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.329563Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.054571Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.057773Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.058144Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668164529040837:2673], Recipient [1:7511668138759236143:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.058156Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.058159Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.058165Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668164529040833:2670], Recipient [1:7511668138759236143:2194]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-03T10:28:53.058167Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.065728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThres ... lableSize 2025-06-03T10:30:25.410152Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.410165Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.410176Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.410178Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.410183Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.410184Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:25.410185Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.410191Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:25.410198Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668547834730157:2855], Partition 4, Sender [0:0:0], Recipient [7:7511668547834730243:2868], Cookie: 0 2025-06-03T10:30:25.410201Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668547834730243:2868]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.410203Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.410206Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.410211Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.410213Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.410215Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:25.413196Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668526359892222:2451], Partition 0, Sender [0:0:0], Recipient [7:7511668526359892279:2455], Cookie: 0 2025-06-03T10:30:25.413223Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668526359892279:2455]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.413228Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.413242Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.413268Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.413271Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.413277Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:25.494127Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7511668547834730142:2854], Partition 3, Sender [7:7511668547834730235:2865], Recipient [7:7511668547834730229:2863], Cookie: 0 2025-06-03T10:30:25.494161Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7511668547834730235:2865], Recipient [7:7511668547834730229:2863]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:25.494170Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:25.495613Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7511668547834730157:2855], Partition 4, Sender [7:7511668547834730248:2870], Recipient [7:7511668547834730243:2868], Cookie: 0 2025-06-03T10:30:25.495625Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7511668547834730248:2870], Recipient [7:7511668547834730243:2868]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:25.495627Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:25.498693Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668534949827668:2679], Partition 2, Sender [0:0:0], Recipient [7:7511668534949827744:2686], Cookie: 0 2025-06-03T10:30:25.498726Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668534949827744:2686]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.498731Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.498748Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.498783Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.498797Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.498805Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:25.510450Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668547834730142:2854], Partition 3, Sender [0:0:0], Recipient [7:7511668547834730229:2863], Cookie: 0 2025-06-03T10:30:25.510461Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668534949827670:2680], Partition 1, Sender [0:0:0], Recipient [7:7511668534949827747:2688], Cookie: 0 2025-06-03T10:30:25.510478Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668547834730229:2863]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510479Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668534949827747:2688]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510484Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510484Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510501Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.510502Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.510529Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.510530Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.510532Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.510534Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.510538Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:25.510540Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:25.510554Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668547834730157:2855], Partition 4, Sender [0:0:0], Recipient [7:7511668547834730243:2868], Cookie: 0 2025-06-03T10:30:25.510559Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668547834730243:2868]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510562Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.510566Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.510572Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.510574Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.510580Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:25.513517Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668526359892222:2451], Partition 0, Sender [0:0:0], Recipient [7:7511668526359892279:2455], Cookie: 0 2025-06-03T10:30:25.513540Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668526359892279:2455]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.513545Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:25.513558Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:25.513579Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:25.513581Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:25.513587Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 >> TPartBtreeIndexIteration::FewNodes_Groups [GOOD] >> TPartBtreeIndexIteration::FewNodes_History >> TFlatExecutorLeases::BasicsInitialLeaseSleepTimeout [GOOD] >> TFlatTableDatetime::TestDate [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundSnapshot [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen1 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionToRegular [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BaseCase [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:24.394919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:24.394957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:24.394964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:24.394970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:24.394985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:24.394990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:24.395000Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:24.395024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:24.395117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:24.395189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:24.407326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:24.407365Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:24.410949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:24.411056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:24.411104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:24.413245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:24.413379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:24.413536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:24.413618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:24.414624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:24.414689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:24.415016Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:24.415027Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:24.415037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:24.415044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db,
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:24.415049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:24.415066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.416266Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:24.444395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:24.444514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.444634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:24.444701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:24.444722Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.446215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:24.446280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:24.446398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.446419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:24.446429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:24.446437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:24.447693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.447735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:24.447746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:24.448660Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.448682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:24.448691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:24.448701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:24.449610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:24.450402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:24.450462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:24.450744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:24.450787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:24.450797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:24.450879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:24.450892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:24.450938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:24.450953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:24.452611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:24.452624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: 
TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:24.452687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... AffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-03T10:30:26.941098Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-03T10:30:26.941123Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:26.941162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-03T10:30:26.941195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-03T10:30:26.941238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-03T10:30:26.941250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-03T10:30:26.941255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-03T10:30:26.952643Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877763, Sender [1:1821:3682], Recipient [1:757:2645]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1821:3682] ServerId: [1:1823:3684] } 2025-06-03T10:30:26.952678Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3163: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:30:27.005510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 650, transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-03T10:30:27.005573Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-03T10:30:27.005587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-03T10:30:27.005597Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976725761:0 128 -> 240 2025-06-03T10:30:27.006197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-03T10:30:27.006213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-03T10:30:27.006230Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-03T10:30:27.006235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:27.006242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-03T10:30:27.006246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:27.006251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-03T10:30:27.006270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:564:2501] message: TxId: 281474976725761 2025-06-03T10:30:27.006281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:27.006287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-03T10:30:27.006292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976725761:0 2025-06-03T10:30:27.006314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-03T10:30:27.006968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-03T10:30:27.006989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976725761 2025-06-03T10:30:27.007008Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfoId: 106 2025-06-03T10:30:27.007038Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], 
IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:27.007459Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-03T10:30:27.007482Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:27.007491Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:30:27.007883Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-03T10:30:27.007902Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }}
2025-06-03T10:30:27.007909Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-03T10:30:27.007937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:30:27.007944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1169:3038] TestWaitNotification: OK eventTxId 106 2025-06-03T10:30:27.008370Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-03T10:30:27.008478Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::BaseCase [FAIL] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:19.490882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:19.490909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:19.490914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:19.490918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:19.490933Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:19.490936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:19.490942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:19.490955Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:19.491071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:19.491148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:19.505355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:19.505388Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:19.510138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:19.510281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:19.510325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:19.512909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:19.512983Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:19.513121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.513205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:19.513877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:19.513927Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:19.514298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:19.514308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:19.514316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:19.514322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:19.514327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:19.514344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.515838Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:19.534950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:19.535041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.535108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:19.535158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:19.535171Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.535890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.535918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:19.535965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.535973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:19.535977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:19.535981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:19.536401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.536412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:19.536415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:19.536783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.536796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:19.536802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.536823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready
parts: 1/1 2025-06-03T10:30:19.537437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:19.537908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:19.537954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:19.538171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:19.538198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:19.538210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.538285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:19.538293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:19.538330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:19.538341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:19.538795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:19.538804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:19.538844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
1, UnlockTxId: 281474976730760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 611, upload bytes: 11212, read rows: 609, read bytes: 10928 }, Billed: { upload rows: 611, upload bytes: 11212, read rows: 609, read bytes: 10928 }} 2025-06-03T10:30:24.015004Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:30:24.015536Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 109 Done 2025-06-03T10:30:24.015559Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 109 Done TBuildInfo{ IndexBuildId: 109, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: index1, IndexColumn: embedding, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976725757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976730759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976730760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 611, upload bytes: 11212, read rows: 609, read bytes: 10928 }, Billed: { upload rows: 611, upload bytes: 11212, read rows: 609, read bytes: 10928 }} 2025-06-03T10:30:24.015566Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 109, subscribers count# 1 2025-06-03T10:30:24.015601Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-03T10:30:24.015609Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:2143:3740] TestWaitNotification: OK eventTxId 109 2025-06-03T10:30:24.015851Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ServerLessDB/Table/index1/indexImplPostingTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72075186233409549 2025-06-03T10:30:24.015938Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409549 describe path "/MyRoot/ServerLessDB/Table/index1/indexImplPostingTable" took 104us result status StatusSuccess 2025-06-03T10:30:24.016191Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ServerLessDB/Table/index1/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72075186233409549 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976725758 CreateStep: 300 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { 
GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409555 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 3 
ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409550 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409551 SchemeShard: 72075186233409549 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 8 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SharedHive: 72057594037968897 ServerlessComputeResourcesMode: EServerlessComputeResourcesModeShared } } PathId: 5 PathOwnerId: 72075186233409549, at schemeshard: 72075186233409549 2025-06-03T10:30:24.017570Z node 2 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [2:3609:5141], Recipient [2:1040:2880]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "(\n (let range \'(\'(\'__ydb_parent (Null) (Void)) \'(\'key (Null) (Void)) ))\n (let columns \'(\'__ydb_parent))\n (let result (SelectRange \'__user__indexImplPostingTable range columns \'()))\n (return (AsList (SetResult \'Result result) ))\n )" } } ... posting table contains 200 rows 2025-06-03T10:30:24.021192Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__list.cpp:23: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" PageSize: 100 PageToken: "" 2025-06-03T10:30:24.022341Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: Reply Status: SUCCESS Entries { Id: 109 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } NextPageToken: "0" BUILDINDEX RESPONSE LIST: NKikimrIndexBuilder.TEvListResponse Status: SUCCESS Entries { Id: 109 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } NextPageToken: "0" 2025-06-03T10:30:24.022708Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 109 2025-06-03T10:30:24.022759Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 109 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 109 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" index { name: "index1" index_columns: "embedding" global_vector_kmeans_tree_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 MaxBatchBytes: 8388608 MaxBatchRetries: 50 } } Progress: 100 EndTime { } } strings (meteringMessages) and (meteringData) are different at ydb/core/tx/schemeshard/ut_index_build/ut_vector_index_build.cpp:192, virtual void 
NTestSuiteVectorIndexBuildTest::TTestCaseBaseCase::Execute_(NUnitTest::TTestContext &): {"usage":{"start":0,"quantity":433,"finish":0,"unit":"request_unit","type":"delta"},"tags":{},"id":"(109-72075186233409549-2-0-0-0-0-611-609-11212-10928|109-72075186233409549-2-0-0-0-0-611-609-11032-11108)","cloud_id":"CLOUD_ID_VAL","source_wt":0,"source_id":"sless-docapi-ydb-ss","resource_id":"DATABASE_ID_VAL","schema":"ydb.serverless.requests.v1","folder_id":"FOLDER_ID_VAL","version":"1.0.0"} TBackTrace::Capture()+28 (0x106E259C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString> const&, bool)+137 (0x10896329) NTestSuiteVectorIndexBuildTest::TTestCaseBaseCase::Execute_(NUnitTest::TTestContext&)+18646 (0x105D2EE6) NTestSuiteVectorIndexBuildTest::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0x105DAAE7) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+126 (0x108981DE) NTestSuiteVectorIndexBuildTest::TCurrentTest::Execute()+426 (0x105DA4AA) NUnitTest::TTestFactory::Execute()+803 (0x10898953) NUnitTest::RunMain(int, char**)+3021 (0x108A6C6D) ??+0 (0x7F90FCDD8D90) __libc_start_main+128 (0x7F90FCDD8E40) _start+41 (0xF6E6029) >> TFlatTableExecutor_BackgroundCompactions::TestRunBackgroundCompactionGen2 [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundSnapshotPriorityByTime [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] >> TFlatTableExecutor_BackgroundCompactions::TestChangeBackgroundCompactionPriorityByTime [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default >> TChargeBTreeIndex::OneNode_History [GOOD] >> TChargeBTreeIndex::OneNode_Groups_History >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_Default [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True >> ColumnBuildTest::ValidDefaultValue [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadRangeLimitThenLimitWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! 
new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! 
new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [13:55:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:58:2057] recipient: [13:51:2095] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:75:2057] recipient: [13:14:2061] !Reboot 72057594037927937 (actor [13:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:77:2057] recipient: [13:36:2083] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:79:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:81:2057] recipient: [13:80:2110] Leader for TabletID 72057594037927937 is [13:82:2111] sender: [13:83:2057] recipient: [13:80:2110] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:82:2111] Leader for TabletID 72057594037927937 is [13:82:2111] sender: [13:168:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] !Reboot 72057594037927937 (actor [14:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:77:2057] recipient: [14:36:2083] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:80:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:81:2057] recipient: [14:79:2110] Leader for TabletID 72057594037927937 is [14:82:2111] sender: [14:83:2057] recipient: [14:79:2110] !Reboot 72057594037927937 (actor [14:57:2097]) rebooted! !Reboot 72057594037927937 (actor [14:57:2097]) tablet resolver refreshed! 
new actor is[14:82:2111] Leader for TabletID 72057594037927937 is [14:82:2111] sender: [14:168:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] !Reboot 72057594037927937 (actor [15:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:78:2057] recipient: [15:36:2083] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:80:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:82:2057] recipient: [15:81:2110] Leader for TabletID 72057594037927937 is [15:83:2111] sender: [15:84:2057] recipient: [15:81:2110] !Reboot 72057594037927937 (actor [15:57:2097]) rebooted! !Reboot 72057594037927937 (actor [15:57:2097]) tablet resolver refreshed! new actor is[15:83:2111] Leader for TabletID 72057594037927937 is [15:83:2111] sender: [15:169:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:81:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:84:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:85:2057] recipient: [16:83:2113] Leader for TabletID 72057594037927937 is [16:86:2114] sender: [16:87:2057] recipient: [16:83:2113] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:86:2114] Leader for TabletID 72057594037927937 is [16:86:2114] sender: [16:172:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:81:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:84:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:85:2057] recipient: [17:83:2113] Leader for TabletID 72057594037927937 is [17:86:2114] sender: [17:87:2057] recipient: [17:83:2113] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! 
new actor is[17:86:2114] Leader for TabletID 72057594037927937 is [17:86:2114] sender: [17:172:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:82:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:85:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:86:2057] recipient: [18:84:2113] Leader for TabletID 72057594037927937 is [18:87:2114] sender: [18:88:2057] recipient: [18:84:2113] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! new actor is[18:87:2114] Leader for TabletID 72057594037927937 is [18:87:2114] sender: [18:105:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:84:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:87:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:88:2057] recipient: [19:86:2115] Leader for TabletID 72057594037927937 is [19:89:2116] sender: [19:90:2057] recipient: [19:86:2115] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:89:2116] Leader for TabletID 72057594037927937 is [19:89:2116] sender: [19:175:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:84:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:87:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:88:2057] recipient: [20:86:2115] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:90:2057] recipient: [20:86:2115] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! 
new actor is[20:89:2116] Leader for TabletID 72057594037927937 is [20:89:2116] sender: [20:175:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:85:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:88:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:89:2057] recipient: [21:87:2115] Leader for TabletID 72057594037927937 is [21:90:2116] sender: [21:91:2057] recipient: [21:87:2115] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! new actor is[21:90:2116] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False >> KqpQueryService::CloseSessionsWithLoad [GOOD] >> KqpQueryService::ClosedSessionRemovedFromPool >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False >> ColumnBuildTest::CancelBuild [GOOD] >> TPartBtreeIndexIteration::FewNodes_History [GOOD] >> TPartBtreeIndexIteration::FewNodes_Sticky >> TVersions::Wreck0Reverse [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_False_EnableLocalDBFlatIndex_False [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::ValidDefaultValue [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:25.680768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:25.680795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:25.680799Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: 
StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:25.680803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:25.680817Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:25.680820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:25.680829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:25.680846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:25.680934Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:25.681008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:25.691672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:25.691699Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:25.695487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:25.695594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:25.695641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:25.698055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:25.698149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:25.698304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:25.698376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:25.699176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:25.699236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:25.699640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:25.699652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:25.699664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:25.699674Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:25.699681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:25.699704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.701063Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:25.725272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:25.725416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.725512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:25.725572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:25.725588Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.726569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:25.726610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:25.726691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.726705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:25.726725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:25.726732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:25.727411Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.727434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:25.727442Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:25.727991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.728005Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:25.728012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:25.728021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:25.728760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:25.729381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:25.729456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:25.729666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:25.729698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:25.729705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:25.729768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:25.729775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:25.729809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:25.729820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:25.730407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:30:25.730422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:25.730505Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ffectedSet { TabletId: 72075186233409549 Flags: 2 } ExecLevel: 0 TxId: 281474976725761 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72075186233409550 2025-06-03T10:30:28.143178Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-03T10:30:28.143199Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:28.143235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 281474976725761:4294967295 from tablet: 72075186233409549 to tablet: 72075186233409550 cookie: 0:281474976725761 msg type: 269090816 2025-06-03T10:30:28.143266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 281474976725761, partId: 4294967295, tablet: 72075186233409550 2025-06-03T10:30:28.143305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-03T10:30:28.143314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 0/1, is published: true 2025-06-03T10:30:28.143320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 281474976725761, at schemeshard: 72075186233409549 2025-06-03T10:30:28.154250Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877763, Sender [1:1821:3682], Recipient [1:757:2645]: NKikimr::TEvTabletPipe::TEvClientDestroyed { TabletId: 72075186233409549 ClientId: [1:1821:3682] ServerId: [1:1823:3684] } 2025-06-03T10:30:28.154280Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3163: StateWork, processing event TEvTabletPipe::TEvClientDestroyed 2025-06-03T10:30:28.207024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 650, 
transactions count in step: 1, at schemeshard: 72075186233409549 2025-06-03T10:30:28.207086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976725761 AckTo { RawX1: 0 RawX2: 0 } } Step: 650 MediatorID: 72075186233409551 TabletID: 72075186233409549, at schemeshard: 72075186233409549 2025-06-03T10:30:28.207100Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72075186233409549] TDropLock TPropose opId# 281474976725761:0 HandleReply TEvOperationPlan: step# 650 2025-06-03T10:30:28.207110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976725761:0 128 -> 240 2025-06-03T10:30:28.207754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976725761:0, at schemeshard: 72075186233409549 2025-06-03T10:30:28.207771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72075186233409549] TDone opId# 281474976725761:0 ProgressState 2025-06-03T10:30:28.207790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-03T10:30:28.207797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:28.207805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976725761:0 progress is 1/1 2025-06-03T10:30:28.207809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:28.207816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976725761, ready parts: 1/1, is published: true 2025-06-03T10:30:28.207834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:564:2501] message: TxId: 281474976725761 2025-06-03T10:30:28.207842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976725761 ready parts: 1/1 2025-06-03T10:30:28.207849Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976725761:0 2025-06-03T10:30:28.207854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976725761:0 2025-06-03T10:30:28.207871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409549, LocalPathId: 2] was 3 2025-06-03T10:30:28.208633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976725761 2025-06-03T10:30:28.208652Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976725761 2025-06-03T10:30:28.208668Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfoId: 106 2025-06-03T10:30:28.208701Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976725761, buildInfo: TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: 
[OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:28.209153Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking 2025-06-03T10:30:28.209175Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Unlocking TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Unlocking, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:28.209185Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:30:28.209673Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done 2025-06-03T10:30:28.209696Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 106 Done TBuildInfo{ IndexBuildId: 106, Uid: , DomainPathId: [OwnerId: 72075186233409549, LocalPathId: 1], TablePathId: [OwnerId: 72075186233409549, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [1:1149:3018], AlterMainTableTxId: 281474976725757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976725758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976725759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 450, ApplyTxId: 281474976725760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976725761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 2424, 
read rows: 101, read bytes: 2424 }, Billed: { upload rows: 101, upload bytes: 2424, read rows: 101, read bytes: 2424 }} 2025-06-03T10:30:28.209702Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 106, subscribers count# 1 2025-06-03T10:30:28.209732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:30:28.209740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:1169:3038] TestWaitNotification: OK eventTxId 106 2025-06-03T10:30:28.210210Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot/ServerLessDB" IndexBuildId: 106 2025-06-03T10:30:28.210320Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 106 State: STATE_DONE Settings { source_path: "/MyRoot/ServerLessDB/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "ColumnValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 1111 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { } } >> KqpQueryService::TableSink_OltpUpdate [GOOD] >> KqpQueryService::TableSink_Oltp_Replace+UseSink >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_TurnOff [GOOD] >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations >> CommitOffset::DistributedTxCommit_Flat_CheckOffsetCommitForDifferentCases [GOOD] >> TPersQueueMirrorer::TestBasicRemote >> KqpQueryService::TableSink_OlapOrder [GOOD] >> KqpQueryService::TableSink_OlapRWQueries >> TFlatTableExecutor_BTreeIndex::EnableLocalDBBtreeIndex_True_Generations [GOOD] >> TFlatTableExecutor_CachePressure::TestNotEnoughLocalCache [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan |66.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |66.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |66.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_subdomain/ydb-core-tx-schemeshard-ut_subdomain |66.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpQueryService::TableSink_OltpOrder [GOOD] >> TFlatTableExecutor_Cold::ColdBorrowScan [GOOD] >> TFlatTableExecutor_ColumnGroups::TestManyRows >> TPartBtreeIndexIteration::FewNodes_Sticky [GOOD] >> TPartBtreeIndexIteration::FewNodes_Slices ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TVersions::Wreck0Reverse [GOOD] Test command err: 00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:04.235930Z 00000.005 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144 00000.005 II| 
FAKE_ENV: Starting storage for BS group 0 00000.006 II| FAKE_ENV: Starting storage for BS group 1 00000.006 II| FAKE_ENV: Starting storage for BS group 2 00000.006 II| FAKE_ENV: Starting storage for BS group 3 00000.007 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor 00000.007 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} commited cookie 2 for step 1 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} hope 1 -> done Change{2, redo 0b alter 209b annex 0, ~{ } -{ }, 0 gb} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:2} Tx{1, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxInitSchema} release 4194304b of static, Memory{0 dyn 0} 00000.007 DD| TABLET_EXECUTOR: Leader{1:2:3} commited cookie 1 for step 2 00000.008 NN| TABLET_SAUSAGECACHE: Update config MemoryLimit: 8388608 ReplacementPolicy: ThreeLeveledLRU 00000.008 NN| TABLET_SAUSAGECACHE: Switch replacement policy from S3FIFO to ThreeLeveledLRU 00000.008 NN| TABLET_SAUSAGECACHE: Switch replacement policy done from S3FIFO to ThreeLeveledLRU 00000.008 II| TABLET_SAUSAGECACHE: Limit memory consumer with 16777216TiB 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{2, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:3} Tx{2, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} commited cookie 1 for step 3 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{3, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:4} Tx{3, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.008 DD| TABLET_EXECUTOR: Leader{1:2:5} commited cookie 1 for step 4 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: 
Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{4, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:5} Tx{4, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} commited cookie 1 for step 5 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{5, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:6} Tx{5, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} commited cookie 1 for step 6 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{6, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:7} Tx{6, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} commited cookie 1 for step 7 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.009 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{7, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:8} Tx{7, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} commited cookie 1 for step 8 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{8, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:9} Tx{8, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} commited cookie 1 for step 9 00000.010 DD| TABLET_EXECUTOR: 
Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{9, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:10} Tx{9, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} commited cookie 1 for step 10 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.010 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{10, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:11} Tx{10, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} commited cookie 1 for step 11 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{11, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:12} Tx{11, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} commited cookie 1 for step 12 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{12, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:13} Tx{12, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} commited cookie 1 for step 13 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, 
NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{13, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.011 DD| TABLET_EXECUTOR: Leader{1:2:14} Tx{13, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} commited cookie 1 for step 14 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} queued, type NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} took 4194304b of static mem, Memory{4194304 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} hope 1 -> done Change{14, redo 102475b alter 0b annex 0, ~{ 101 } -{ }, 0 gb} 00000.012 DD| TABLET_EXECUTOR: Leader{1:2:15} Tx{14, NKikimr::NSharedCache::NTestSuiteTSharedPageCache::TTxWriteRow} release 4194304b of static, Memory{0 dyn 0} 00000.012 DD| TABLET_EXECUTOR: Leader{ ... ageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 3 ] Cookie: 4 2025-06-03T10:30:05.780061Z node 35 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-03T10:30:05.780161Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-03T10:30:05.780169Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-03T10:30:05.780196Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 2 3 4 5 ] 2025-06-03T10:30:05.780204Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 2 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-03T10:30:05.780251Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:5:2052] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 6 7 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-03T10:30:05.780265Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-03T10:30:05.780269Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-03T10:30:05.780277Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [35:5:2052] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... 
waiting for NKikimr::NSharedCache::TEvRequest 2025-06-03T10:30:05.780293Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [35:6:2053] 2025-06-03T10:30:05.780301Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [35:6:2053] cookie 4 class AsyncLoad from cache [ ] already requested [ 1 5 ] to request [ 9 10 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 2 ] Cookie: 20 ... waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-03T10:30:05.780346Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [35:5:2052] 2025-06-03T10:30:05.780355Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [35:5:2052] class AsyncLoad error RACE cookie 3 2025-06-03T10:30:05.780361Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [35:5:2052] 2025-06-03T10:30:05.780365Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 1 2025-06-03T10:30:05.780372Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:1] owner [35:5:2052] class AsyncLoad error RACE cookie 2 2025-06-03T10:30:05.780376Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:1] owner [35:5:2052] 2025-06-03T10:30:05.780380Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [35:5:2052] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 1 PageCollection: [1:0:256:0:0:0:1] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for fetches #4 2025-06-03T10:30:05.780438Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 2 ] 2025-06-03T10:30:05.780447Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 1 2025-06-03T10:30:05.780453Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 5 9 ] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 5 9 ] Cookie: 20 ... 
waiting for fetches #4 2025-06-03T10:30:05.780485Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 5 9 ] 2025-06-03T10:30:05.780491Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:1] class AsyncLoad cookie 2 2025-06-03T10:30:05.780495Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 10 ] 2025-06-03T10:30:05.780502Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 2025-06-03T10:30:05.780523Z node 35 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1012: Drop page collection [1:0:256:0:0:0:1] pages [ 2 ] owner [35:6:2053] ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 ... waiting for fetches #4 (done) Checking fetches#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 10 ] Cookie: 10 ... waiting for results #4 2025-06-03T10:30:05.780556Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 10 ] 2025-06-03T10:30:05.780562Z node 35 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [35:6:2053] class AsyncLoad pages [ 1 5 9 10 ] cookie 4 ... waiting for results #4 (done) Checking results#4 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 5 9 10 ] Cookie: 4 2025-06-03T10:30:05.793036Z node 36 :TABLET_SAUSAGECACHE NOTICE: shared_sausagecache.cpp:1265: Bootstrap with config MemoryLimit: 456 AsyncQueueInFlyLimit: 19 ... waiting for NKikimr::NSharedCache::TEvRequest 2025-06-03T10:30:05.793274Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:1] 2025-06-03T10:30:05.793286Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:1] owner [36:5:2052] 2025-06-03T10:30:05.793322Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:1] owner [36:5:2052] cookie 1 class AsyncLoad from cache [ ] already requested [ ] to request [ 1 ] 2025-06-03T10:30:05.793332Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:1] async queue pages [ 1 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-03T10:30:05.793378Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:385: Add page collection [1:0:256:0:0:0:2] 2025-06-03T10:30:05.793382Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:400: Add page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-03T10:30:05.793390Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 2 class AsyncLoad from cache [ ] already requested [ ] to request [ 10 11 ] 2025-06-03T10:30:05.793395Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:681: Request page collection [1:0:256:0:0:0:2] async queue pages [ 10 ] ... 
waiting for NKikimr::NSharedCache::TEvRequest (done) ... waiting for NKikimr::NSharedCache::TEvRequest ... blocking NKikimr::NTabletFlatExecutor::NBlockIO::TEvFetch from SAUSAGE_CACHE to SAUSAGE_BIO_A cookie 0 2025-06-03T10:30:05.793420Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:597: Request page collection [1:0:256:0:0:0:2] owner [36:6:2053] cookie 3 class AsyncLoad from cache [ ] already requested [ ] to request [ 12 ] ... waiting for NKikimr::NSharedCache::TEvRequest (done) Checking fetches#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 10 PageCollection: [1:0:256:0:0:0:2] Pages: [ 10 ] Cookie: 10 ... waiting for NKikimr::NSharedCache::TEvUnregister 2025-06-03T10:30:05.793472Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:803: Unregister owner [36:6:2053] 2025-06-03T10:30:05.793481Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 2 2025-06-03T10:30:05.793487Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:1072: Send page collection error [1:0:256:0:0:0:2] owner [36:6:2053] class AsyncLoad error RACE cookie 3 2025-06-03T10:30:05.793492Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:816: Remove page collection [1:0:256:0:0:0:2] owner [36:6:2053] 2025-06-03T10:30:05.793496Z node 36 :TABLET_SAUSAGECACHE DEBUG: shared_sausagecache.cpp:823: Remove owner [36:6:2053] ... waiting for NKikimr::NSharedCache::TEvUnregister (done) ... waiting for results #3 ... waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 Actual: PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 2 PageCollection: [1:0:256:0:0:0:2] Pages: [ ] Cookie: 3 ... waiting for results #3 2025-06-03T10:30:05.793545Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:1] status OK pages [ 1 ] 2025-06-03T10:30:05.793553Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:1050: Send page collection result [1:0:256:0:0:0:1] owner [36:5:2052] class AsyncLoad pages [ 1 ] cookie 1 2025-06-03T10:30:05.793561Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 2 2025-06-03T10:30:05.793568Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:730: Drop pending page collection request [1:0:256:0:0:0:2] class AsyncLoad cookie 3 ... 
waiting for results #3 (done) Checking results#3 Expected: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Actual: PageCollection: [1:0:256:0:0:0:1] Pages: [ 1 ] Cookie: 1 Checking fetches#3 Expected: Actual: 2025-06-03T10:30:05.803805Z node 36 :TABLET_SAUSAGECACHE TRACE: shared_sausagecache.cpp:867: Receive page collection [1:0:256:0:0:0:2] status OK pages [ 10 ] Checking results#3 Expected: Actual: Checking fetches#3 Expected: Actual:
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::CancelBuild [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:26.668739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:26.668764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:26.668768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:26.668772Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:26.668785Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:26.668788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:26.668795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:26.668811Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:26.668899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:26.668963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:26.680923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:26.680946Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:26.684333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:26.684442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:26.684480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:26.686527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:26.686605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:26.686742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:26.686806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:26.687566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:26.687615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:26.687963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:26.687982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:26.687997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:26.688006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:26.688013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:26.688036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.689672Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:26.709601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:26.709701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.709771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:26.709818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:26.709830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:30:26.711511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:26.711551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:26.711623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.711635Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:26.711641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:26.711648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:26.712289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.712302Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:26.712307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:26.712729Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.712742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:26.712748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:26.712756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:26.713490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:26.713975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:26.714019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:26.714223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-03T10:30:26.714251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:26.714260Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:26.714321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:26.714330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:26.714367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:26.714379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:26.715003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:26.715014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:26.715065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
TICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:28.526721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 281474976710761 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000007 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:28.526728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_lock.cpp:44: [72057594046678944] TDropLock TPropose opId# 281474976710761:0 HandleReply TEvOperationPlan: step# 5000007 2025-06-03T10:30:28.526733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710761:0 128 -> 240 2025-06-03T10:30:28.527093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 281474976710761:0, at schemeshard: 72057594046678944 2025-06-03T10:30:28.527102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 281474976710761:0 ProgressState 2025-06-03T10:30:28.527114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:30:28.527117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:28.527122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710761:0 progress is 1/1 2025-06-03T10:30:28.527125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:28.527128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710761, ready parts: 1/1, is published: true 2025-06-03T10:30:28.527136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:122:2147] message: TxId: 281474976710761 2025-06-03T10:30:28.527141Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710761 ready parts: 1/1 2025-06-03T10:30:28.527145Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710761:0 2025-06-03T10:30:28.527148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710761:0 2025-06-03T10:30:28.527158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 FAKE_COORDINATOR: Erasing txId 281474976710761 2025-06-03T10:30:28.527683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710761 2025-06-03T10:30:28.527715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710761 2025-06-03T10:30:28.527731Z node 1 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710761, buildInfoId: 102 2025-06-03T10:30:28.527754Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: 
TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710761, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3022], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:28.528358Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking 2025-06-03T10:30:28.528381Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3022], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:28.528391Z node 1 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-03T10:30:28.528947Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-03T10:30:28.528976Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeInvalid, IndexName: , State: Cancelled, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [1:1169:3022], AlterMainTableTxId: 281474976710757, AlterMainTableTxStatus: StatusAccepted, AlterMainTableTxDone: 1, LockTxId: 281474976710758, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710759, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710760, 
ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710761, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:28.528984Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-03T10:30:28.529012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:28.529018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:1193:3046] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:28.529333Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-03T10:30:28.529432Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" max_shards_in_flight: 2 column_build_operation { column { ColumnName: "DefaultValue" default_from_literal { type { type_id: UINT64 } value { uint64_value: 10 } } } } ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-03T10:30:28.529617Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:28.529661Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 48us result status StatusSuccess 2025-06-03T10:30:28.529745Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 4 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "DefaultValue" Type: "Uint64" TypeId: 4 Id: 4 NotNull: false DefaultFromLiteral { type { type_id: 
UINT64 } value { uint64_value: 10 } } IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 4 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> THiveTest::TestUpdateChannelValues
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OltpOrder [GOOD]
Test command err:
Trying to start YDB, gRPC: 8700, MsgBus: 25349 2025-06-03T10:30:18.454390Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668528684106738:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.454538Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d4b/r3tmp/tmptG54eV/pdisk_1.dat 2025-06-03T10:30:18.527394Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668528684106581:2079] 1748946618452486 != 1748946618452489 2025-06-03T10:30:18.530598Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8700, node 1 2025-06-03T10:30:18.548583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:18.548596Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:18.548598Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:18.548658Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25349 2025-06-03T10:30:18.596437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:18.596476Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:18.597431Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server
localhost:25349 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:18.628818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.919561Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668528684107237:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.919594Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.977734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.049485Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668532979074685:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.049562Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.051037Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668532979074690:2344], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.052063Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:30:19.054323Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668532979074692:2345], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:30:19.130445Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668532979074743:2418] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 9602, MsgBus: 32210 2025-06-03T10:30:19.552040Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668534889275495:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:19.552144Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d4b/r3tmp/tmpgoaywO/pdisk_1.dat 2025-06-03T10:30:19.568178Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9602, node 2 2025-06-03T10:30:19.585832Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:19.585847Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:19.585850Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:19.585919Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32210 TClient is connected to server localhost:32210 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:19.652547Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:19.652584Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:19.653723Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:30:19.656542Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.660282Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:20.034257Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668539184243405:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.034298Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.036560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:20.106473Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668539184243508:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.106524Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.106621Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668539184243513:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:20.107480Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:30:20.109890Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:30:20.109949Z node 2 :KQP_WORKL ... n/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, ActorId: [3:7511668541409881823:2395], ActorState: ExecuteState, TraceId: 01jwtnf7f5a2jzy477kbcbmvst, Create QueryResponse for error on request, msg: 2025-06-03T10:30:24.008646Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=97; 2025-06-03T10:30:24.008770Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668554294786787:2337], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7511668537114914307:2337]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7511668554294786787:2337].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:24.008798Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668554294786777:2337], SessionActorId: [3:7511668537114914307:2337], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668537114914307:2337]. isRollback=0 2025-06-03T10:30:24.008879Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7fx1m4kgrry3y506dbk, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668554294786778:2337] from: [3:7511668554294786777:2337] 2025-06-03T10:30:24.008902Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668554294786778:2337] TxId: 281474976715756. Ctx: { TraceId: 01jwtnf7fx1m4kgrry3y506dbk, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:24.008977Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7fx1m4kgrry3y506dbk, Create QueryResponse for error on request, msg: 2025-06-03T10:30:24.037982Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=98; 2025-06-03T10:30:24.038100Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668554294786818:2395], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7511668541409881823:2395]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7511668554294786818:2395].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:24.038116Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668554294786810:2395], SessionActorId: [3:7511668541409881823:2395], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668541409881823:2395]. isRollback=0 2025-06-03T10:30:24.038170Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, ActorId: [3:7511668541409881823:2395], ActorState: ExecuteState, TraceId: 01jwtnf7gr7fsrpz4y690xw4kg, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668554294786811:2395] from: [3:7511668554294786810:2395] 2025-06-03T10:30:24.038189Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668554294786811:2395] TxId: 281474976715757. Ctx: { TraceId: 01jwtnf7gr7fsrpz4y690xw4kg, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:24.038240Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, ActorId: [3:7511668541409881823:2395], ActorState: ExecuteState, TraceId: 01jwtnf7gr7fsrpz4y690xw4kg, Create QueryResponse for error on request, msg: 2025-06-03T10:30:24.063717Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=99; 2025-06-03T10:30:24.063856Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668554294786853:2337], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7511668537114914307:2337]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7511668554294786853:2337].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:24.063880Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668554294786843:2337], SessionActorId: [3:7511668537114914307:2337], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668537114914307:2337]. isRollback=0 2025-06-03T10:30:24.063945Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7hm7kyjy7v00v8s6rjk, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668554294786844:2337] from: [3:7511668554294786843:2337] 2025-06-03T10:30:24.063972Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668554294786844:2337] TxId: 281474976715758. Ctx: { TraceId: 01jwtnf7hm7kyjy7v00v8s6rjk, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:24.064034Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7hm7kyjy7v00v8s6rjk, Create QueryResponse for error on request, msg: 2025-06-03T10:30:24.086688Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=100; 2025-06-03T10:30:24.086791Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668554294786887:2395], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7511668541409881823:2395]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7511668554294786887:2395].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:24.086812Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668554294786877:2395], SessionActorId: [3:7511668541409881823:2395], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668541409881823:2395]. isRollback=0 2025-06-03T10:30:24.086866Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, ActorId: [3:7511668541409881823:2395], ActorState: ExecuteState, TraceId: 01jwtnf7jcae7x4t86c1ch3qst, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668554294786878:2395] from: [3:7511668554294786877:2395] 2025-06-03T10:30:24.086887Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668554294786878:2395] TxId: 281474976715759. Ctx: { TraceId: 01jwtnf7jcae7x4t86c1ch3qst, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:24.086939Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=ZTE5OGE5ZWQtZmMzNzY5MDUtODdiMzUwNDgtZWE3ODVmYjg=, ActorId: [3:7511668541409881823:2395], ActorState: ExecuteState, TraceId: 01jwtnf7jcae7x4t86c1ch3qst, Create QueryResponse for error on request, msg: 2025-06-03T10:30:24.107309Z node 3 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_CONSTRAINT_VIOLATION;details=Conflict with existing key.;tx_id=101; 2025-06-03T10:30:24.107430Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:815: SelfId: [3:7511668554294786920:2337], Table: `/Root/DataShard` ([72057594046644480:2:1]), SessionActorId: [3:7511668537114914307:2337]Got CONSTRAINT VIOLATION for table `/Root/DataShard`. ShardID=72075186224037888, Sink=[3:7511668554294786920:2337].{
: Error: Conflict with existing key., code: 2012 } 2025-06-03T10:30:24.107455Z node 3 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [3:7511668554294786910:2337], SessionActorId: [3:7511668537114914307:2337], statusCode=PRECONDITION_FAILED. Issue=
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012
: Error: Conflict with existing key., code: 2012 . sessionActorId=[3:7511668537114914307:2337]. isRollback=0 2025-06-03T10:30:24.107505Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7k17afyp6ye86xmn74q, got TEvKqpBuffer::TEvError in ExecuteState, status: PRECONDITION_FAILED send to: [3:7511668554294786911:2337] from: [3:7511668554294786910:2337] 2025-06-03T10:30:24.107526Z node 3 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [3:7511668554294786911:2337] TxId: 281474976715760. Ctx: { TraceId: 01jwtnf7k17afyp6ye86xmn74q, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. PRECONDITION_FAILED: {
: Error: Constraint violated. Table: `/Root/DataShard`., code: 2012 subissue: {
: Error: Conflict with existing key., code: 2012 } } 2025-06-03T10:30:24.107575Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=OTk1M2EyZGItNGE4MTM0OTItZGFhYzRlOWQtZTJmNjk5OTE=, ActorId: [3:7511668537114914307:2337], ActorState: ExecuteState, TraceId: 01jwtnf7k17afyp6ye86xmn74q, Create QueryResponse for error on request, msg: WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:30:25.535346Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7511668537114913626:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:25.535400Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 |66.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ... results_accumulator.log} >> TCutHistoryRestrictions::BasicTest [GOOD] >> TCutHistoryRestrictions::EmptyAllowList [GOOD] >> TCutHistoryRestrictions::EmptyDenyList [GOOD] >> TCutHistoryRestrictions::BothListsEmpty [GOOD] >> ObjectDistribution::TestImbalanceCalcualtion >> THiveTest::TestCreateTablet >> TFlatTableExecutor_ColumnGroups::TestManyRows [GOOD] >> TFlatTableExecutor_CompactionScan::TestCompactionScan [GOOD] >> TFlatTableExecutor_CompressedSelectRows::TestCompressedSelectRows [GOOD] >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect >> ObjectDistribution::TestImbalanceCalcualtion [GOOD] >> ObjectDistribution::TestAllowedDomainsAndDown [GOOD] >> ObjectDistribution::TestAddSameNode [GOOD] >> ObjectDistribution::TestManyIrrelevantNodes >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] >> THiveTest::TestNoMigrationToSelf >> TFlatTableExecutor_Exceptions::TestTabletExecuteExceptionDirect [GOOD] >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD] >> KqpQueryService::TableSink_OlapDelete [GOOD] |66.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpQueryService::TableSink_OlapRWQueries [GOOD] >> THiveTest::TestLocalDisconnect >> TCutHistoryRestrictions::SameTabletInBothLists [GOOD] >> THeavyPerfTest::TTestLoadEverything >> THiveTest::TestCreateTablet [GOOD] >> THiveTest::TestCreate100Tablets ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_Oltp_Replace+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 7021, MsgBus: 31779 2025-06-03T10:30:16.871844Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668522060064443:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.872061Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d59/r3tmp/tmpM35lh6/pdisk_1.dat 2025-06-03T10:30:16.946828Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 7021, node 1 2025-06-03T10:30:16.964681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.964692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try 
to initialize from file: (empty maybe) 2025-06-03T10:30:16.964694Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.964743Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:30:16.976869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.976894Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.978022Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:31779 TClient is connected to server localhost:31779 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:17.048706Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:17.052137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:30:17.347549Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668526355032227:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.347588Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.408114Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:17.476856Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668526355032329:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.476910Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.477059Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668526355032334:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:17.478343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-06-03T10:30:17.481459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710659, at schemeshard: 72057594046644480 2025-06-03T10:30:17.481575Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668526355032336:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:30:17.538466Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668526355032387:2382] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:17.632576Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668526355032478:2370], status: PRECONDITION_FAILED, issues:
: Error: Type annotation, code: 1030
:2:29: Error: At function: KiWriteTable!
:2:29: Error: Missing key column in input: Col1 for table: /Root/DataShard, code: 2029 2025-06-03T10:30:17.632711Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZWMyZGI2Ny0yN2QzNWI2My02ODdiNzI3ZC05NzYyNDg1Nw==, ActorId: [1:7511668526355032476:2369], ActorState: ExecuteState, TraceId: 01jwtnf18t7kns8w3fy26hdz5e, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id: WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:30:21.871608Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668522060064443:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:21.871663Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 11758, MsgBus: 15919 2025-06-03T10:30:22.886610Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668544028910742:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:22.886654Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d59/r3tmp/tmptQqQlm/pdisk_1.dat 2025-06-03T10:30:22.904187Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11758, node 2 2025-06-03T10:30:22.931405Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:22.931421Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:22.931425Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:22.931478Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15919 TClient is connected to server localhost:15919 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:30:22.991292Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:22.991335Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:22.992386Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:22.995251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:23.253623Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668548323878660:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:23.253672Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:23.257461Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:23.269067Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668548323878761:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:23.269100Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:23.269159Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668548323878766:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:23.270027Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:30:23.277612Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668548323878768:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:30:23.343852Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668548323878819:2381] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 WAIT_INDEXATION: 0 2025-06-03T10:30:27.886896Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511668544028910742:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:27.886941Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 21691, MsgBus: 8518 2025-06-03T10:30:28.822992Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668571508749511:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:28.823013Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d59/r3tmp/tmpGxvg7C/pdisk_1.dat 2025-06-03T10:30:28.837550Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21691, node 3 2025-06-03T10:30:28.850352Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:28.850370Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:28.850373Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:28.850439Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8518 TClient is connected to server localhost:8518 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:30:28.923406Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:28.923441Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:28.924444Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:28.927280Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:28.928435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:29.234494Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668575803717422:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:29.234522Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:29.240723Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:29.303327Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:30:29.331767Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668575803718779:2438], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:29.331797Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:29.331865Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668575803718784:2441], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:29.332681Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:30:29.334638Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668575803718786:2442], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715660 completed, doublechecking } 2025-06-03T10:30:29.411105Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668575803718837:3178] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD] >> THiveTest::TestUpdateChannelValues [GOOD] >> THiveTest::TestStorageBalancer ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapDelete [GOOD] Test command err: Trying to start YDB, gRPC: 8386, MsgBus: 12301 2025-06-03T10:30:16.471575Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668522371990799:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:16.471625Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d62/r3tmp/tmpnUx50q/pdisk_1.dat 2025-06-03T10:30:16.553488Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8386, node 1 2025-06-03T10:30:16.571919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:16.571966Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:16.573058Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:16.573267Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:16.573279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:16.573282Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:16.573363Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12301 TClient is connected to server localhost:12301 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:16.649726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:16.856022Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668522371991415:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.856049Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:16.905121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:30:16.943675Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:30:16.943763Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:30:16.943836Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:30:16.943880Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:30:16.943913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:30:16.943939Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:30:16.943965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:30:16.943990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:30:16.944019Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:30:16.944043Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:30:16.944053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:30:16.944070Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:30:16.944079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:30:16.944105Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[1:7511668522371991576:2336];tablet_id=72075186224037897;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:30:16.944118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:30:16.944140Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:30:16.944160Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:30:16.944180Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:30:16.944210Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:30:16.944232Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:30:16.944253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:30:16.944272Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:30:16.944291Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:30:16.944310Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037896;self_id=[1:7511668522371991577:2337];tablet_id=72075186224037896;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:30:16.949969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668522371991578:2338];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:30:16.950011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668522371991578:2338];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:30:16.950076Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668522371991578:2338];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:30:16.950109Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668522371991578:2338];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:30:16.950141Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: table ... 2976287992:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.398377Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.398385Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668552976287997:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:24.399218Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:30:24.401333Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668552976287999:2401], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:30:24.460152Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668552976288050:2557] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:24.494681Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:30:24.494681Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:30:24.494781Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7511668552976287689:2340];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=12;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037890; 2025-06-03T10:30:24.494885Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715664;tx_id=281474976715664;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715664; 2025-06-03T10:30:24.622195Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623080Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623135Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623170Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623183Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623240Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623303Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:30:24.623351Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667;
2025-06-03T10:30:24.623480Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667;
2025-06-03T10:30:24.623511Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667;
2025-06-03T10:30:24.704059Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7511668552976287650:2335];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704059Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7511668552976287672:2338];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704077Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;self_id=[3:7511668552976287672:2338];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037896;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704084Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;self_id=[3:7511668552976287650:2335];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037891;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704105Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7511668552976287637:2332];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704109Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7511668552976287664:2337];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704113Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[3:7511668552976287637:2332];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037888;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704117Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;self_id=[3:7511668552976287664:2337];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037890;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704131Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7511668552976287734:2341];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704137Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;self_id=[3:7511668552976287734:2341];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037892;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704139Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7511668552976287680:2339];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704146Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;self_id=[3:7511668552976287680:2339];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037893;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704156Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7511668552976287689:2340];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704164Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[3:7511668552976287689:2340];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037894;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704165Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7511668552976287663:2336];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704173Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;self_id=[3:7511668552976287663:2336];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037897;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704180Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7511668552976287649:2334];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704188Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037895;self_id=[3:7511668552976287649:2334];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037895;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704206Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668552976287638:2333];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:116;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.704213Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668552976287638:2333];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037889;event=TEvWrite;fline=manager.cpp:134;event=abort;tx_id=281474976715669;problem=finished;
2025-06-03T10:30:24.725156Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715672;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672;
2025-06-03T10:30:24.725928Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Complete;commit_tx_id=281474976715672;commit_lock_id=281474976715671;fline=manager.cpp:94;broken_lock_id=281474976715669;
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
WAIT_INDEXATION: 0
|66.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
>> THiveTest::TestDrain
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::TableSink_OlapRWQueries [GOOD]
Test command err:
Trying to start YDB, gRPC: 12905, MsgBus: 61010
2025-06-03T10:30:12.485058Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668502413757255:2140];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:30:12.493518Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d82/r3tmp/tmpwejkdW/pdisk_1.dat
TServer::EnableGrpc on GrpcPort 12905, node 1
2025-06-03T10:30:12.597446Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668502413757142:2079] 1748946612483597 != 1748946612483600
2025-06-03T10:30:12.605196Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:12.605818Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0
2025-06-03T10:30:12.605829Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0
2025-06-03T10:30:12.605936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:30:12.605964Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:30:12.606919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:30:12.626053Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:30:12.626068Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:30:12.626070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:30:12.626121Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:61010
TClient is connected to server localhost:61010
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:30:12.770283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:30:12.774497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480
2025-06-03T10:30:13.095127Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668506708725099:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:13.095185Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:13.275632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
2025-06-03T10:30:13.316511Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-06-03T10:30:13.316585Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-06-03T10:30:13.316678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-06-03T10:30:13.316710Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-06-03T10:30:13.316742Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-06-03T10:30:13.316768Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-06-03T10:30:13.316800Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-06-03T10:30:13.316826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-06-03T10:30:13.316858Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-06-03T10:30:13.316882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-06-03T10:30:13.316916Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-06-03T10:30:13.316942Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[1:7511668506708725244:2335];tablet_id=72075186224037894;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-06-03T10:30:13.318753Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-06-03T10:30:13.318811Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-06-03T10:30:13.318882Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-06-03T10:30:13.318909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-06-03T10:30:13.318932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-06-03T10:30:13.318957Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-06-03T10:30:13.318982Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-06-03T10:30:13.319006Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-06-03T10:30:13.319030Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-06-03T10:30:13.319051Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-06-03T10:30:13.319080Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-06-03T10:30:13.319104Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668506708725230:2333];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-06-03T10:30:13.323693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668506708725229:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-06-03T10:30:13.323734Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:7511668506708725229:2332];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025 ... 224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0;
2025-06-03T10:30:29.608449Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules;
2025-06-03T10:30:29.608492Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks;
2025-06-03T10:30:29.608522Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner;
2025-06-03T10:30:29.608551Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId;
2025-06-03T10:30:29.608592Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup;
2025-06-03T10:30:29.608621Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer;
2025-06-03T10:30:29.608648Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks;
2025-06-03T10:30:29.608676Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2;
2025-06-03T10:30:29.608703Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks;
2025-06-03T10:30:29.608729Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot;
2025-06-03T10:30:29.608755Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[3:7511668575458160395:2334];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta;
2025-06-03T10:30:29.609308Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1;
2025-06-03T10:30:29.609325Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks;
2025-06-03T10:30:29.609342Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2;
2025-06-03T10:30:29.609347Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner;
2025-06-03T10:30:29.609360Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4;
2025-06-03T10:30:29.609365Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId;
2025-06-03T10:30:29.609374Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6;
2025-06-03T10:30:29.609382Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup;
2025-06-03T10:30:29.609390Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8;
2025-06-03T10:30:29.609393Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer;
2025-06-03T10:30:29.609399Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9;
2025-06-03T10:30:29.609402Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks;
2025-06-03T10:30:29.609417Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11;
2025-06-03T10:30:29.609425Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2;
2025-06-03T10:30:29.609440Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15;
2025-06-03T10:30:29.609447Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks;
2025-06-03T10:30:29.609456Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16;
2025-06-03T10:30:29.609463Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot;
2025-06-03T10:30:29.609469Z node 3 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0;
2025-06-03T10:30:29.609473Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17;
2025-06-03T10:30:29.609477Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta;
2025-06-03T10:30:29.609549Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18;
2025-06-03T10:30:29.609558Z node 3 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished;
2025-06-03T10:30:29.642218Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658;
2025-06-03T10:30:29.642218Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658;
2025-06-03T10:30:29.643423Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658;
2025-06-03T10:30:29.646830Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668575458160501:2355], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:29.646861Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668575458160506:2358], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:29.646864Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:30:29.647684Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480
2025-06-03T10:30:29.654567Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668575458160508:2359], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking }
2025-06-03T10:30:29.727892Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668575458160559:2418] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
2025-06-03T10:30:29.862373Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665;
2025-06-03T10:30:29.862385Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665;
2025-06-03T10:30:29.862578Z node 3 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665;
|66.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
>> THiveTest::TestNoMigrationToSelf [GOOD]
>> THiveTest::TestReCreateTablet
>> THiveTest::TestLocalDisconnect [GOOD]
>> THiveTest::TestLocalReplacement
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TFlatTableExecutorGC::TestGCVectorDeduplicaton [GOOD]
Test command err:
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:05.518418Z
00000.004 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144
00000.004 II| FAKE_ENV: TNanny initiates TDummy tablet 72057594037927937 birth
00000.004 II| FAKE_ENV: Starting storage for BS group 0
00000.004 II| FAKE_ENV: Starting storage for BS group 1
00000.004 II| FAKE_ENV: Starting storage for BS group 2
00000.004 II| FAKE_ENV: Starting storage for BS group 3
00000.005 II| TABLET_EXECUTOR: Leader{1:2:0} activating executor
00000.005 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1, 35b, wait} done, Waste{2:0, 0b +(0, 0b), 0 trc}
00000.027 II| TABLET_EXECUTOR: LSnap{1:2, on 2:301, 5619b, wait} done, Waste{2:0, 535636b +(0, 0b), 300 trc}
00000.029 II| TABLET_EXECUTOR: Leader{1:2:318} starting compaction
00000.029 II| TABLET_EXECUTOR: Leader{1:2:319} starting Scan{1 on 3, Compact{1.2.318, eph 1}}
00000.029 II| TABLET_EXECUTOR: Leader{1:2:319} started compaction 1
00000.029 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.318, eph 1}} begin on TSubset{head 2, 1m 0p 0c}
00000.030 II| TABLET_OPS_HOST: Scan{1 on 3, Compact{1.2.318, eph 1}} end=Done, 107r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 15 of 18 ~1p
00000.030 II| OPS_COMPACT: Compact{1.2.318, eph 1} end=Done, 9 blobs 84r (max 107), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 15 +5, (171556 26642 179776)b }, ecr=1.000
00000.030 II| TABLET_EXECUTOR: Leader{1:2:320} Compact 1 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 318, product {1 parts epoch 2} done
00000.035 II| TABLET_EXECUTOR: Leader{1:2:344} starting compaction
00000.035 II| TABLET_EXECUTOR: Leader{1:2:345} starting Scan{3 on 2, Compact{1.2.344, eph 1}}
00000.035 II| TABLET_EXECUTOR: Leader{1:2:345} started compaction 3
00000.035 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.344, eph 1}} begin on TSubset{head 2, 1m 0p 0c}
00000.036 II| TABLET_OPS_HOST: Scan{3 on 2, Compact{1.2.344, eph 1}} end=Done, 112r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 14 of 17 ~1p
00000.036 II| OPS_COMPACT: Compact{1.2.344, eph 1} end=Done, 7 blobs 81r (max 112), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 14 +3, (181816 33988 163282)b }, ecr=1.000
00000.036 II| TABLET_EXECUTOR: Leader{1:2:346} Compact 3 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 344, product {1 parts epoch 2} done
00000.066 II| TABLET_EXECUTOR: LSnap{1:2, on 2:601, 8376b, wait} done, Waste{2:0, 1350227b +(147, 71296b), 300 trc}
00000.077 II| TABLET_EXECUTOR: Leader{1:2:680} starting compaction
00000.077 II| TABLET_EXECUTOR: Leader{1:2:681} starting Scan{5 on 2, Compact{1.2.680, eph 2}}
00000.077 II| TABLET_EXECUTOR: Leader{1:2:681} started compaction 5
00000.077 II| TABLET_OPS_HOST: Scan{5 on 2, Compact{1.2.680, eph 2}} begin on TSubset{head 3, 1m 0p 0c}
00000.078 II| TABLET_OPS_HOST: Scan{5 on 2, Compact{1.2.680, eph 2}} end=Done, 111r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 14 of 15 ~1p
00000.078 II| OPS_COMPACT: Compact{1.2.680, eph 2} end=Done, 9 blobs 111r (max 111), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 14 +5, (154868 27008 171803)b }, ecr=1.000
00000.078 II| TABLET_EXECUTOR: Leader{1:2:682} Compact 5 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 680, product {1 parts epoch 3} done
00000.078 II| TABLET_EXECUTOR: Leader{1:2:683} starting compaction
00000.078 II| TABLET_EXECUTOR: Leader{1:2:684} starting Scan{7 on 2, Compact{1.2.683, eph 2}}
00000.078 II| TABLET_EXECUTOR: Leader{1:2:684} started compaction 7
00000.078 II| TABLET_OPS_HOST: Scan{7 on 2, Compact{1.2.683, eph 2}} begin on TSubset{head 0, 0m 2p 0c}
00000.080 II| TABLET_OPS_HOST: Scan{7 on 2, Compact{1.2.683, eph 2}} end=Done, 163r seen, TFwd{fetch=322KiB,saved=322KiB,usage=315KiB,after=6.5KiB,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 30 of 36 ~3p
00000.080 II| OPS_COMPACT: Compact{1.2.683, eph 2} end=Done, 4 blobs 134r (max 192), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 30 +0, (280573 47811 274517)b }, ecr=1.000
00000.080 II| TABLET_EXECUTOR: Leader{1:2:687} Compact 7 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 683, product {1 parts epoch 0} done
00000.082 II| TABLET_EXECUTOR: Leader{1:2:695} starting compaction
00000.082 II| TABLET_EXECUTOR: Leader{1:2:696} starting Scan{9 on 3, Compact{1.2.695, eph 2}}
00000.082 II| TABLET_EXECUTOR: Leader{1:2:696} started compaction 9
00000.082 II| TABLET_OPS_HOST: Scan{9 on 3, Compact{1.2.695, eph 2}} begin on TSubset{head 3, 1m 0p 0c}
00000.083 II| TABLET_OPS_HOST: Scan{9 on 3, Compact{1.2.695, eph 2}} end=Done, 109r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 11 of 15 ~1p
00000.083 II| OPS_COMPACT: Compact{1.2.695, eph 2} end=Done, 11 blobs 109r (max 109), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 11 +7, (158542 6884 158818)b }, ecr=1.000
00000.083 II| TABLET_EXECUTOR: Leader{1:2:697} Compact 9 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 695, product {1 parts epoch 3} done
00000.083 II| TABLET_EXECUTOR: Leader{1:2:699} starting compaction
00000.083 II| TABLET_EXECUTOR: Leader{1:2:700} starting Scan{11 on 3, Compact{1.2.699, eph 2}}
00000.083 II| TABLET_EXECUTOR: Leader{1:2:700} started compaction 11
00000.083 II| TABLET_OPS_HOST: Scan{11 on 3, Compact{1.2.699, eph 2}} begin on TSubset{head 0, 0m 2p 0c}
00000.085 II| TABLET_OPS_HOST: Scan{11 on 3, Compact{1.2.699, eph 2}} end=Done, 161r seen, TFwd{fetch=321KiB,saved=321KiB,usage=309KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 31 of 38 ~3p
00000.085 II| OPS_COMPACT: Compact{1.2.699, eph 2} end=Done, 4 blobs 136r (max 193), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 31 +0, (257093 20370 276226)b }, ecr=1.000
00000.086 II| TABLET_EXECUTOR: Leader{1:2:702} Compact 11 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 699, product {1 parts epoch 0} done
00000.119 II| TABLET_EXECUTOR: LSnap{1:2, on 2:901, 10215b, wait} done, Waste{2:0, 2026146b +(185, 860132b), 300 trc}
00000.132 II| TABLET_EXECUTOR: Leader{1:2:993} starting compaction
00000.132 II| TABLET_EXECUTOR: Leader{1:2:994} starting Scan{13 on 2, Compact{1.2.993, eph 3}}
00000.132 II| TABLET_EXECUTOR: Leader{1:2:994} started compaction 13
00000.132 II| TABLET_OPS_HOST: Scan{13 on 2, Compact{1.2.993, eph 3}} begin on TSubset{head 4, 1m 0p 0c}
00000.133 II| TABLET_OPS_HOST: Scan{13 on 2, Compact{1.2.993, eph 3}} end=Done, 114r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 20 of 22 ~1p
00000.133 II| OPS_COMPACT: Compact{1.2.993, eph 3} end=Done, 9 blobs 114r (max 114), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 20 +5, (146829 20310 222952)b }, ecr=1.000
00000.133 II| TABLET_EXECUTOR: Leader{1:2:995} Compact 13 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 993, product {1 parts epoch 4} done
00000.138 II| TABLET_EXECUTOR: Leader{1:2:1008} starting compaction
00000.138 II| TABLET_EXECUTOR: Leader{1:2:1009} starting Scan{15 on 3, Compact{1.2.1008, eph 3}}
00000.138 II| TABLET_EXECUTOR: Leader{1:2:1009} started compaction 15
00000.138 II| TABLET_OPS_HOST: Scan{15 on 3, Compact{1.2.1008, eph 3}} begin on TSubset{head 4, 1m 0p 0c}
00000.139 II| TABLET_OPS_HOST: Scan{15 on 3, Compact{1.2.1008, eph 3}} end=Done, 116r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 12 of 14 ~1p
00000.139 II| OPS_COMPACT: Compact{1.2.1008, eph 3} end=Done, 12 blobs 116r (max 116), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 1 pk, lobs 12 +10, (165522 0 182464)b }, ecr=1.000
00000.140 II| TABLET_EXECUTOR: Leader{1:2:1009} Compact 15 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1008, product {1 parts epoch 4} done
00000.175 II| TABLET_EXECUTOR: LSnap{1:2, on 2:1201, 11399b, wait} done, Waste{2:0, 3133141b +(138, 48209b), 300 trc}
00000.190 II| TABLET_EXECUTOR: Leader{1:2:1310} starting compaction
00000.190 II| TABLET_EXECUTOR: Leader{1:2:1311} starting Scan{17 on 2, Compact{1.2.1310, eph 4}}
00000.190 II| TABLET_EXECUTOR: Leader{1:2:1311} started compaction 17
00000.190 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1310, eph 4}} begin on TSubset{head 5, 1m 0p 0c}
00000.191 II| TABLET_OPS_HOST: Scan{17 on 2, Compact{1.2.1310, eph 4}} end=Done, 114r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 15 of 18 ~1p
00000.191 II| OPS_COMPACT: Compact{1.2.1310, eph 4} end=Done, 8 blobs 114r (max 114), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 15 +4, (187809 53863 175125)b }, ecr=1.000
00000.191 II| TABLET_EXECUTOR: Leader{1:2:1312} Compact 17 on TGenCompactionParams{2: gen 0 epoch +inf, 0 parts} step 1310, product {1 parts epoch 5} done
00000.191 II| TABLET_EXECUTOR: Leader{1:2:1313} starting compaction
00000.191 II| TABLET_EXECUTOR: Leader{1:2:1314} starting Scan{19 on 2, Compact{1.2.1313, eph 4}}
00000.191 II| TABLET_EXECUTOR: Leader{1:2:1314} started compaction 19
00000.191 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1313, eph 4}} begin on TSubset{head 0, 0m 2p 0c}
00000.193 II| TABLET_OPS_HOST: Scan{19 on 2, Compact{1.2.1313, eph 4}} end=Done, 186r seen, TFwd{fetch=325KiB,saved=325KiB,usage=312KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 34 of 44 ~3p
00000.193 II| OPS_COMPACT: Compact{1.2.1313, eph 4} end=Done, 4 blobs 186r (max 228), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 34 +0, (273793 60803 310326)b }, ecr=1.000
00000.194 II| TABLET_EXECUTOR: Leader{1:2:1315} Compact 19 on TGenCompactionParams{2: gen 1 epoch 0, 2 parts} step 1313, product {1 parts epoch 0} done
00000.194 II| TABLET_EXECUTOR: Leader{1:2:1316} got result TEvResult{0 pages [1:2:1310:1:12289:0:0] fail RACE}, category 1
00000.210 II| TABLET_EXECUTOR: Leader{1:2:1374} starting compaction
00000.210 II| TABLET_EXECUTOR: Leader{1:2:1375} starting Scan{21 on 3, Compact{1.2.1374, eph 4}}
00000.210 II| TABLET_EXECUTOR: Leader{1:2:1375} started compaction 21
00000.210 II| TABLET_OPS_HOST: Scan{21 on 3, Compact{1.2.1374, eph 4}} begin on TSubset{head 5, 1m 0p 0c}
00000.211 II| TABLET_OPS_HOST: Scan{21 on 3, Compact{1.2.1374, eph 4}} end=Done, 100r seen, TFwd{fetch=0B,saved=0B,usage=0B,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=1}, trace 8 of 14 ~1p
00000.211 II| OPS_COMPACT: Compact{1.2.1374, eph 4} end=Done, 11 blobs 100r (max 100), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 8 +7, (146221 26853 130175)b }, ecr=1.000
00000.212 II| TABLET_EXECUTOR: Leader{1:2:1375} Compact 21 on TGenCompactionParams{3: gen 0 epoch +inf, 0 parts} step 1374, product {1 parts epoch 5} done
00000.212 II| TABLET_EXECUTOR: Leader{1:2:1376} starting compaction
00000.212 II| TABLET_EXECUTOR: Leader{1:2:1377} starting Scan{23 on 3, Compact{1.2.1376, eph 4}}
00000.212 II| TABLET_EXECUTOR: Leader{1:2:1377} started compaction 23
00000.212 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1376, eph 4}} begin on TSubset{head 0, 0m 2p 0c}
00000.214 II| TABLET_OPS_HOST: Scan{23 on 3, Compact{1.2.1376, eph 4}} end=Done, 177r seen, TFwd{fetch=303KiB,saved=303KiB,usage=303KiB,after=0B,before=0B}, bio Spent{time=0.000s,wait=0.000s,interrupts=7}, trace 29 of 37 ~3p
00000.214 II| OPS_COMPACT: Compact{1.2.1376, eph 4} end=Done, 4 blobs 177r (max 216), put Spent{time=0.000s,wait=0.000s,interrupts=1} Part{ 2 pk, lobs 29 +0, (245314 26853 244550)b }, ecr=1.000
00000.214 II| TABLET_EXECUTOR: Leader{1:2:1377} Compact 23 on TGenCompactionParams{3: gen 1 epoch 0, 2 parts} step 1376, pro ... ]
00000.023 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0
00000.023 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:313:1:12288:161:0] owner [37:418:2424]
00000.023 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:313:1:12288:161:0] owner [37:418:2424] cookie 0 class Scan from cache [ 0 ]
00000.023 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:313:1:12288:161:0] owner [37:418:2424] class Scan pages [ 0 ] cookie 0
00000.023 DD| TABLET_SAUSAGECACHE: Save page collection [1:2:315:1:12288:163:0] owner [37:419:2424] compacted pages [ 2 ]
00000.023 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0]
00000.023 DD| TABLET_SAUSAGECACHE: Unregister owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:418:2424]
00000.023 DD| TABLET_SAUSAGECACHE: Remove owner [37:418:2424]
00000.023 II| TABLET_EXECUTOR: Leader{1:2:316} Compact 63 on TGenCompactionParams{101: gen 2 epoch 0, 5 parts} step 315, product {1 parts epoch 0} done
00000.023 DD| TABLET_EXECUTOR: TGenCompactionStrategy CompactionFinished for 1: compaction 63, generation 2
00000.023 DD| TABLET_EXECUTOR: TGenCompactionStrategy CheckGeneration for 1 generation 2, state Free, final id 0, final level 2
00000.024 DD| RESOURCE_BROKER: Finish task gen2-table-101-tablet-1 (32 by [37:30:2062]) (release resources {1, 0})
00000.024 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_compaction_gen2 from 4.687500 to 0.000000 (remove task gen2-table-101-tablet-1 (32 by [37:30:2062]))
00000.024 DD| TABLET_SAUSAGECACHE: Attach page collection [1:2:315:1:12288:163:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:315:1:12288:163:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:313:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:313:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:250:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_EXECUTOR: Leader{1:2:317} commited cookie 3 for step 316
00000.024 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:188:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_EXECUTOR: Leader{1:2:317} switch applied on followers, step 316
00000.024 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:126:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Detach page collection [1:2:64:1:12288:161:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:30:2062]
00000.024 TT| TABLET_SAUSAGECACHE: Touch page collection [1:2:315:1:12288:163:0] owner [37:30:2062] pages [ 2 ]
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:64:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:64:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:64:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:126:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:126:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:126:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:188:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:188:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:188:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:250:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:250:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:250:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:261:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:261:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:261:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:273:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:273:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:273:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:285:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:285:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:285:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Add page collection [1:2:297:1:12288:161:0] owner [37:405:2414]
00000.024 TT| TABLET_SAUSAGECACHE: Request page collection [1:2:297:1:12288:161:0] owner [37:405:2414] cookie 0 class Scan from cache [ 0 ]
00000.024 TT| TABLET_SAUSAGECACHE: Send page collection result [1:2:297:1:12288:161:0] owner [37:405:2414] class Scan pages [ 0 ] cookie 0
00000.024 DD| TABLET_SAUSAGECACHE: Unregister owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:126:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:297:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:273:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:188:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:64:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:261:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:285:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:250:1:12288:161:0] owner [37:405:2414]
00000.024 DD| TABLET_SAUSAGECACHE: Remove owner [37:405:2414]
00000.024 DD| RESOURCE_BROKER: Finish task Scan{58 on 101}::1 (29 by [37:30:2062]) (release resources {1, 0})
00000.024 DD| RESOURCE_BROKER: Updated planned resource usage for queue queue_scan from 11.718750 to 0.000000 (remove task Scan{58 on 101}::1 (29 by [37:30:2062]))
00000.024 II| TABLET_EXECUTOR: Leader{1:2:317} suiciding, Waste{2:0, 7661b +(30, 11928b), 16 trc, -42337b acc}
00000.024 DD| TABLET_SAUSAGECACHE: Unregister owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove page collection [1:2:315:1:12288:163:0] owner [37:30:2062]
00000.024 DD| TABLET_SAUSAGECACHE: Remove owner [37:30:2062]
00000.024 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors
00000.024 NN| TABLET_SAUSAGECACHE: Poison cache serviced 38 reqs hit {38 21480b} miss {0 0b}
00000.024 II| FAKE_ENV: Shut order, stopping 4 BS groups
00000.024 II| FAKE_ENV: DS.0 gone, left {2353b, 18}, put {31654b, 317}
00000.024 II| FAKE_ENV: DS.1 gone, left {23847b, 37}, put {57237b, 346}
00000.024 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0}
00000.024 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0}
00000.024 II| FAKE_ENV: All BS storage groups are stopped
00000.024 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s
00000.024 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 2287}, stopped
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:29.858320Z
00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144
00000.001 II| FAKE_ENV: Starting storage for BS group 0
00000.001 II| FAKE_ENV: Starting storage for BS group 1
00000.001 II| FAKE_ENV: Starting storage for BS group 2
00000.001 II| FAKE_ENV: Starting storage for BS group 3
00000.012 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors
00000.012 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b}
00000.012 II| FAKE_ENV: Shut order, stopping 4 BS groups
00000.012 II| FAKE_ENV: DS.0 gone, left {536b, 6}, put {556b, 7}
00000.012 II| FAKE_ENV: DS.1 gone, left {30495b, 8}, put {30495b, 8}
00000.012 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0}
00000.012 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0}
00000.012 II| FAKE_ENV: All BS storage groups are stopped
00000.012 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s
00000.012 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 0 Error 0 Left 15}, stopped
00000.000 II| FAKE_ENV: Born at 2025-06-03T10:30:29.871648Z
00000.001 NN| TABLET_SAUSAGECACHE: Bootstrap with config MemoryLimit: 8388608 ScanQueueInFlyLimit: 262144 AsyncQueueInFlyLimit: 262144
00000.001 II| FAKE_ENV: Starting storage for BS group 0
00000.001 II| FAKE_ENV: Starting storage for BS group 1
00000.002 II| FAKE_ENV: Starting storage for BS group 2
00000.002 II| FAKE_ENV: Starting storage for BS group 3
00000.068 CC| TABLET_EXECUTOR: Tablet 1 unhandled exception std::runtime_error: test
??+0 (0xCD512F2)
??+0 (0xCD51297)
NKikimr::NTabletFlatExecutor::NTestSuiteTFlatTableExecutor_Exceptions::TTxExecuteThrowException::Execute(NKikimr::NTabletFlatExecutor::TTransactionContext&, NActors::TActorContext const&)+57 (0xC88B739)
NKikimr::NTabletFlatExecutor::TExecutor::ExecuteTransaction(NKikimr::NTabletFlatExecutor::TSeat*)+1312 (0xF3276E0)
NKikimr::NTabletFlatExecutor::TExecutor::DoExecute(TAutoPtr, NKikimr::NTabletFlatExecutor::TExecutor::ETxMode)+3356 (0xF325B8C)
non-virtual thunk to NKikimr::NTabletFlatExecutor::TExecutor::Execute(TAutoPtr, NActors::TActorContext const&)+35 (0xF328C03)
??+0 (0xC88B650)
NKikimr::NFake::TDummy::Inbox(TAutoPtr&)+480 (0xC83D250)
NActors::IActor::Receive(TAutoPtr&)+85 (0xD5F11E5)
00000.068 II| FAKE_ENV: Model starts hard shutdown on level 7 of 8, left 2 actors
00000.068 NN| TABLET_SAUSAGECACHE: Poison cache serviced 0 reqs hit {0 0b} miss {0 0b}
00000.068 II| FAKE_ENV: Shut order, stopping 4 BS groups
00000.068 II| FAKE_ENV: DS.0 gone, left {42b, 1}, put {62b, 2}
00000.068 II| FAKE_ENV: DS.1 gone, left {35b, 1}, put {35b, 1}
00000.068 II| FAKE_ENV: DS.2 gone, left {0b, 0}, put {0b, 0}
00000.068 II| FAKE_ENV: DS.3 gone, left {0b, 0}, put {0b, 0}
00000.068 II| FAKE_ENV: All BS storage groups are stopped
00000.068 II| FAKE_ENV: Model stopped, hosted 3 actors, spent 0.000s
00000.068 II| FAKE_ENV: Logged {Emerg 0 Alert 0 Crit 1 Error 0 Left 15}, stopped
|66.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteReadWhileWriteWorks [GOOD]
Test command err:
Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095]
Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061]
!Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected !
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110]
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110]
!Reboot 72057594037927937 (actor [2:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111]
Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061]
!Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest !
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110]
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061]
Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110]
!Reboot 72057594037927937 (actor [3:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111]
Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095]
Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095]
Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061]
!Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate !
Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083]
Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061]
Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110]
Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110]
!Reboot 72057594037927937 (actor [4:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111]
Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095]
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095]
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061]
!Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected !
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083]
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:83:2057] recipient: [5:14:2061]
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:84:2113]
Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:84:2113]
!Reboot 72057594037927937 (actor [5:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114]
Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095]
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095]
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061]
!Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest !
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083]
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:83:2057] recipient: [6:14:2061]
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:84:2113]
Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:84:2113]
!Reboot 72057594037927937 (actor [6:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114]
Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095]
Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095]
Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061]
!Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate !
Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083]
Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113]
Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:14:2061]
Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113]
!Reboot 72057594037927937 (actor [7:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114]
Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095]
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095]
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061]
!Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected !
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083]
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061]
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115]
Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115]
!Reboot 72057594037927937 (actor [8:57:2097]) rebooted!
!Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:89:2116]
Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061]
Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095]
IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095]
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095]
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061]
!Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest !
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 
TabletID 72057594037927937 is [13:57:2097] sender: [13:90:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [13:57:2097] sender: [13:92:2057] recipient: [13:91:2117] Leader for TabletID 72057594037927937 is [13:93:2118] sender: [13:94:2057] recipient: [13:91:2117] !Reboot 72057594037927937 (actor [13:57:2097]) rebooted! !Reboot 72057594037927937 (actor [13:57:2097]) tablet resolver refreshed! new actor is[13:93:2118] Leader for TabletID 72057594037927937 is [13:93:2118] sender: [13:179:2057] recipient: [13:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [14:55:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:58:2057] recipient: [14:50:2095] Leader for TabletID 72057594037927937 is [14:57:2097] sender: [14:75:2057] recipient: [14:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [15:55:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:58:2057] recipient: [15:50:2095] Leader for TabletID 72057594037927937 is [15:57:2097] sender: [15:75:2057] recipient: [15:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [16:55:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:58:2057] recipient: [16:51:2095] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:75:2057] recipient: [16:14:2061] !Reboot 72057594037927937 (actor [16:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:77:2057] recipient: [16:36:2083] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:80:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [16:57:2097] sender: [16:81:2057] recipient: [16:79:2110] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:83:2057] recipient: [16:79:2110] !Reboot 72057594037927937 (actor [16:57:2097]) rebooted! !Reboot 72057594037927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:82:2111] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:168:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:77:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:79:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:81:2057] recipient: [17:80:2110] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:83:2057] recipient: [17:80:2110] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! 
new actor is[17:82:2111] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:168:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:78:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:81:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:82:2057] recipient: [18:80:2110] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:84:2057] recipient: [18:80:2110] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! new actor is[18:83:2111] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:169:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:81:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:84:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:85:2057] recipient: [19:83:2113] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:87:2057] recipient: [19:83:2113] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:86:2114] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:172:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:81:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:84:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:85:2057] recipient: [20:83:2113] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:87:2057] recipient: [20:83:2113] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! 
new actor is[20:86:2114] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:172:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:82:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:85:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:86:2057] recipient: [21:84:2113] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:88:2057] recipient: [21:84:2113] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! new actor is[21:87:2114] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:173:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:85:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:88:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:89:2057] recipient: [22:87:2116] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:91:2057] recipient: [22:87:2116] !Reboot 72057594037927937 (actor [22:57:2097]) rebooted! !Reboot 72057594037927937 (actor [22:57:2097]) tablet resolver refreshed! new actor is[22:90:2117] Leader for TabletID 72057594037927937 is [22:90:2117] sender: [22:176:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:58:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:75:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:85:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:88:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:89:2057] recipient: [23:87:2116] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:91:2057] recipient: [23:87:2116] !Reboot 72057594037927937 (actor [23:57:2097]) rebooted! !Reboot 72057594037927937 (actor [23:57:2097]) tablet resolver refreshed! 
new actor is[23:90:2117] Leader for TabletID 72057594037927937 is [23:90:2117] sender: [23:176:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:58:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:75:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:86:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:88:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:90:2057] recipient: [24:89:2116] Leader for TabletID 72057594037927937 is [24:91:2117] sender: [24:92:2057] recipient: [24:89:2116] !Reboot 72057594037927937 (actor [24:57:2097]) rebooted! !Reboot 72057594037927937 (actor [24:57:2097]) tablet resolver refreshed! new actor is[24:91:2117] Leader for TabletID 72057594037927937 is [24:91:2117] sender: [24:177:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:58:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:75:2057] recipient: [25:14:2061] |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> THiveTest::TestHiveBalancerWithPrefferedDC1 >> TKeyValueTest::TestGetStatusWorks [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleOut [GOOD] >> TargetTrackingScaleRecommenderPolicy::BigNumbersScaleIn [GOOD] >> TargetTrackingScaleRecommenderPolicy::SpikeResistance [GOOD] >> TargetTrackingScaleRecommenderPolicy::NearTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::AtTarget [GOOD] >> TargetTrackingScaleRecommenderPolicy::Fluctuations [GOOD] >> TargetTrackingScaleRecommenderPolicy::FluctuationsBigNumbers [GOOD] >> TargetTrackingScaleRecommenderPolicy::ScaleInToMaxSeen [GOOD] >> TargetTrackingScaleRecommenderPolicy::Idle [GOOD] >> TStorageBalanceTest::TestScenario1 >> THiveTest::TestReCreateTablet [GOOD] >> THiveTest::TestReCreateTabletError |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> KqpService::CloseSessionsWithLoad [GOOD] >> KqpService::PatternCache |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestGetStatusWorks [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: 
[2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! 
new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! 
new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! 
new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... or TabletID 72057594037927937 is [29:57:2097] sender: [29:90:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:91:2057] recipient: [29:89:2117] Leader for TabletID 72057594037927937 is [29:92:2118] sender: [29:93:2057] recipient: [29:89:2117] !Reboot 72057594037927937 (actor [29:57:2097]) rebooted! !Reboot 72057594037927937 (actor [29:57:2097]) tablet resolver refreshed! new actor is[29:92:2118] Leader for TabletID 72057594037927937 is [29:92:2118] sender: [29:178:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:58:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:75:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:88:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:91:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:92:2057] recipient: [30:90:2117] Leader for TabletID 72057594037927937 is [30:93:2118] sender: [30:94:2057] recipient: [30:90:2117] !Reboot 72057594037927937 (actor [30:57:2097]) rebooted! !Reboot 72057594037927937 (actor [30:57:2097]) tablet resolver refreshed! 
new actor is[30:93:2118] Leader for TabletID 72057594037927937 is [30:93:2118] sender: [30:179:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:58:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:75:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:91:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:93:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:95:2057] recipient: [31:94:2120] Leader for TabletID 72057594037927937 is [31:96:2121] sender: [31:97:2057] recipient: [31:94:2120] !Reboot 72057594037927937 (actor [31:57:2097]) rebooted! !Reboot 72057594037927937 (actor [31:57:2097]) tablet resolver refreshed! new actor is[31:96:2121] Leader for TabletID 72057594037927937 is [31:96:2121] sender: [31:182:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:58:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:75:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:91:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:94:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:95:2057] recipient: [32:93:2120] Leader for TabletID 72057594037927937 is [32:96:2121] sender: [32:97:2057] recipient: [32:93:2120] !Reboot 72057594037927937 (actor [32:57:2097]) rebooted! !Reboot 72057594037927937 (actor [32:57:2097]) tablet resolver refreshed! 
new actor is[32:96:2121] Leader for TabletID 72057594037927937 is [32:96:2121] sender: [32:182:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:58:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:75:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:58:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:75:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:55:2057] recipient: [35:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:55:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:58:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:75:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:77:2057] recipient: [35:36:2083] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:80:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:81:2057] recipient: [35:79:2110] Leader for TabletID 72057594037927937 is [35:82:2111] sender: [35:83:2057] recipient: [35:79:2110] !Reboot 72057594037927937 (actor [35:57:2097]) rebooted! !Reboot 72057594037927937 (actor [35:57:2097]) tablet resolver refreshed! new actor is[35:82:2111] Leader for TabletID 72057594037927937 is [35:82:2111] sender: [35:168:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:55:2057] recipient: [36:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:55:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:58:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:75:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:77:2057] recipient: [36:36:2083] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:79:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:81:2057] recipient: [36:80:2110] Leader for TabletID 72057594037927937 is [36:82:2111] sender: [36:83:2057] recipient: [36:80:2110] !Reboot 72057594037927937 (actor [36:57:2097]) rebooted! !Reboot 72057594037927937 (actor [36:57:2097]) tablet resolver refreshed! 
new actor is[36:82:2111] Leader for TabletID 72057594037927937 is [36:82:2111] sender: [36:168:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:55:2057] recipient: [37:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:55:2057] recipient: [37:51:2095] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:58:2057] recipient: [37:51:2095] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:75:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:78:2057] recipient: [37:36:2083] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:81:2057] recipient: [37:80:2110] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:82:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:83:2111] sender: [37:84:2057] recipient: [37:80:2110] !Reboot 72057594037927937 (actor [37:57:2097]) rebooted! !Reboot 72057594037927937 (actor [37:57:2097]) tablet resolver refreshed! new actor is[37:83:2111] Leader for TabletID 72057594037927937 is [37:83:2111] sender: [37:169:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:55:2057] recipient: [38:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:55:2057] recipient: [38:50:2095] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:58:2057] recipient: [38:50:2095] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:75:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:80:2057] recipient: [38:36:2083] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:83:2057] recipient: [38:82:2112] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:84:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:85:2113] sender: [38:86:2057] recipient: [38:82:2112] !Reboot 72057594037927937 (actor [38:57:2097]) rebooted! !Reboot 72057594037927937 (actor [38:57:2097]) tablet resolver refreshed! new actor is[38:85:2113] Leader for TabletID 72057594037927937 is [38:85:2113] sender: [38:171:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:55:2057] recipient: [39:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:55:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:58:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:75:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:80:2057] recipient: [39:36:2083] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:83:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:84:2057] recipient: [39:82:2112] Leader for TabletID 72057594037927937 is [39:85:2113] sender: [39:86:2057] recipient: [39:82:2112] !Reboot 72057594037927937 (actor [39:57:2097]) rebooted! !Reboot 72057594037927937 (actor [39:57:2097]) tablet resolver refreshed! 
new actor is[39:85:2113] Leader for TabletID 72057594037927937 is [39:85:2113] sender: [39:171:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:55:2057] recipient: [40:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:55:2057] recipient: [40:50:2095] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:58:2057] recipient: [40:50:2095] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:75:2057] recipient: [40:14:2061] !Reboot 72057594037927937 (actor [40:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:81:2057] recipient: [40:36:2083] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:84:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:85:2057] recipient: [40:83:2112] Leader for TabletID 72057594037927937 is [40:86:2113] sender: [40:87:2057] recipient: [40:83:2112] !Reboot 72057594037927937 (actor [40:57:2097]) rebooted! !Reboot 72057594037927937 (actor [40:57:2097]) tablet resolver refreshed! new actor is[40:86:2113] Leader for TabletID 72057594037927937 is [40:86:2113] sender: [40:172:2057] recipient: [40:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:55:2057] recipient: [41:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:55:2057] recipient: [41:51:2095] Leader for TabletID 72057594037927937 is [41:57:2097] sender: [41:58:2057] recipient: [41:51:2095] Leader for TabletID 72057594037927937 is [41:57:2097] sender: [41:75:2057] recipient: [41:14:2061] >> THiveTest::TestReCreateTabletError [GOOD] >> THiveTest::TestNodeDisconnect >> THiveTest::TestLocalReplacement [GOOD] >> THiveTest::TestHiveRestart |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejects >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets >> TSchemeShardSubDomainTest::CreateDropSolomon >> THiveTest::TestFollowers >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false |66.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] >> THiveTest::TestStorageBalancer [GOOD] >> THiveTest::TestRestartsWithFollower >> TPartBtreeIndexIteration::FewNodes_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_Slices |66.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing |66.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] >> THiveTest::TestNodeDisconnect [GOOD] >> THiveTest::TestHiveRestart [GOOD] >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside >> THiveTest::TestReassignGroupsWithRecreateTablet >> THiveTest::TestLimitedNodeList >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] >> TSchemeShardSubDomainTest::RestartAtInFly >> 
TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTablets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:31.913339Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:31.913366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.913371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:31.913376Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:31.913391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:31.913394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:31.913403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.913418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:31.913513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:31.913578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:31.924401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:31.924425Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:31.927931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:31.928027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:31.928061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:31.930359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:31.930455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:31.930588Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.930654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:31.931470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.931546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:31.931860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.931870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.931880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:31.931886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.931891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:31.931927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.933167Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:31.950768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:31.950846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.950903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:31.950948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:31.950958Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.951603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.951638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:31.951691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.951700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:31.951705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:31.951710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:31.952202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.952215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:31.952219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:31.952589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.952601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.952606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.952613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:31.953114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:31.953643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:31.953696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:31.953936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.953972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at 
schemeshard: 72057594046678944 2025-06-03T10:30:31.953991Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.954071Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:31.954079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.954122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:31.954137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:31.954719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.954731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.954779Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:31.963114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:31.963518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.963529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.963561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:31.963577Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.963582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:31.963587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:31.963656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 
72057594046678944 2025-06-03T10:30:31.963663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:31.963677Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:31.963682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:31.963688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:31.963691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:31.963697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:31.963702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:31.963711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:31.963715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:31.963728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:31.963734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-03T10:30:31.963738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:31.963741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:31.963853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:31.963867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:31.963872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:31.963877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:31.963882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:31.963994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 
72057594046678944, cookie: 100 2025-06-03T10:30:31.964005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:31.964010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:31.964015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:31.964020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:31.964031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:30:31.964736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:31.965035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:30:31.965100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:31.965108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:30:31.965185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:31.965209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:31.965216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:309:2299] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:31.965324Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:31.965367Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 75us result status StatusSuccess 2025-06-03T10:30:31.965484Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 
UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:31.965563Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:31.965581Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 21us result status StatusSuccess 2025-06-03T10:30:31.965638Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:31.943473Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:31.943510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.943516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:31.943523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:31.943542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:31.943547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:31.943558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.943573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:31.943694Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:31.943770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:31.955263Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:31.955282Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:31.958751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:31.958845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:31.958886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:31.961340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:31.961428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:31.961575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.961640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:31.962538Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.962595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: 
[RootDataErasureManager] Stop 2025-06-03T10:30:31.962902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.962913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.962923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:31.962930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.962936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:31.962951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.964179Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:31.979325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:31.979394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.979454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:31.979499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:31.979509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:31.980186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:31.980198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 
ProgressState no shards to create, do next state 2025-06-03T10:30:31.980202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:31.980580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980593Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:31.980860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980869Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.980879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:31.981471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:31.981941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:31.981987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:31.982214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.982244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:31.982263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.982341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:31.982350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.982388Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:31.982398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:31.982826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.982833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.982873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ons { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:31.989368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:31.989413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 128 -> 240 2025-06-03T10:30:31.989419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:31.989457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:31.989464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:31.989471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:31.989844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.989853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.989888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:31.989900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.989904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:31.989907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:31.989973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.989979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:31.989989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:31.989992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:31.989996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:31.989998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:31.990002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:31.990006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:31.990009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:31.990013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:31.990021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:31.990025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:31.990028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:31.990031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:31.990124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:31.990136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:31.990140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:31.990143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:31.990146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:31.990224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:31.990231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:31.990234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:31.990236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:31.990239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:31.990245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:31.990869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:31.991166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-03T10:30:31.991873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:31.991912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: /MyRoot/SomeDatabase, opId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.991950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, at schemeshard: 72057594046678944 2025-06-03T10:30:31.992410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. 
Existing storage kinds are: pool-kind-1, pool-kind-2" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:31.992449Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain /MyRoot/SomeDatabase has the specified kinds. Existing storage kinds are: pool-kind-1, pool-kind-2, operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-03T10:30:31.992522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:31.992529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-03T10:30:31.992567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:31.992570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:31.992631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:31.992657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:31.992661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:313:2303] 2025-06-03T10:30:31.992687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:31.992701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:31.992706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:313:2303] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 >> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe >> THiveTest::TestReassignGroupsWithRecreateTablet [GOOD] >> THiveTest::TestReassignUseRelativeSpace ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_column_build/unittest >> ColumnBuildTest::BuildColumnDoesnotRestoreDeletedRows [GOOD] >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:28.966336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:28.966364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: 
BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:28.966369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:28.966373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:28.966388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:28.966391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:28.966403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:28.966416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:28.966515Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:28.966588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:28.979601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:28.979631Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:28.984200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:28.984311Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:28.984357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:28.986611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:28.986690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:28.986814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:28.986881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:28.987498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:28.987543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:28.987878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:28.987888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-03T10:30:28.987900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:28.987906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:28.987912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:28.987931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:28.989176Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:29.004685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:29.004806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.004917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:29.004971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:29.004983Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.005830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:29.005860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:29.005929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.005938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:29.005943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:29.005947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:29.006418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.006430Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:29.006436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:29.006799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.006813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:29.006820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:29.006828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:29.007392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:29.007830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:29.007876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:29.008041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:29.008062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:29.008068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:29.008118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:29.008124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:29.008154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:29.008164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 
72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:29.008557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:29.008579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:29.008633Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... lMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'28))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.910168Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2052:3913], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'29))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.910648Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2053:3914], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'30))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.911176Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2054:3915], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'31))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.911641Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2055:3916], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'32))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.912189Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2056:3917], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'33))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.912710Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2057:3918], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'34))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.913328Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2058:3919], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'35))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow 
\'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.913861Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2059:3920], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'36))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.914335Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2060:3921], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'37))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.914805Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2061:3922], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'38))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.915269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2062:3923], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'39))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.915731Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2063:3924], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'40))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.916272Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2064:3925], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'41))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.916831Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2065:3926], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'42))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.917403Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2066:3927], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'43))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.917919Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2067:3928], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'44))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } 
} 2025-06-03T10:30:31.918417Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2068:3929], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'45))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.918879Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2069:3930], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'46))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.919322Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2070:3931], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'47))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.919784Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2071:3932], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'48))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.920254Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2072:3933], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'49))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } 2025-06-03T10:30:31.920844Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 268830210, Sender [1:2073:3934], Recipient [1:757:2645]: NKikimrTabletTxBase.TEvLocalMKQL Program { Program { Text: "\n (\n (let key \'(\'(\'key (Uint64 \'50))))\n (let select \'(\'key))\n (return (AsList\n (SetResult \'Result (SelectRow \'__user__Table key select))\n ))\n )\n " } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:31.932807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:31.932832Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.932836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 
2025-06-03T10:30:31.932843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:31.932858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:31.932861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:31.932869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.932886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:31.932996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:31.933075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:31.943572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:31.943595Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:31.946959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:31.947060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:31.947093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:31.949398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:31.949482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:31.949576Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.949631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:31.950360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.950436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:31.950818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.950834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.950848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:31.950858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.950865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:31.950893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.952412Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:31.976023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:31.976118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.976202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:31.976266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:31.976280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.977221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.977256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:31.977358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.977374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:31.977382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:31.977390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:31.978315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.978333Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:31.978341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 
2025-06-03T10:30:31.978872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.978886Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.978895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.978905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:31.979732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:31.980201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:31.980260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:31.980500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:31.980570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.980654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:31.980664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.980710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:31.980724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:31.981316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.981330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.981390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:30:32.269134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:30:32.269172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:32.269179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-03T10:30:32.269183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:32.269188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:30:32.269361Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:32.269376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:32.269382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:30:32.269387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:32.269392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:32.269515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:32.269526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:32.269530Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:30:32.269534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, 
LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:32.269539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:32.269547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-03T10:30:32.269957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:32.269968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:32.270276Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:32.270348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.270419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-06-03T10:30:32.270699Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-03T10:30:32.270935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:32.270982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:32.271182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:32.271190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:32.271213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:32.271324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:32.271331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:32.271342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.271596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:30:32.271633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:30:32.271699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:32.271708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:32.272167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:32.272178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:32.272194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:32.272238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-03T10:30:32.272325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-03T10:30:32.272334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-03T10:30:32.272432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-03T10:30:32.272456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:30:32.272466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:2093:3695] TestWaitNotification: OK eventTxId 104 2025-06-03T10:30:32.273968Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:32.274031Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 89us result status StatusPathDoesNotExist 2025-06-03T10:30:32.274090Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir 
CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:32.274198Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:32.274212Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 16us result status StatusPathDoesNotExist 2025-06-03T10:30:32.274228Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas
>> THiveTest::TestFollowers [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable
>> TSchemeShardSubDomainTest::RestartAtInFly [GOOD]
>> THiveTest::TestFollowersReconfiguration
>> TSchemeShardSubDomainTest::ConcurrentCreateSubDomainAndDescribe [GOOD]
>> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD]
>> THiveTest::TestReassignUseRelativeSpace [GOOD]
>> THiveTest::TestManyFollowersOnOneNode
>> THiveTest::TestLimitedNodeList [GOOD]
>> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects
>> THiveTest::TestHiveNoBalancingWithLowResourceUsage
>> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD]
>> THiveTest::TestRestartsWithFollower [GOOD]
>> THiveTest::TestStartTabletTwiceInARow
>> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD]
>> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD]
>> TKeyValueTest::TestInlineCopyRangeWorks [GOOD]
>> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousCreateDelete
>> THiveTest::TestManyFollowersOnOneNode [GOOD]
>> THiveTest::TestCreate100Tablets [GOOD]
>> TSchemeShardSubDomainTest::CreateForceDropSolomon
>> THiveTest::TestHiveBalancerWithPrefferedDC1 [GOOD]
>> THiveTest::TestFollowersReconfiguration [GOOD]
>> TPartBtreeIndexIteration::FewNodes_Groups_Slices [GOOD]
>> THiveTest::TestCreateSubHiveCreateTablet
>> THiveTest::TestFollowerPromotion
>> TPartBtreeIndexIteration::FewNodes_History_Slices
>> THiveTest::TestStartTabletTwiceInARow [GOOD]
>> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject
>> TKeyValueTest::TestInlineCopyRangeWorksNewApi
>> THiveTest::TestLockTabletExecutionTimeout
>> THiveTest::TestHiveBalancerWithPrefferedDC2
>> THiveTest::TestCreateSubHiveCreateTablet [GOOD]
>> THiveTest::TestCreateSubHiveCreateManyTablets
>> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD]
>> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD]
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejects [GOOD]
>> THiveTest::TestFollowerPromotion [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:31.849331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:31.849360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.849366Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:31.849373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:31.849390Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:31.849395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:31.849407Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:31.849428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:31.849543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:31.849619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:31.862500Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:31.862525Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:31.866363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:31.866493Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:31.866537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
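
The startup NOTICE lines just above describe the schemeshard's background queues purely by their knobs, e.g. "BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10". What "InflightLimit" buys is easiest to see in a toy queue; the following is an illustrative sketch under simplified synchronous-task assumptions, not YDB's actual queue class:

```cpp
// Toy "InflightLimit"-style queue: at most Limit items run concurrently,
// the rest wait until a running one completes.
#include <cstddef>
#include <deque>
#include <functional>
#include <iostream>

class TInflightQueue {
    std::deque<std::function<void()>> Waiting; // parked until a slot frees up
    std::size_t Inflight = 0;
    const std::size_t Limit;

public:
    explicit TInflightQueue(std::size_t limit) : Limit(limit) {}

    void Enqueue(std::function<void()> task) {
        if (Inflight < Limit) {
            ++Inflight;
            task(); // under the limit: start immediately
        } else {
            Waiting.push_back(std::move(task));
        }
    }

    void OnComplete() { // a running task finished; promote a waiter
        --Inflight;
        if (!Waiting.empty()) {
            auto next = std::move(Waiting.front());
            Waiting.pop_front();
            ++Inflight;
            next();
        }
    }

    std::size_t InflightCount() const { return Inflight; }
};

int main() {
    TInflightQueue q(/*limit=*/2);
    for (int i = 0; i < 4; ++i)
        q.Enqueue([i] { std::cout << "start task " << i << "\n"; }); // 0 and 1 start
    std::cout << "inflight: " << q.InflightCount() << "\n";          // prints 2
    q.OnComplete();                                                  // task 2 starts
}
```

The Timeout, Rate, and WakeupInterval knobs in the same log lines bound how long an item may run, how fast items are admitted, and how often the queue rechecks for work; they are omitted here to keep the sketch focused on the in-flight cap.
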
2025-06-03T10:30:31.871513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:31.871587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:31.871692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.871741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:31.872413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.872469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:31.872841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.872853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:31.872865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:31.872874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.872880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:31.872901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.874571Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:31.889807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:31.889882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.889944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:31.889986Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:31.889995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 
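
A pattern that recurs throughout these test logs is the scheme-board publication bookkeeping: an operation records which path versions it must publish ("Publication details: tx: 104, [OwnerId: ..., LocalPathId: 2], 18446744073709551615"), each TEvUpdateAck removes one entry ("Publication in-flight, count: 2", then "count: 1"), and once the set drains the transaction notifies its subscribers ("Publication complete, notify & remove"). Note that 18446744073709551615 is 2^64 - 1, the maximum uint64, used as a sentinel version for dropped paths. Below is a hedged sketch of just the counting logic, with simplified types rather than the actual TTxAckPublishToSchemeBoard:

```cpp
// Illustrative publication-ack bookkeeping, not YDB's implementation.
#include <cstdint>
#include <iostream>
#include <limits>
#include <map>
#include <utility>

// Version 18446744073709551615 == max uint64: "path dropped" sentinel.
constexpr uint64_t DroppedVersion = std::numeric_limits<uint64_t>::max();

struct TPublisher {
    // txId -> (localPathId -> version still awaiting an ack)
    std::map<uint64_t, std::map<uint64_t, uint64_t>> Pending;

    void Start(uint64_t txId, std::map<uint64_t, uint64_t> paths) {
        Pending[txId] = std::move(paths);
    }

    // One TEvUpdateAck from the scheme board.
    void AckPublish(uint64_t txId, uint64_t pathId) {
        auto it = Pending.find(txId);
        if (it == Pending.end()) return; // unknown tx: nothing to do
        std::cout << "Publication in-flight, count: " << it->second.size()
                  << ", txId: " << txId << "\n";
        it->second.erase(pathId);
        if (it->second.empty()) {
            std::cout << "Publication complete, notify & remove, txId: "
                      << txId << "\n";
            Pending.erase(it); // the real code would satisfy waiters here
        }
    }
};

int main() {
    TPublisher p;
    // Mirrors tx 104 above: path 1 at version 7, path 2 dropped.
    p.Start(104, {{1, 7}, {2, DroppedVersion}});
    p.AckPublish(104, 1); // -> "Publication in-flight, count: 2"
    p.AckPublish(104, 2); // -> "count: 1", then "Publication complete"
}
```
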
2025-06-03T10:30:31.890725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:31.890747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:31.890801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.890808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:31.890813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:31.890817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:31.891241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.891250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:31.891254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:31.891578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.891588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:31.891593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.891599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:31.892096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:31.892472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:31.892509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:31.892668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at 
schemeshard: 72057594046678944 2025-06-03T10:30:31.892687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:31.892703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.892757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:31.892762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:31.892795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:31.892803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:31.893197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:31.893204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:31.893239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
75Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:33.297286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:15 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:33.297289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:14 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:33.297312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:33.297317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:16 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:33.297379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-03T10:30:33.298024Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:33.298144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:33.298210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-03T10:30:33.298454Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 15 TxId_Deprecated: 15 TabletID: 72075186233409556 2025-06-03T10:30:33.298541Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 14 TxId_Deprecated: 14 TabletID: 72075186233409555 2025-06-03T10:30:33.298860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 15 ShardOwnerId: 72057594046678944 ShardLocalIdx: 15, at schemeshard: 72057594046678944 2025-06-03T10:30:33.298894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 3 2025-06-03T10:30:33.298988Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:33.299086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 14 ShardOwnerId: 72057594046678944 ShardLocalIdx: 14, at schemeshard: 72057594046678944 2025-06-03T10:30:33.299107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 2 Forgetting tablet 72075186233409556 Forgetting tablet 72075186233409555 2025-06-03T10:30:33.299277Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 16 TxId_Deprecated: 16 
TabletID: 72075186233409557 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409557 2025-06-03T10:30:33.299736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:33.299772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:33.299818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 16 ShardOwnerId: 72057594046678944 ShardLocalIdx: 16, at schemeshard: 72057594046678944 2025-06-03T10:30:33.299833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 16] was 1 2025-06-03T10:30:33.299868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-03T10:30:33.299895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 139 2025-06-03T10:30:33.299905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:33.299909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 16], at schemeshard: 72057594046678944 2025-06-03T10:30:33.299918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:33.300043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:33.300049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:33.300068Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:33.300451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:33.300462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:33.300477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-03T10:30:33.300480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409556 2025-06-03T10:30:33.300491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:14 2025-06-03T10:30:33.300494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:14 tabletId 72075186233409555 2025-06-03T10:30:33.300850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:33.300857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:33.300870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-03T10:30:33.300874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409557 2025-06-03T10:30:33.300898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:33.300924Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:33.300934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:33.300938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:33.300949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:33.301222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 139, wait until txId: 139 TestWaitNotification wait txId: 139 2025-06-03T10:30:33.301443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 139: send EvNotifyTxCompletion 2025-06-03T10:30:33.301450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 139 2025-06-03T10:30:33.301566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 139, at schemeshard: 72057594046678944 2025-06-03T10:30:33.301583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 139: got EvNotifyTxCompletionResult 2025-06-03T10:30:33.301587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 139: satisfy waiter [1:2267:4038] TestWaitNotification: OK eventTxId 139 2025-06-03T10:30:33.301720Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.301757Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 54us result status StatusSuccess 2025-06-03T10:30:33.301826Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 5 ShardsInside: 0 ShardsLimit: 6 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 20 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantDirTable [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.968701Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.968724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.968728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.968732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.968743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.968746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.968753Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.968769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s,
IsManualStartup# false 2025-06-03T10:30:32.968856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.968920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.979097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.979118Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.982352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.982430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.982463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.984422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.984485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.984591Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.984639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.985240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.985283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.985554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.985563Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.985572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.985578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.985583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.985600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.986842Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:33.003575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:33.003669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.003744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:33.003805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:33.003818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.004867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:33.004908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:33.004985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.004999Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:33.005005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:33.005011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:33.005801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.005821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:33.005830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:33.006403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.006419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.006426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:33.006434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:33.007267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:33.007895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:33.007950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:33.008186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:33.008223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:33.008244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:33.008331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:33.008345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:33.008390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:33.008407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:33.009242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:33.009263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:33.009344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
rdOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:33.174810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:33.174987Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 2025-06-03T10:30:33.175087Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:33.175162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:33.175188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:33.175292Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409548 Forgetting tablet 72075186233409551 2025-06-03T10:30:33.175706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-03T10:30:33.175734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-03T10:30:33.175974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:33.175996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:33.176146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:33.176172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:33.176503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:33.176606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:33.176627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:33.176633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: 
TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:33.176660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:33.176738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:33.176801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:30:33.176811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-03T10:30:33.176826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:30:33.176830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-03T10:30:33.177370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:33.177393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:33.177399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:33.177408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:33.177413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:33.177437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:30:33.177441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:30:33.177502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:33.177508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:33.177523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:33.177863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:33.177870Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:33.177884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:33.177889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: 
Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:33.177920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:33.178191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:30:33.178254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:30:33.178263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:30:33.178338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:30:33.178358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:33.178364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:785:2674] TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:33.178451Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.178496Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 62us result status StatusPathDoesNotExist 2025-06-03T10:30:33.178551Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:33.178606Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.178638Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 33us result status StatusSuccess 2025-06-03T10:30:33.178719Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: 
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ColumnSchemeLimitsRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.785950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.785980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.785987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.785994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.786010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.786015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.786032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.786048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
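The traces above repeatedly walk each operation through the schemeshard state machine: schemeshard_impl.cpp:2500 logs transitions such as "Change state for txid 1:0 2 -> 3", then "3 -> 128" and "128 -> 240". A minimal sketch for pulling those transitions out of a captured log follows; the line format is inferred from this excerpt, not from a documented YDB log schema, so treat the regex as a heuristic for this trace only.

#!/usr/bin/env python3
"""Minimal sketch: extract schemeshard txid state transitions from a captured
test log. The line format is inferred from the excerpt above (e.g.
"Change state for txid 1:0 2 -> 3"); it is not an official YDB log schema."""
import re
import sys

# Matches the "Change state for txid <tx>:<part> <from> -> <to>" fragments
# that schemeshard_impl.cpp:2500 emits in the trace above.
TRANSITION = re.compile(r"Change state for txid (\d+):(\d+) (\d+) -> (\d+)")

def transitions(text: str):
    """Yield (txid, part, from_state, to_state) tuples in log order."""
    for m in TRANSITION.finditer(text):
        yield tuple(int(g) for g in m.groups())

if __name__ == "__main__":
    data = sys.stdin.read()
    for txid, part, src, dst in transitions(data):
        print(f"tx {txid}:{part}: {src} -> {dst}")

Fed this excerpt on stdin, it prints the 2 -> 3, 3 -> 128, 128 -> 240 chain for tx 1:0 and the 128 -> 129, 129 -> 240 chain for tx 108:0, which makes the per-operation progress much easier to follow than the raw interleaved records.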
2025-06-03T10:30:32.786177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.786250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.803150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.803178Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.807896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.808059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.808107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.810597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.810688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.810813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.810879Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.811828Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.811890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.812185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.812194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.812205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.812211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.812216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.812235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.813585Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.829683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.829779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.829850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.829899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.829909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.830621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.830646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.830698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.830706Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.830710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.830715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.831116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.831126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.831131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.831439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.831446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.831450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.831456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.832007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:32.832432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.832472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.832647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.832670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.832685Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.832745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.832752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.832785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.832796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.833237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.833244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.833284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936746 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:33.465430Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:199: TAlterOlapStore TPropose operationId# 108:0 HandleReply TEvOperationPlan at tablet: 72057594046678944, stepId: 5000004 2025-06-03T10:30:33.465489Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 128 -> 129 2025-06-03T10:30:33.465533Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:33.465544Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:30:33.465651Z node 2 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186233409549;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=108;fline=tx_controller.cpp:214;event=finished_tx;tx_id=108; 2025-06-03T10:30:33.466031Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:33.466042Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:33.466087Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:30:33.466139Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:33.466145Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:337:2312], at schemeshard: 72057594046678944, txId: 108, path id: 1 2025-06-03T10:30:33.466149Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:337:2312], at schemeshard: 72057594046678944, txId: 108, path id: 5 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000004 2025-06-03T10:30:33.466238Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.466242Z node 2 :FLAT_TX_SCHEMESHARD INFO: alter_store.cpp:305: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:33.466249Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: alter_store.cpp:332: TAlterOlapStore TProposedWaitParts operationId# 108:0 ProgressState wait for NotifyTxCompletionResult tabletId: 72075186233409549 2025-06-03T10:30:33.466394Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:33.466404Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 
72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:33.466408Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-03T10:30:33.466412Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:30:33.466416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:30:33.466543Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:33.466552Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:33.466554Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 108 2025-06-03T10:30:33.466557Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-06-03T10:30:33.466560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 4 2025-06-03T10:30:33.466567Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 108, ready parts: 0/1, is published: true 2025-06-03T10:30:33.466924Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 108:0 from tablet: 72057594046678944 to tablet: 72075186233409549 cookie: 72057594046678944:4 msg type: 275382275 2025-06-03T10:30:33.467245Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-03T10:30:33.467269Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 2025-06-03T10:30:33.478349Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6151: Handle TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-03T10:30:33.478377Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 108, tablet: 72075186233409549, partId: 0 2025-06-03T10:30:33.478405Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 108:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409549 TxId: 108 2025-06-03T10:30:33.478420Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 129 -> 240 FAKE_COORDINATOR: Erasing txId 108 2025-06-03T10:30:33.479002Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.479053Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.479061Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-03T10:30:33.479076Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:30:33.479081Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:33.479085Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:30:33.479088Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:33.479092Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: true 2025-06-03T10:30:33.479105Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:490:2437] message: TxId: 108 2025-06-03T10:30:33.479111Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:33.479116Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 108:0 2025-06-03T10:30:33.479120Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 108:0 2025-06-03T10:30:33.479149Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:30:33.479610Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:30:33.479621Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [2:901:2809] TestWaitNotification: OK eventTxId 108 TestModificationResults wait txId: 109 2025-06-03T10:30:33.480603Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterColumnStore AlterColumnStore { Name: "OlapStore1" AlterSchemaPresets { Name: "default" AlterSchema { AddColumns { Name: "comment2" Type: "Utf8" } } } } } TxId: 109 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:33.480667Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: alter_store.cpp:465: TAlterOlapStore Propose, path: /MyRoot/OlapStore1, opId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:30:33.480776Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 109:1, propose status:StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:33.481441Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 109, response: Status: StatusSchemeError Reason: "Too many columns. new: 4. 
Limit: 3" TxId: 109 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:33.481474Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 109, database: /MyRoot, subject: , status: StatusSchemeError, reason: Too many columns. new: 4. Limit: 3, operation: ALTER COLUMN STORE, path: /MyRoot/OlapStore1 TestModificationResult got TxId: 109, wait until txId: 109 TestWaitNotification wait txId: 109 2025-06-03T10:30:33.481560Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 109: send EvNotifyTxCompletion 2025-06-03T10:30:33.481570Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 109 2025-06-03T10:30:33.481651Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 109, at schemeshard: 72057594046678944 2025-06-03T10:30:33.481675Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-03T10:30:33.481680Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:934:2842] TestWaitNotification: OK eventTxId 109 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainsInSeparateDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.875693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.875718Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.875722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.875727Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.875743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.875746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.875754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.875771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:32.875855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.875922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.886757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.886783Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.890291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.890390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.890425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.894816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.894916Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.895090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.895153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.896195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.896273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.896630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.896646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.896659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.896669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.896676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.896707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.898580Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.920641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
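Each test drives schemeshard through TTxOperationPropose with a text-format Transaction message (WorkingDir, OperationType, then the operation body), and the proposed TxId trails the closing braces. A rough sketch that lists the proposed operations per transaction, assuming the single-line record layout seen in this excerpt; the regex is a heuristic for this trace, not a parser for the actual protobuf text format.

#!/usr/bin/env python3
"""Minimal sketch: list the operations proposed to schemeshard in a captured
log. The Transaction shape is copied from the excerpt above; this is a
heuristic regex for this trace, not a real protobuf text-format parser."""
import re
import sys

# "TTxOperationPropose Execute, message: Transaction { WorkingDir: "..."
#  OperationType: ... <body> } TxId: N" -- non-greedy match up to the
# first "} TxId:" after the operation type.
PROPOSE = re.compile(
    r'TTxOperationPropose Execute, message: Transaction \{ '
    r'WorkingDir: "([^"]*)" OperationType: (\w+)'
    r'.*?\} TxId: (\d+)',
    re.DOTALL,
)

if __name__ == "__main__":
    for workdir, optype, txid in PROPOSE.findall(sys.stdin.read()):
        print(f"tx {txid}: {optype} in {workdir!r}")

On this excerpt it reports tx 1 as ESchemeOpAlterSubDomain in '/' and tx 109 as ESchemeOpAlterColumnStore in '/MyRoot', matching the AUDIT lines (ALTER DATABASE, ALTER COLUMN STORE) that follow each propose.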
2025-06-03T10:30:32.920738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.920819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.920884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.920897Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.921941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.921978Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.922053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.922066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.922072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.922079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.922768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.922784Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.922790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.923259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.923271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.923279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.923288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.924158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-03T10:30:32.924739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.924790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.925004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.925039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.925061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.925149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.925158Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.925204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.925219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.925805Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.925819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.925872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
EMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:33.049907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:33.049914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:33.049918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:33.049924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:30:33.049931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:33.049937Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:30:33.049943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:30:33.049994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 9 2025-06-03T10:30:33.050001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 1 2025-06-03T10:30:33.050010Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 7 2025-06-03T10:30:33.050014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 4], 3 2025-06-03T10:30:33.050174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:33.050189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:33.050194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:33.050201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-03T10:30:33.050207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:33.050588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:33.050604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 
72057594046678944 Generation: 2 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:33.050610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:33.050616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-03T10:30:33.050621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 8 2025-06-03T10:30:33.050635Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1 2025-06-03T10:30:33.050641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:913:2745] 2025-06-03T10:30:33.051155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:33.051356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:33.051375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:33.051383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:914:2746] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:33.051514Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.051568Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_0" took 66us result status StatusSuccess 2025-06-03T10:30:33.051719Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:33.051812Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.051837Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains/USER_1" took 27us result status StatusSuccess 2025-06-03T10:30:33.051885Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains/USER_1" PathDescription { Self { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 4 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 4 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:33.051940Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SubDomains" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:33.051954Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SubDomains" took 17us result status StatusSuccess 2025-06-03T10:30:33.052013Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SubDomains" PathDescription { Self { Name: "SubDomains" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: 
EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } Children { Name: "USER_1" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.598374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.598408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.598415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.598422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.598441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.598446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.598459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.598481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
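The verdict separators ("------- [TM] ... >> Suite::Test [GOOD]") and the AUDIT records (e.g. tx 109 rejected with StatusSchemeError, reason "Too many columns. new: 4. Limit: 3") carry the per-test outcome buried in these captures. A small sketch that condenses a capture to verdicts plus audited rejections; both patterns are inferred from this excerpt and may need adjusting for other suites.

#!/usr/bin/env python3
"""Minimal sketch: condense a captured run to test verdicts and audited
rejections. Both patterns are inferred from the excerpt above, not from a
documented format, and may need adjusting for other suites."""
import re
import sys

# "... unittest >> Suite::TestName [GOOD]" separators between test blocks.
VERDICT = re.compile(r">> (\S+::\S+) \[(\w+)\]")
# Audit records; in this trace only rejected proposes carry a "reason:".
AUDIT = re.compile(r"AUDIT: txId: (\d+).*?status: (Status\w+), reason: ([^,]+)")

if __name__ == "__main__":
    text = sys.stdin.read()
    for name, verdict in VERDICT.findall(text):
        print(f"{verdict:>6}  {name}")
    for txid, status, reason in AUDIT.findall(text):
        print(f"tx {txid}: {status} ({reason.strip()})")

Run over this excerpt it lists ColumnSchemeLimitsRejects, CreateSubDomainsInSeparateDir and CreateWithoutTimeCastBuckets as [GOOD] and surfaces the tx 109 StatusSchemeError rejection without wading through the full trace.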
2025-06-03T10:30:32.598642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.598725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.614422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.614455Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.619261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.619385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.619425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.622754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.622844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.622970Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.623033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.623992Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.624061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.624429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.624440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.624449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.624456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.624461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.624482Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.626218Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.648629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.648739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.648822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.648882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.648895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.649900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.649941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.650030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.650046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.650053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.650061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.650711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.650724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.650729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.651110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.651118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.651123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.651131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.651698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:32.652069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.652111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.652337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.652365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.652386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.652463Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.652472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.652515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.652531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.652924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.652930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.652975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.652979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:30:32.653065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.653072Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:30:32.653084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is 
done id#1:0 progress is 1/1 2025-06-03T10:30:32.653088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.653093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:30:32.653095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.653099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:30:32.653103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.653107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:30:32.653110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:30:32.653120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:32.653125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:30:32.653128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:30:32.653417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:30:32.653433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:30:32.653437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:30:32.653443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:30:32.653446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.653458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:30:32.654383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:30:32.654495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-03T10:30:32.655200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "USER_0" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.655249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.655262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.655366Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2259] Bootstrap 2025-06-03T10:30:32.656830Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2259] Become StateWork (SchemeCache [1:274:2264]) 2025-06-03T10:30:32.657117Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:30:32.658041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: TimeCastBucketsPerMediator is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: TimeCastBucketsPerMediator is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-03T10:30:32.658280Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:30:32.658346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:32.658354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:30:32.658431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658454Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:32.658463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:284:2274] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:32.658554Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658583Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 40us result status StatusPathDoesNotExist 2025-06-03T10:30:32.658636Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD]
>> THiveTest::TestFollowerPromotionFollowerDies
>> THiveTest::TestFollowerPromotionFollowerDies [GOOD]
>> THiveTest::TestHiveBalancer
>> TPartBtreeIndexIteration::FewNodes_History_Slices [GOOD]
>> KqpQueryService::ClosedSessionRemovedFromPool [GOOD]
>> THiveTest::TestSpreadNeighboursWithUpdateTabletsObject [GOOD]
>> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain
>> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices
>> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD]
>> THiveTest::TestLockTabletExecutionTimeout [GOOD]
>> KqpQueryService::CloseConnection
>> THiveTest::TestLockTabletExecutionRebootTimeout
>> THiveTest::TestSpreadNeighboursDifferentOwners
>> KqpQueryService::CloseConnection [GOOD]
>> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD]
|66.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sequence/test-results/unittest/{meta.json ...
results_accumulator.log} |66.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/ydb-core-blobstorage-ut_blobstorage-ut_balancing ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateDelete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:33.993540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:33.993568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:33.993572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:33.993577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:33.993593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:33.993597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:33.993606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:33.993622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:33.993734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:33.993802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:34.003949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:34.003980Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:34.007840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:34.007988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:34.008032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:34.010354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:34.010440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 
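Every "Test command err:" blob in this file repeats the same boot preamble (BackgroundCompactionQueue, BorrowedCompactionQueue, StatsBatching, TxInitSchema and so on) because each unit test boots a fresh schemeshard tablet; only the records after the first propose differ per test. When scanning a long run like this one, it helps to reduce the stream to its result headers. A small self-contained filter that does this, assuming the header layout seen in this log (the "------- [TM] {...} suite >> Test [STATUS]" shape is inferred from the text, not from any documented ya format):

```cpp
#include <iostream>
#include <regex>
#include <string>

// Prints one "STATUS <tab> test <tab> suite path" line per test-result header
// found on stdin. Matches headers like:
//   ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/.../unittest >> TSuite::TName [GOOD]
int main() {
    static const std::regex header(
        R"(-------\s+\[\w+\]\s+\{[^}]*\}\s+(\S+)\s+>>\s+(\S+)\s+\[(\w+)\])");
    std::string line;
    while (std::getline(std::cin, line)) {
        for (std::sregex_iterator it(line.begin(), line.end(), header), end;
             it != end; ++it) {
            const std::smatch& m = *it;
            std::cout << m[3] << '\t' << m[2] << '\t' << m[1] << '\n';
        }
    }
    return 0;
}
```

Compiled with any C++17 compiler and fed the raw log on stdin, it would emit one line each for CreateWithoutTimeCastBuckets and SimultaneousCreateDelete from the headers above.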
2025-06-03T10:30:34.010553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.010606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:34.011209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.011261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:34.011551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.011560Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.011571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:34.011577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.011582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:34.011599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.012935Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:34.028648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:34.028740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.028813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:34.028863Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:34.028874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.030315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.030358Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:34.030439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.030451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:34.030456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:34.030462Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:34.031057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.031066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:34.031070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:34.031371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.031381Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.031386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.031393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:34.031973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:34.032347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:34.032384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:34.032567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.032591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:34.032611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.032674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:34.032679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.032717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.032727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:34.033114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.033121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.033168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:34.116954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-03T10:30:34.116991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-03T10:30:34.117081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.117114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:34.117123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:34.117229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 128 -> 240 2025-06-03T10:30:34.117238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:34.117283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.117317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-03T10:30:34.117331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:34.117996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.118009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.118105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:34.118128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.118135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:34.118143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:34.118236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.118259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:34.118275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:34.118281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.118288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:34.118293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.118300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:34.118307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.118314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 
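Operation 101 above has just walked the same numeric state path that every suboperation in this file follows in its "Change state for txid" records: 2 -> 3 once TCreateParts has nothing left to create, 3 -> 128 when TConfigureParts finishes, 128 -> 240 when the coordinator's plan step is handled, after which TDone reports the part complete. A sketch of that progression; the state numbers and the four stage names are read off the log, while the real schemeshard enum is larger and is not reproduced here:

```cpp
#include <iostream>

// State codes exactly as they appear in the "Change state for txid" records;
// names are inferred from the surrounding ProgressState lines.
enum class ETxState : int {
    CreateParts    = 2,
    ConfigureParts = 3,
    Propose        = 128,
    Done           = 240,
};

// Advances one suboperation through the observed transitions, logging them
// in the same "<from> -> <to>" style as the scheme shard.
ETxState Advance(ETxState state) {
    ETxState next = state;
    switch (state) {
        case ETxState::CreateParts:    next = ETxState::ConfigureParts; break;
        case ETxState::ConfigureParts: next = ETxState::Propose;        break;
        case ETxState::Propose:        next = ETxState::Done;           break;
        case ETxState::Done:           break;  // terminal: TDone runs here
    }
    if (next != state) {
        std::cout << "Change state for txid 1:0 " << static_cast<int>(state)
                  << " -> " << static_cast<int>(next) << '\n';
    }
    return next;
}

int main() {
    ETxState s = ETxState::CreateParts;
    while (s != ETxState::Done) s = Advance(s);
    return 0;
}
```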
2025-06-03T10:30:34.118320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:34.118368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-03T10:30:34.118377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 1 2025-06-03T10:30:34.118386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:34.118391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:34.118569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.118585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.118591Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:34.118598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:34.118604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:34.118758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.118770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.118775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:34.118780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:34.118786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-03T10:30:34.118797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-03T10:30:34.118803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: 
TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:569:2477] 2025-06-03T10:30:34.120103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:34.120453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:34.120474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:34.120480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:570:2478] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:34.120640Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:34.120699Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 85us result status StatusSuccess 2025-06-03T10:30:34.120829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 
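The completion above is gated on a countdown: tx 101 finished with "Publication still in progress ... publications: 2, subscribers: 1", each incoming TEvUpdateAck reported "Publication in-flight, count" at 2 and then 1, and only once the count drained did TTxAckPublishToSchemeBoard notify the waiting actor. A sketch of that bookkeeping; TPublicationTracker and its methods are illustrative names, not the real transaction classes:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <utility>

// An operation that published N path versions stays "in progress" until
// every (pathId, version) pair is acknowledged; only then are subscribers
// notified, as in the records above.
class TPublicationTracker {
    // txId -> set of (pathId, version) still awaiting TEvUpdateAck
    std::map<uint64_t, std::set<std::pair<uint64_t, uint64_t>>> InFlight;

public:
    void Publish(uint64_t txId, uint64_t pathId, uint64_t version) {
        InFlight[txId].insert({pathId, version});
    }

    // Returns true when the last outstanding ack for txId arrives, i.e. the
    // moment the log prints "Publication complete, notify & remove".
    bool Ack(uint64_t txId, uint64_t pathId, uint64_t version) {
        auto it = InFlight.find(txId);
        if (it == InFlight.end()) return false;
        // The log prints the pre-decrement count: 2, then 1.
        std::cout << "Publication in-flight, count: " << it->second.size()
                  << ", txId: " << txId << '\n';
        it->second.erase({pathId, version});
        if (!it->second.empty()) return false;
        InFlight.erase(it);
        return true;
    }
};

int main() {
    TPublicationTracker tracker;
    // The two publications recorded for txId 101 above.
    tracker.Publish(101, /*pathId=*/1, /*version=*/5);
    tracker.Publish(101, /*pathId=*/2, /*version=*/3);
    tracker.Ack(101, 1, 5);
    if (tracker.Ack(101, 2, 3)) {
        std::cout << "Publication complete, notify & remove, txId: 101\n";
    }
    return 0;
}
```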
2025-06-03T10:30:34.900866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:34.900896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:34.900901Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:34.900906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:34.900919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:34.900923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:34.900936Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:34.900949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:34.901075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:34.901162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:34.912472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:34.912497Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:34.915948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:34.916035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:34.916067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:34.918937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:34.919035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:34.919173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.919237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:34.920134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.920198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:34.920566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.920578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.920593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:34.920603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.920610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:34.920634Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.922243Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:34.942353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:34.942457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.942539Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:34.942603Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:34.942619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.943722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.943761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:34.943851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.943865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:34.943871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:34.943877Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:34.944567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.944582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:34.944589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:34.945103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.945119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.945126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.945135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:34.945935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:34.946514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:34.946568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:34.946798Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.946829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:34.946850Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.946925Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:34.946934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 
72057594046678944 2025-06-03T10:30:34.946980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.946993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:34.947888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.947902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.947962Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 2] 2025-06-03T10:30:34.964119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.964124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:34.964130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:34.964186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.964194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-03T10:30:34.964201Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:34.964206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.964212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:34.964215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.964220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:34.964225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:34.964232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:34.964236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:34.964248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:34.964254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:34.964258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:34.964263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:30:34.964364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.964375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.964380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:34.964385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:34.964390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:34.964473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.964484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:34.964488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:34.964492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:34.964496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:34.964507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:34.964720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:34.964727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 
2025-06-03T10:30:34.964749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:34.964785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:34.964789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:34.964795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.965131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:34.965450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:34.965467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:34.965475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:30:34.965514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:34.965519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:34.965580Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:34.965593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:34.965598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:341:2331] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:34.965659Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:34.965680Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 31us result status StatusPathDoesNotExist 2025-06-03T10:30:34.965734Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: 
"/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:34.965798Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:34.965813Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 15us result status StatusSuccess 2025-06-03T10:30:34.965867Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateForceDropSolomon [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:34.063196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:34.063230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:34.063234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: 
StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:34.063239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:34.063254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:34.063257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:34.063268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:34.063281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:34.063370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:34.063432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:34.075177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:34.075210Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:34.080438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:34.080634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:34.080684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:34.083862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:34.083966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:34.084105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.084171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:34.085208Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.085321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:34.085777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:34.085794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:34.085810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:34.085818Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.085825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:34.085849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.087548Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:34.113938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:34.114051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.114142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:34.114205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:34.114218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.115267Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.115309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:34.115391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.115407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:34.115414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:34.115421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:34.116320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.116341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:34.116350Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:34.116932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.116944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:34.116950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.116959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:34.117985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:34.118610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:34.118659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:34.118903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:34.118938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:34.118961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.119052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:34.119064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:34.119107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.119121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:34.119684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:30:34.119696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:34.119750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:8 2025-06-03T10:30:34.402882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:8 tabletId 72075186233409553 2025-06-03T10:30:34.404990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:12 2025-06-03T10:30:34.405013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:12 tabletId 72075186233409557 2025-06-03T10:30:34.405032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:30:34.405035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-03T10:30:34.405181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:16 2025-06-03T10:30:34.405186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:16 tabletId 72075186233409561 2025-06-03T10:30:34.405195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:25 2025-06-03T10:30:34.405198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:25 tabletId 72075186233409570 2025-06-03T10:30:34.405227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:20 2025-06-03T10:30:34.405231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:20 tabletId 72075186233409565 2025-06-03T10:30:34.405719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:29 2025-06-03T10:30:34.405733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:29 tabletId 72075186233409574 2025-06-03T10:30:34.405755Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:33 2025-06-03T10:30:34.405760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:33 tabletId 72075186233409578 2025-06-03T10:30:34.405788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:34.405793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:34.406076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:37 2025-06-03T10:30:34.406088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:37 tabletId 72075186233409582 2025-06-03T10:30:34.406246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:42 2025-06-03T10:30:34.406256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:42 tabletId 72075186233409587 2025-06-03T10:30:34.406301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:30:34.406307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:30:34.406400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:11 2025-06-03T10:30:34.406404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:11 tabletId 72075186233409556 2025-06-03T10:30:34.406413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:15 2025-06-03T10:30:34.406416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:15 tabletId 72075186233409560 2025-06-03T10:30:34.406779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:19 2025-06-03T10:30:34.406790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:19 tabletId 72075186233409564 2025-06-03T10:30:34.406829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:24 2025-06-03T10:30:34.406834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:24 tabletId 72075186233409569 2025-06-03T10:30:34.406854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:23 2025-06-03T10:30:34.406859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:23 tabletId 72075186233409568 2025-06-03T10:30:34.407075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:28 2025-06-03T10:30:34.407082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:28 tabletId 72075186233409573 2025-06-03T10:30:34.407100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:32 2025-06-03T10:30:34.407103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:32 tabletId 72075186233409577 2025-06-03T10:30:34.407116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:34.407119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:34.407129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:36 
2025-06-03T10:30:34.407136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:36 tabletId 72075186233409581 2025-06-03T10:30:34.407169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:34.407232Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:34.407250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:34.407262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:34.407306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:34.408030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:30:34.408117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:30:34.408124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:30:34.408211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:30:34.408234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:34.408242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:2059:3658] TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:34.408344Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/Solomon" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:34.408389Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/Solomon" took 74us result status StatusPathDoesNotExist 2025-06-03T10:30:34.408443Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/Solomon\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/Solomon" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 
LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:34.408520Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:34.408544Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 12us result status StatusPathDoesNotExist 2025-06-03T10:30:34.408561Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomain [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:35.576444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:35.576471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:35.576475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:35.576481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:35.576495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:35.576498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:35.576506Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:35.576522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:35.576646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:35.576710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:35.587702Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:35.587729Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:35.592156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:35.592352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:35.592403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:35.595386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:35.595507Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:35.595659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:35.595745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:35.596806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:35.596881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:35.597224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:35.597234Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:35.597245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:35.597252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:35.597258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:35.597280Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.598857Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:35.622723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:35.622838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.622931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:35.622996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:35.623009Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.624174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:35.624262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:35.624391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.624412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:35.624423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:35.624433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:35.625655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.625688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:35.625700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:35.626620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.626647Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:35.626657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose 
ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:35.626669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:35.627508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:35.628154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:35.628211Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:35.628457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:35.628495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:35.628518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:35.628623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:35.628633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:35.628685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:35.628700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:35.629360Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:35.629373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:35.629438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:35.830220Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:35.830223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:35.830226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:35.830230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:30:35.830241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:486:2440] message: TxId: 103 2025-06-03T10:30:35.830247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:35.830254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:30:35.830261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:30:35.830286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:30:35.831063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:35.831074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:487:2441] TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:35.831172Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831223Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 60us result status StatusSuccess 2025-06-03T10:30:35.831324Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: 
"root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831404Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831431Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 29us result status StatusSuccess 2025-06-03T10:30:35.831492Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 
72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831540Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831552Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 12us result status StatusSuccess 2025-06-03T10:30:35.831593Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831633Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:35.831646Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 14us result status StatusSuccess 2025-06-03T10:30:35.831680Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: 
false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RestartAtInFly [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.609795Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.609830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.609836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.609843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.609860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.609865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.609877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.609905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:32.610036Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.610116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.625359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.625381Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.629039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.629121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.629165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.631175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.631258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.631388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.631443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.632150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.632207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.632498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.632510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.632519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.632528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.632534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.632572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.633985Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.656560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , 
at schemeshard: 72057594046678944 2025-06-03T10:30:32.656634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.656695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.656738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.656747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.657561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.657588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.657642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.657650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.657654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.657659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.658128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658141Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.658483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.658499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.658508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.659019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:32.659347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.659380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.659551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.659571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.659588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.659640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.659645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.659674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.659683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.660053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.660060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.660096Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
de 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.697984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:32.698026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-03T10:30:32.698141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:30:32.698198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:32.698242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:32.698247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:32.698271Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698395Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit 
for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.698732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.700563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.701091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.701106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.701193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.701205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.701213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.701265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 100 2025-06-03T10:30:32.762738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:32.762761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- 
TTxNotificationSubscriber, SendToSchemeshard, txId 100 Leader for TabletID 72057594046678944 is [1:457:2406] sender: [1:518:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.762982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:32.763020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:32.763027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:516:2451] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:32.763131Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:32.763196Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 83us result status StatusSuccess 2025-06-03T10:30:32.763334Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.763436Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:32.763463Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 32us result status StatusSuccess 2025-06-03T10:30:32.763526Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndForbidTableInside [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.284188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.284214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.284218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.284223Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.284235Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.284239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.284251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.284265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:32.284363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.284433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.295818Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.295846Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.299640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.299747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.299786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.302226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.302335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.302483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.302568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.303486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.303542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.303811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.303819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.303830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.303836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.303841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.303858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.305080Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.321202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.321281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.321362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.321412Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.321423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.322353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.322385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.322448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.322457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.322461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.322467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.323011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.323024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.323029Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.323417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.323430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.323435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.323441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.323992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:32.324434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.324469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.324652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.324675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.324691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.324755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.324762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.324796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.324806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.325259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.325266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.325320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
: 72057594046678944, cookie: 101 2025-06-03T10:30:32.336246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:32.336249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:32.336252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:30:32.336258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:32.336265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 0/1, is published: true 2025-06-03T10:30:32.336805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-03T10:30:32.336832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:30:32.336872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-03T10:30:32.337082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 101:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 128 -> 240 2025-06-03T10:30:32.337153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:32.337160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:32.337280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:32.337624Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:32.337654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:32.337665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:32.337672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3 2025-06-03T10:30:32.337736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.337744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:32.337755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:32.337759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:32.337762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:32.337765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:32.337768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:32.337772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:32.337776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:32.337779Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:32.337789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:32.337794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:32.337796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-03T10:30:32.337799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 
2025-06-03T10:30:32.337874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:32.337882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:32.337885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:32.337888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-03T10:30:32.337892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:32.337970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:32.337978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:32.337984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:32.337987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:30:32.337989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:32.337996Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:32.339005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:32.339127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-03T10:30:32.340223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0/dir" OperationType: ESchemeOpCreateTable CreateTable { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId" } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.340309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.340328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:433: TCreateTable Propose, path: /MyRoot/USER_0/dir/table_0, opId: 102:0, schema: Name: "table_0" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value" Type: "Utf8" } KeyColumnNames: "RowId", at schemeshard: 72057594046678944 2025-06-03T10:30:32.340351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, at schemeshard: 72057594046678944 2025-06-03T10:30:32.341382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Inclusive subDomain do not support shared transactions" TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.341464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot/USER_0, subject: , status: StatusNameConflict, reason: Inclusive subDomain do not support shared transactions, operation: CREATE TABLE, path: /MyRoot/USER_0/dir/table_0 TestModificationResult got TxId: 102, wait until txId: 102 >> THiveTest::TestHiveBalancerWithPrefferedDC2 [GOOD] >> THiveTest::TestHiveBalancerWithPreferredDC3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadDeleteWithRestartsThenResponseOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! 
new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:88:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:92:2057] recipient: [11:90:2118] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:94:2057] recipient: [11:90:2118] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! 
new actor is[11:93:2119] Leader for TabletID 72057594037927937 is [11:93:2119] sender: [11:179:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 7927937 (actor [16:57:2097]) tablet resolver refreshed! new actor is[16:82:2111] Leader for TabletID 72057594037927937 is [16:82:2111] sender: [16:168:2057] recipient: [16:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [17:55:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:58:2057] recipient: [17:51:2095] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:75:2057] recipient: [17:14:2061] !Reboot 72057594037927937 (actor [17:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:77:2057] recipient: [17:36:2083] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:79:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [17:57:2097] sender: [17:81:2057] recipient: [17:80:2110] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:83:2057] recipient: [17:80:2110] !Reboot 72057594037927937 (actor [17:57:2097]) rebooted! !Reboot 72057594037927937 (actor [17:57:2097]) tablet resolver refreshed! new actor is[17:82:2111] Leader for TabletID 72057594037927937 is [17:82:2111] sender: [17:168:2057] recipient: [17:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [18:55:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:58:2057] recipient: [18:51:2095] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:75:2057] recipient: [18:14:2061] !Reboot 72057594037927937 (actor [18:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:78:2057] recipient: [18:36:2083] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:81:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [18:57:2097] sender: [18:82:2057] recipient: [18:80:2110] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:84:2057] recipient: [18:80:2110] !Reboot 72057594037927937 (actor [18:57:2097]) rebooted! !Reboot 72057594037927937 (actor [18:57:2097]) tablet resolver refreshed! new actor is[18:83:2111] Leader for TabletID 72057594037927937 is [18:83:2111] sender: [18:169:2057] recipient: [18:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [19:55:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:58:2057] recipient: [19:51:2095] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:75:2057] recipient: [19:14:2061] !Reboot 72057594037927937 (actor [19:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:81:2057] recipient: [19:36:2083] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:84:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [19:57:2097] sender: [19:85:2057] recipient: [19:83:2113] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:87:2057] recipient: [19:83:2113] !Reboot 72057594037927937 (actor [19:57:2097]) rebooted! !Reboot 72057594037927937 (actor [19:57:2097]) tablet resolver refreshed! new actor is[19:86:2114] Leader for TabletID 72057594037927937 is [19:86:2114] sender: [19:172:2057] recipient: [19:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [20:55:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:58:2057] recipient: [20:52:2095] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:75:2057] recipient: [20:14:2061] !Reboot 72057594037927937 (actor [20:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:81:2057] recipient: [20:36:2083] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:84:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [20:57:2097] sender: [20:85:2057] recipient: [20:83:2113] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:87:2057] recipient: [20:83:2113] !Reboot 72057594037927937 (actor [20:57:2097]) rebooted! !Reboot 72057594037927937 (actor [20:57:2097]) tablet resolver refreshed! new actor is[20:86:2114] Leader for TabletID 72057594037927937 is [20:86:2114] sender: [20:172:2057] recipient: [20:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [21:55:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:58:2057] recipient: [21:51:2095] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:75:2057] recipient: [21:14:2061] !Reboot 72057594037927937 (actor [21:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:82:2057] recipient: [21:36:2083] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:85:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [21:57:2097] sender: [21:86:2057] recipient: [21:84:2113] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:88:2057] recipient: [21:84:2113] !Reboot 72057594037927937 (actor [21:57:2097]) rebooted! !Reboot 72057594037927937 (actor [21:57:2097]) tablet resolver refreshed! new actor is[21:87:2114] Leader for TabletID 72057594037927937 is [21:87:2114] sender: [21:105:2057] recipient: [21:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [22:55:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:58:2057] recipient: [22:51:2095] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:75:2057] recipient: [22:14:2061] !Reboot 72057594037927937 (actor [22:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:84:2057] recipient: [22:36:2083] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:87:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [22:57:2097] sender: [22:88:2057] recipient: [22:86:2115] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:90:2057] recipient: [22:86:2115] !Reboot 72057594037927937 (actor [22:57:2097]) rebooted! !Reboot 72057594037927937 (actor [22:57:2097]) tablet resolver refreshed! new actor is[22:89:2116] Leader for TabletID 72057594037927937 is [22:89:2116] sender: [22:175:2057] recipient: [22:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [23:55:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:58:2057] recipient: [23:50:2095] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:75:2057] recipient: [23:14:2061] !Reboot 72057594037927937 (actor [23:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:84:2057] recipient: [23:36:2083] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:87:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [23:57:2097] sender: [23:88:2057] recipient: [23:86:2115] Leader for TabletID 72057594037927937 is [23:89:2116] sender: [23:90:2057] recipient: [23:86:2115] !Reboot 72057594037927937 (actor [23:57:2097]) rebooted! !Reboot 72057594037927937 (actor [23:57:2097]) tablet resolver refreshed! new actor is[23:89:2116] Leader for TabletID 72057594037927937 is [23:89:2116] sender: [23:175:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:58:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:75:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:85:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:88:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:89:2057] recipient: [24:87:2115] Leader for TabletID 72057594037927937 is [24:90:2116] sender: [24:91:2057] recipient: [24:87:2115] !Reboot 72057594037927937 (actor [24:57:2097]) rebooted! !Reboot 72057594037927937 (actor [24:57:2097]) tablet resolver refreshed! new actor is[24:90:2116] Leader for TabletID 72057594037927937 is [24:90:2116] sender: [24:176:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:58:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:75:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:88:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:90:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:92:2057] recipient: [25:91:2118] Leader for TabletID 72057594037927937 is [25:93:2119] sender: [25:94:2057] recipient: [25:91:2118] !Reboot 72057594037927937 (actor [25:57:2097]) rebooted! !Reboot 72057594037927937 (actor [25:57:2097]) tablet resolver refreshed! new actor is[25:93:2119] Leader for TabletID 72057594037927937 is [25:93:2119] sender: [25:179:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:58:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:75:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:88:2057] recipient: [26:36:2083] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:91:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:92:2057] recipient: [26:90:2118] Leader for TabletID 72057594037927937 is [26:93:2119] sender: [26:94:2057] recipient: [26:90:2118] !Reboot 72057594037927937 (actor [26:57:2097]) rebooted! !Reboot 72057594037927937 (actor [26:57:2097]) tablet resolver refreshed! new actor is[26:93:2119] Leader for TabletID 72057594037927937 is [26:93:2119] sender: [26:179:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:55:2057] recipient: [27:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:55:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:58:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:75:2057] recipient: [27:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TopicAutoscaling::PartitionSplit_AutosplitByLoad_AfterAlter [GOOD] Test command err: 2025-06-03T10:28:47.310190Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668136790283776:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.310223Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:28:47.347662Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e3a/r3tmp/tmp3rlYfz/pdisk_1.dat 2025-06-03T10:28:47.382215Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668136790283752:2079] 1748946527310039 != 1748946527310042 2025-06-03T10:28:47.383141Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11108, node 1 2025-06-03T10:28:47.395900Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/u93c/000e3a/r3tmp/yandexgeiFml.tmp 2025-06-03T10:28:47.395912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000e3a/r3tmp/yandexgeiFml.tmp 2025-06-03T10:28:47.395994Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000e3a/r3tmp/yandexgeiFml.tmp 2025-06-03T10:28:47.396041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:47.402385Z INFO: TTestServer started on Port 6046 GrpcPort 11108 2025-06-03T10:28:47.411855Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.411892Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:47.412981Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:6046 PQClient connected to localhost:11108 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.455575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.461770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:28:47.737214Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668136790284550:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.737247Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668136790284569:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.737257Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.738018Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668136790284606:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.738066Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:47.738206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:47.740419Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668136790284579:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:47.792156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.801176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.809813Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668136790284767:2509] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:28:47.823051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:28:47.828736Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668136790284783:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:28:47.828944Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NjgzOWUzMWEtNTUzZWIwNTItYzJiNGNlYjAtY2VmNGZlZDk=, ActorId: [1:7511668136790284547:2334], ActorState: ExecuteState, TraceId: 01jwtnc9frb51cy7a1crx2ferv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:28:47.829632Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668136790284938:2608] 2025-06-03T10:28:52.310504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668136790283776:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.310553Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.016947Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.021083Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.021532Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668162560088897:2674], Recipient [1:7511668136790284190:2191]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.021549Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.021552Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.021560Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668162560088893:2671], Recipient [1:7511668136790284190:2191]: {TEvModifySchemeTransaction txid# 281474976715673 TabletId# 72057594046644480} 2025-06-03T10:28:53.021562Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.029379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresho ... CE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668531817967857:2457]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.533846Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.533863Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.533896Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.533899Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.533905Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:35.536357Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7511668557587772917:2823]: NActors::TEvents::TEvWakeup 2025-06-03T10:30:35.536378Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 65538, Sender [0:0:0], Recipient [7:7511668557587772915:2822]: NActors::TEvents::TEvWakeup 2025-06-03T10:30:35.536403Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794759, Sender [7:7511668557587772945:2825], Recipient [7:7511668557587772917:2823]: NKikimr::NKeyValue::TChannelBalancer::TEvUpdateWeights 2025-06-03T10:30:35.536884Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 270794759, Sender [7:7511668557587772944:2824], Recipient [7:7511668557587772915:2822]: NKikimr::NKeyValue::TChannelBalancer::TEvUpdateWeights 2025-06-03T10:30:35.539442Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 65538 (NActors::TEvents::TEvWakeup), Tablet [7:7511668557587772917:2823], Partition 1, Sender [0:0:0], Recipient [7:7511668557587772991:2829], Cookie: 0 2025-06-03T10:30:35.539442Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 65538 (NActors::TEvents::TEvWakeup), Tablet [7:7511668557587772915:2822], Partition 2, Sender [0:0:0], Recipient [7:7511668557587772994:2831], Cookie: 0 2025-06-03T10:30:35.539459Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 65538, Sender [0:0:0], Recipient [7:7511668557587772994:2831]: NActors::TEvents::TEvWakeup 2025-06-03T10:30:35.539466Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 65538, Sender [0:0:0], Recipient [7:7511668557587772991:2829]: NActors::TEvents::TEvWakeup 2025-06-03T10:30:35.539555Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 271188501, Sender [7:7511668557587772994:2831], Recipient [7:7511668557587772915:2822]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-03T10:30:35.539555Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 271188501, Sender [7:7511668557587772991:2829], Recipient [7:7511668557587772917:2823]: NKikimr::TEvPQ::TEvPartitionCounters 2025-06-03T10:30:35.539560Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5253: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-03T10:30:35.539568Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1264: [PQ: 72075186224037897] Handle TEvPQ::TEvPartitionCounters PartitionId 1 2025-06-03T10:30:35.539568Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5253: HandleHook, processing event TEvPQ::TEvPartitionCounters 2025-06-03T10:30:35.539573Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:1264: [PQ: 72075186224037896] Handle TEvPQ::TEvPartitionCounters PartitionId 2 2025-06-03T10:30:35.539636Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5241: HandleHook, received event# 271188503, Sender [7:7511668557587772991:2829], Recipient [7:7511668557587772917:2823]: NKikimr::TEvPQ::TEvPartitionLabeledCounters 2025-06-03T10:30:35.539647Z node 7 :PERSQUEUE TRACE: pq_impl.cpp:5255: HandleHook, processing event TEvPQ::TEvPartitionLabeledCounters 2025-06-03T10:30:35.544099Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet [7:7511668557587772917:2823], Partition 1, Sender [7:7511668557587773003:2834], Recipient [7:7511668557587772991:2829], Cookie: 0 2025-06-03T10:30:35.544099Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188544 (NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated), Tablet 
[7:7511668557587772915:2822], Partition 2, Sender [7:7511668557587772997:2833], Recipient [7:7511668557587772994:2831], Cookie: 0 2025-06-03T10:30:35.544118Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7511668557587772997:2833], Recipient [7:7511668557587772994:2831]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:35.544124Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:35.544127Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188544, Sender [7:7511668557587773003:2834], Recipient [7:7511668557587772991:2829]: NKikimr::NPQ::NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:35.544136Z node 7 :PERSQUEUE TRACE: partition.h:609: StateIdle, processing event NReadQuoterEvents::TEvQuotaCountersUpdated 2025-06-03T10:30:35.579405Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668557587772915:2822], Partition 2, Sender [0:0:0], Recipient [7:7511668557587772994:2831], Cookie: 0 2025-06-03T10:30:35.579437Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668557587772994:2831]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.579443Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.579458Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.579487Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037896, Partition: 2, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.579491Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.579498Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037896, Partition: 2, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:35.592690Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668557587772917:2823], Partition 1, Sender [0:0:0], Recipient [7:7511668557587772991:2829], Cookie: 0 2025-06-03T10:30:35.592722Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668557587772991:2829]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.592728Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.592745Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.592775Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037897, Partition: 1, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.592778Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.592784Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037897, Partition: 1, State: StateIdle] TPartition::AnswerCurrentWrites. 
Responses.size()=0 2025-06-03T10:30:35.614720Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668579062610131:3034], Partition 3, Sender [0:0:0], Recipient [7:7511668579062610219:3041], Cookie: 0 2025-06-03T10:30:35.614719Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668579062610130:3033], Partition 4, Sender [0:0:0], Recipient [7:7511668579062610221:3043], Cookie: 0 2025-06-03T10:30:35.614741Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668579062610221:3043]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.614747Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.614749Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668579062610219:3041]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.614753Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.614769Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.614769Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.614808Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037899, Partition: 3, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.614808Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037898, Partition: 4, State: StateIdle] Have 0 items to delete all stuff. Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.614812Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.614812Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.614819Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:35.614820Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037899, Partition: 3, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 2025-06-03T10:30:35.634214Z node 7 :PERSQUEUE TRACE: partition.h:561: StateIdle event# 271188505 (NKikimr::TEvPQ::TEvUpdateAvailableSize), Tablet [7:7511668531817967800:2454], Partition 0, Sender [0:0:0], Recipient [7:7511668531817967857:2457], Cookie: 0 2025-06-03T10:30:35.634255Z node 7 :PERSQUEUE TRACE: partition.h:563: StateIdle, received event# 271188505, Sender [0:0:0], Recipient [7:7511668531817967857:2457]: NKikimr::TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.634261Z node 7 :PERSQUEUE TRACE: partition.h:589: StateIdle, processing event TEvPQ::TEvUpdateAvailableSize 2025-06-03T10:30:35.634284Z node 7 :PERSQUEUE TRACE: partition.cpp:398: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete old stuff 2025-06-03T10:30:35.634323Z node 7 :PERSQUEUE TRACE: partition.cpp:407: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Have 0 items to delete all stuff. 
Delete command NKikimrClient.TKeyValueRequest 2025-06-03T10:30:35.634334Z node 7 :PERSQUEUE TRACE: partition_write.cpp:163: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ProcessReserveRequests. 2025-06-03T10:30:35.634342Z node 7 :PERSQUEUE TRACE: partition_write.cpp:252: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::AnswerCurrentWrites. Responses.size()=0 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::CloseConnection [GOOD] Test command err: Trying to start YDB, gRPC: 11819, MsgBus: 9987 2025-06-03T10:30:18.131749Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668527872272953:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.131826Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d50/r3tmp/tmp5cTGBN/pdisk_1.dat 2025-06-03T10:30:18.229607Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11819, node 1 2025-06-03T10:30:18.236508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:18.236552Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:18.238428Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:18.257555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:18.257572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:18.257575Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:18.257631Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9987 TClient is connected to server localhost:9987 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:30:18.358423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.363962Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:18.374908Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.444408Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.472041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.490505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:18.619217Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668527872274419:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.619247Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.681894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.695512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.708615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.768112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.782987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.798282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.813258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:18.841651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668527872275073:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.841689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.841777Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668527872275078:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:18.842937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:18.845570Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:30:18.845677Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668527872275080:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:18.938779Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668527872275131:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 31261, MsgBus: 65101 2025-06-03T10:30:20.507301Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668538738965476:2214];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:20.507542Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d50/r3tmp/tmpUHD1Bg/pdisk_1.dat 2025-06-03T10:30:20.529909Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31261, node 2 2025-06-03T10:30:20.541413Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:20.541430Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:20.541432Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:20.541492Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65101 TClient is connected to server localhost:65101 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:20.611476Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:20.611517Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:20.611902Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propos ... 
3319, node 4 2025-06-03T10:30:35.297325Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:35.297337Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:35.297339Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:35.297383Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8998 TClient is connected to server localhost:8998 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:35.375772Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:35.375799Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:35.376936Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:30:35.379265Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:35.380248Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:30:35.381520Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:35.437272Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:35.454911Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:35.466366Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:30:35.583601Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668601601788991:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:35.583632Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:35.591816Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.602080Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.612752Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.627232Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.640961Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.655313Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.669444Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:35.787415Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668601601789644:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:35.787438Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:35.787452Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668601601789649:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:35.788307Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:35.790737Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668601601789651:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:35.886088Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668601601789702:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:36.007748Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-03T10:30:36.010723Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-03T10:30:36.014749Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-03T10:30:36.019809Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-03T10:30:36.025772Z node 4 :RPC_REQUEST WARN: rpc_execute_query.cpp:472: Client lost 2025-06-03T10:30:36.025843Z node 4 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [4:7511668605896757291:2521] TxId: 281474976715672. Ctx: { TraceId: 01jwtnfk7m973ds26gw8xrp8gw, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
<main>: Error: Client lost } 2025-06-03T10:30:36.026193Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7511668605896757303:2526], TxId: 281474976715672, task: 4. Ctx: { TraceId : 01jwtnfk7m973ds26gw8xrp8gw. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7511668605896757291:2521], status: ABORTED, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:30:36.026202Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7511668605896757299:2523], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=. TraceId : 01jwtnfk7m973ds26gw8xrp8gw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7511668605896757291:2521], status: ABORTED, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:30:36.026331Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7511668605896757300:2524], TxId: 281474976715672, task: 2. Ctx: { TraceId : 01jwtnfk7m973ds26gw8xrp8gw. SessionId : ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7511668605896757291:2521], status: ABORTED, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:30:36.026396Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7511668605896757301:2525], TxId: 281474976715672, task: 3. Ctx: { SessionId : ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=. TraceId : 01jwtnfk7m973ds26gw8xrp8gw. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Handle abort execution event from: [4:7511668605896757291:2521], status: ABORTED, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:30:36.026445Z node 4 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [4:7511668605896757304:2527], TxId: 281474976715672, task: 5. Ctx: { TraceId : 01jwtnfk7m973ds26gw8xrp8gw. SessionId : ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [4:7511668605896757291:2521], status: ABORTED, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:30:36.026577Z node 4 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=4&id=NDBhZTNmMDEtN2MzNmVmYTctZWNlMTg3ZTEtODZmOTlkYzQ=, ActorId: [4:7511668605896757289:2521], ActorState: ExecuteState, TraceId: 01jwtnfk7m973ds26gw8xrp8gw, Create QueryResponse for error on request, msg: >> LabeledDbCounters::OneTabletRemoveCounters [GOOD] >> LabeledDbCounters::OneTabletRestart >> ObjectDistribution::TestManyIrrelevantNodes [GOOD] >> Sequencer::Basic1 [GOOD] >> StoragePool::TestDistributionRandomProbability >> THiveTest::TestSpreadNeighboursDifferentOwners [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable >> TSchemeShardSubDomainTest::SimultaneousDefine >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob >> TSchemeShardSubDomainTest::DiskSpaceUsage >> TSchemeShardSubDomainTest::CreateDropNbs >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob >> VDiskBalancing::TestRandom_Block42 >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable >> VDiskBalancing::TestStopOneNode_Mirror3dc >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas >> VDiskBalancing::TestStopOneNode_Block42 >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob |66.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] >> THiveTest::TestUpdateTabletsObjectUpdatesMetrics [GOOD] >> THiveTest::TestRestartTablets >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] >> THiveTest::TestHiveBalancerWithPreferredDC3 [GOOD] >> THiveTest::TestHiveFollowersWithChangingDC >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] >> TPersQueueMirrorer::TestBasicRemote [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD] >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD] >> THiveTest::TestHiveBalancer [GOOD] >> THiveTest::TestHiveBalancerDifferentResources >> VDiskBalancing::TestStopOneNode_Block42 [GOOD] >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD] >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefine [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:37.929004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:37.929028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.929032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:37.929038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:37.929053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:37.929056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:37.929063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.929079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:37.929178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:37.929242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:37.940386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:37.940409Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:37.944316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:37.944405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:37.944435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:37.946156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:37.946225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:37.946344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.946397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:37.947038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.947091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:37.947357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.947363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.947372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:37.947377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, 
domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.947382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:37.947399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.948475Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:37.965569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:37.965636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.965693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:37.965737Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:37.965746Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.966372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.966397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:37.966446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.966455Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:37.966459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:37.966464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:37.966857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.966867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:37.966870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:37.967166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.967177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.967182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.967188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:37.967667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:37.968013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:37.968048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:37.968203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.968230Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:37.968250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.968307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:37.968312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.968346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:37.968356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:37.968713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.968719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at 
schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.968760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409548, partId: 0 2025-06-03T10:30:37.996171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Status: SUCCESS OnTabletId: 72075186233409548 2025-06-03T10:30:37.996177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:84: NSubDomainState::TConfigureParts operationId# 101:0 HandleReply TEvConfigureStatus operationId:101:0 at schemeshard:72057594046678944 2025-06-03T10:30:37.996187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:120: NSubDomainState::TConfigureParts operationId# 101:0 Got OK TEvConfigureStatus from tablet# 72075186233409548 shardIdx# 72057594046678944:3 at schemeshard# 72057594046678944 2025-06-03T10:30:37.996194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 3 -> 128 2025-06-03T10:30:37.996884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.996926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.996950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.996958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.996966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:37.996976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 101 ready parts: 1/1 2025-06-03T10:30:37.997013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 101 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:37.997478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 101:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:101 msg type: 269090816 2025-06-03T10:30:37.997512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 101, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 101 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000003 2025-06-03T10:30:37.997616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: 
TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.997641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 101 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:37.997649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:37.997732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 128 -> 240 2025-06-03T10:30:37.997743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 101:0, at tablet# 72057594046678944 2025-06-03T10:30:37.997784Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:37.997804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:37.998315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.998326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:37.998382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.998389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:37.998402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.998411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:37.998428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:37.998434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:37.998441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:37.998446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:37.998452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:37.998458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-06-03T10:30:37.998465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:37.998470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:37.998504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:37.998511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 1, subscribers: 1 2025-06-03T10:30:37.998517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-03T10:30:37.998777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:37.998794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:37.998800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:37.998810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:30:37.998816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:37.998830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 1 2025-06-03T10:30:37.998835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:308:2298] 2025-06-03T10:30:37.999488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:37.999516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:37.999523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:315:2305] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:37.999658Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:37.999705Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" 
took 56us result status StatusSuccess 2025-06-03T10:30:37.999797Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateDropNbs [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:37.951400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:37.951433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.951441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:37.951448Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:37.951464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:37.951469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:37.951480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.951501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 
10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:37.951634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:37.951705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:37.962945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:37.962970Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:37.966858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:37.966960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:37.966993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:37.969680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:37.969772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:37.969898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.969960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:37.970850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.970929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:37.971301Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.971318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.971334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:37.971344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.971350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:37.971378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.973401Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:38.001806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain 
SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:38.001908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.001985Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:38.002050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:38.002065Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.003092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.003131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:38.003214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.003228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:38.003237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:38.003245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:38.003900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.003915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:38.003923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:38.004426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.004464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.004471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.004481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:38.005399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to 
coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:38.006071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:38.006130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:38.006358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.006399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:38.006421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.006510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:38.006522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.006572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:38.006587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:38.007316Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:38.007333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:38.007391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
38.095454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:30:38.095671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:38.095678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:38.095681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:38.095698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:38.095769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:38.096037Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:38.096084Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:30:38.096103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.096166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-03T10:30:38.096425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:38.096452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186233409548 2025-06-03T10:30:38.096763Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:38.096888Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-03T10:30:38.096910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:38.096941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-06-03T10:30:38.097073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at 
schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409549 2025-06-03T10:30:38.097143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:38.097163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:38.097223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:38.097227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:30:38.097236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:38.097255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:38.097270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:38.097273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:38.097310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:38.097644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:38.097655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:38.097666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:38.097669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:38.098088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:38.098096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:38.098107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:38.098111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:38.098120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:38.098145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:38.098149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:38.098159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:38.098198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:38.098456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:30:38.098508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:38.098513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:38.098567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:38.098581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:38.098585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:537:2490] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:38.100455Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:38.100511Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 77us result status StatusPathDoesNotExist 2025-06-03T10:30:38.100573Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:38.100654Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:38.100665Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 13us result status StatusPathDoesNotExist 2025-06-03T10:30:38.100677Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDefineAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:38.023566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:38.023592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:38.023596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:38.023602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:38.023617Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:38.023620Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:38.023628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:38.023646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:38.023741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:38.023821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:38.036129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:38.036150Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:38.040053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:38.040140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:38.040171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:38.042095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:38.042182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:38.042317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.042363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:38.042979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:38.043030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:38.043322Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:38.043331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:38.043340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:38.043347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:38.043351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:38.043366Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.044651Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:38.060415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:38.060498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.060574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:38.060630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:38.060643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:38.061403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:38.061423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:38.061430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:38.061882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:38.062171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.062177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.062182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.062189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:38.062643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:38.062931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:38.062965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:38.063148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.063172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:38.063191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.063259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:38.063266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.063303Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:38.063316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:38.063735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:38.063744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:38.063788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
HARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 102 Step: 140 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 433 } } CommitVersion { Step: 140 TxId: 102 } 2025-06-03T10:30:38.168708Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409549, partId: 0 2025-06-03T10:30:38.168732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 102 Step: 140 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 433 } } CommitVersion { Step: 140 TxId: 102 } 2025-06-03T10:30:38.168754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409549 Status: COMPLETE TxId: 102 Step: 140 OrderId: 102 ExecLatency: 0 ProposeLatency: 2 DomainCoordinators: 72075186233409546 TxStats { PerShardStats { ShardId: 72075186233409549 CpuTimeUsec: 433 } } CommitVersion { Step: 140 TxId: 102 } 2025-06-03T10:30:38.169125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 508 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:30:38.169137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 102, tablet: 72075186233409549, partId: 0 2025-06-03T10:30:38.169156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 102:0, at schemeshard: 72057594046678944, message: Source { RawX1: 508 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:30:38.169164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:30:38.169173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 102:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 508 RawX2: 4294969755 } Origin: 72075186233409549 State: 2 TxId: 102 Step: 0 Generation: 2 2025-06-03T10:30:38.169187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 102:0, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.169192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.169197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 102:0, 
datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:30:38.169205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 129 -> 240 2025-06-03T10:30:38.169798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:38.169825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:38.170253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.170299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.170365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.170377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:30:38.170396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:38.170402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:38.170408Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:38.170412Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:38.170419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true 2025-06-03T10:30:38.170436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:308:2298] message: TxId: 102 2025-06-03T10:30:38.170445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:38.170452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:30:38.170457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:30:38.170484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:38.170962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:38.170977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:536:2478] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:38.171109Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 
72057594046678944 2025-06-03T10:30:38.171172Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 73us result status StatusSuccess 2025-06-03T10:30:38.171305Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 2 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 140 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:38.171446Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:38.171482Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 38us result status StatusSuccess 2025-06-03T10:30:38.171571Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 140 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 
ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 2 PlanResolution: 10 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> StoragePool::TestDistributionRandomProbability [GOOD] >> StoragePool::TestDistributionRandomProbabilityWithOverflow [GOOD] >> StoragePool::TestDistributionExactMin ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:37.874922Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:37.874947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.874951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:37.874957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:37.874972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:37.874975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:37.874982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.874997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:37.875088Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:37.875149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:37.886098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:37.886124Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:37.889488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:37.889588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:37.889626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:37.892006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:37.892082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:37.892180Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.892233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:37.892902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.892956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:37.893241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.893249Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.893260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:37.893266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.893271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:37.893290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.894518Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:37.908843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-03T10:30:37.908921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.908990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:37.909037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:37.909047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.909977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.910021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:37.910094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.910104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:37.910109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:37.910114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:37.910692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.910704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:37.910709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:37.911151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.911162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.911168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.911174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:37.911733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:37.912219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:37.912272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:37.912444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.912470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:37.912488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.912570Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:37.912576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.912615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:37.912626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:37.913084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.913092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.913139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
552 CpuTimeUsec: 293 } } CommitVersion { Step: 140 TxId: 101 } 2025-06-03T10:30:38.050626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 2025-06-03T10:30:38.050650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 101 Step: 140 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 DomainCoordinators: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 293 } } CommitVersion { Step: 140 TxId: 101 } 2025-06-03T10:30:38.050665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409552 Status: COMPLETE TxId: 101 Step: 140 OrderId: 101 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72075186233409546 DomainCoordinators: 72075186233409547 DomainCoordinators: 72075186233409548 TxStats { PerShardStats { ShardId: 72075186233409552 CpuTimeUsec: 293 } } CommitVersion { Step: 140 TxId: 101 } 2025-06-03T10:30:38.051160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 627 RawX2: 4294969831 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:30:38.051171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 101, tablet: 72075186233409552, partId: 0 2025-06-03T10:30:38.051190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 101:0, at schemeshard: 72057594046678944, message: Source { RawX1: 627 RawX2: 4294969831 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:30:38.051197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:30:38.051209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 101:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 627 RawX2: 4294969831 } Origin: 72075186233409552 State: 2 TxId: 101 Step: 0 Generation: 2 2025-06-03T10:30:38.051223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 101:0, shardIdx: 72057594046678944:7, datashard: 72075186233409552, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.051227Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.051233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 101:0, datashard: 72075186233409552, at schemeshard: 72057594046678944 2025-06-03T10:30:38.051241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 129 
-> 240 2025-06-03T10:30:38.051840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:38.051859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:38.051894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:38.052166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:38.052170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:38.052174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:38.052177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:38.052181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: true 2025-06-03T10:30:38.052194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:275:2265] message: TxId: 101 2025-06-03T10:30:38.052200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:38.052205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:38.052208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:38.052231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:38.052577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:38.052587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:276:2266] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:38.052683Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052727Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 51us result 
status StatusSuccess 2025-06-03T10:30:38.052827Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052917Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:38.052937Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 22us result status StatusSuccess 2025-06-03T10:30:38.053000Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 140 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 
TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 10 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc_HugeBlob [GOOD]
Test command err: RandomSeed# 591038289435886706
SEND TEvPut with key [1:1:1:0:0:533504:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:533504:0]
2025-06-03T10:30:38.062915Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:533504:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Start compaction
Finish compaction
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Mirror3dc [GOOD]
Test command err: RandomSeed# 16911661027185110072
SEND TEvPut with key [1:1:1:0:0:100:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:100:0]
2025-06-03T10:30:38.161251Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Start compaction
Finish compaction
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42_HugeBlob [GOOD]
Test command err: RandomSeed# 6254240632170434998
SEND TEvPut with key [1:1:1:0:0:3201024:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:3201024:0]
2025-06-03T10:30:38.113737Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17]
2025-06-03T10:30:38.113802Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17]
2025-06-03T10:30:38.113820Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17]
2025-06-03T10:30:38.113867Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17]
2025-06-03T10:30:38.113889Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17]
2025-06-03T10:30:38.113909Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17]
2025-06-03T10:30:38.113927Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17]
TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Start compaction
Finish compaction
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42_HugeBlob [GOOD]
Test command err: RandomSeed# 14894740331474186054
SEND TEvPut with key [1:1:1:0:0:3201024:0]
2025-06-03T10:30:38.203801Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6
2025-06-03T10:30:38.203948Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:3201024:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3
2025-06-03T10:30:38.231819Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7
Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3
Start compaction 1
Finish compaction 1
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestStopOneNode_Block42 [GOOD]
Test command err: RandomSeed# 2902818082370911553
SEND TEvPut with key [1:1:1:0:0:100:0]
TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988}
SEND TEvPut with key [1:1:2:0:0:100:0]
2025-06-03T10:30:38.183586Z 3 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17]
2025-06-03T10:30:38.183633Z 8 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:223:17] ServerId# [1:301:63] TabletId# 72057594037932033 PipeClientId# [8:223:17]
2025-06-03T10:30:38.183654Z 6 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17]
2025-06-03T10:30:38.183673Z 5 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:202:17] ServerId# [1:298:60] TabletId# 72057594037932033 PipeClientId# [5:202:17]
2025-06-03T10:30:38.183691Z 4 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed)
ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-03T10:30:38.183709Z 2 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-03T10:30:38.183729Z 7 00h01m00.010512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start compaction Finish compaction >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 >> THiveTest::TestRestartTablets [GOOD] >> THiveTest::TestServerlessComputeResourcesMode >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsMultipleColumns [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows >> THiveTest::TestDrain [GOOD] >> THiveTest::TestDrainWithMaxTabletsScheduled >> THiveTest::TestHiveNoBalancingWithLowResourceUsage [GOOD] >> THiveTest::TestLockTabletExecution >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestWriteToExtraChannelThenReadMixedChannelsReturnsOk [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! 
new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:83:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:84:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:84:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:83:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:84:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:84:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:105:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:108:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:87:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:91:2057] recipient: [11:89:2117] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:93:2057] recipient: [11:89:2117] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! 
new actor is[11:92:2118] Leader for TabletID 72057594037927937 is [11:92:2118] sender: [11:178:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (a ... 594037927937 (actor [23:57:2097]) tablet resolver refreshed! new actor is[23:86:2114] Leader for TabletID 72057594037927937 is [23:86:2114] sender: [23:172:2057] recipient: [23:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [24:55:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:58:2057] recipient: [24:51:2095] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:75:2057] recipient: [24:14:2061] !Reboot 72057594037927937 (actor [24:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:82:2057] recipient: [24:36:2083] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:85:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [24:57:2097] sender: [24:86:2057] recipient: [24:84:2113] Leader for TabletID 72057594037927937 is [24:87:2114] sender: [24:88:2057] recipient: [24:84:2113] !Reboot 72057594037927937 (actor [24:57:2097]) rebooted! !Reboot 72057594037927937 (actor [24:57:2097]) tablet resolver refreshed! new actor is[24:87:2114] Leader for TabletID 72057594037927937 is [24:87:2114] sender: [24:173:2057] recipient: [24:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [25:55:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:58:2057] recipient: [25:51:2095] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:75:2057] recipient: [25:14:2061] !Reboot 72057594037927937 (actor [25:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:85:2057] recipient: [25:36:2083] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:88:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [25:57:2097] sender: [25:89:2057] recipient: [25:87:2116] Leader for TabletID 72057594037927937 is [25:90:2117] sender: [25:91:2057] recipient: [25:87:2116] !Reboot 72057594037927937 (actor [25:57:2097]) rebooted! !Reboot 72057594037927937 (actor [25:57:2097]) tablet resolver refreshed! new actor is[25:90:2117] Leader for TabletID 72057594037927937 is [25:90:2117] sender: [25:176:2057] recipient: [25:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [26:55:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:58:2057] recipient: [26:50:2095] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:75:2057] recipient: [26:14:2061] !Reboot 72057594037927937 (actor [26:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
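Each iteration of the trace repeats one scenario: deliver a specific event type, reboot the tablet at that point, then verify a new leader actor appears and the tablet resolver refreshes. A simplified stand-in for that loop is sketched below; the event names are taken from the trace, but the harness itself is a toy model, not the real YDB test fixture.

#include <cstdio>
#include <string>
#include <vector>

// Simplified model of the reboot-per-event scenario above. "Leader" is
// modeled as a growing local actor id; the increment is illustrative,
// the real id comes from actor registration in the actor system.
struct TFakeTablet {
    unsigned long long LeaderLocalId = 57;
    unsigned long long Reboot() { return LeaderLocalId += 25; }
};

int main() {
    const std::vector<std::string> events = {
        "NKikimr::TEvKeyValue::TEvExecuteTransaction",
        "NKikimr::TEvKeyValue::TEvIntermediate",
        "NKikimr::TEvTabletPipe::TEvServerConnected",
        "NKikimr::TEvKeyValue::TEvReadRange",
        "NKikimr::TEvKeyValue::TEvNotify",
        "NKikimr::TEvKeyValue::TEvRequest",
    };
    for (const auto& ev : events) {
        TFakeTablet tablet;                     // fresh tablet per iteration
        const auto oldLeader = tablet.LeaderLocalId;
        const auto newLeader = tablet.Reboot(); // reboot on this event type
        std::printf("reboot on %s: leader %llu -> %llu\n",
                    ev.c_str(), oldLeader, newLeader);
    }
    return 0;
}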
Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:85:2057] recipient: [26:36:2083] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:88:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [26:57:2097] sender: [26:89:2057] recipient: [26:87:2116] Leader for TabletID 72057594037927937 is [26:90:2117] sender: [26:91:2057] recipient: [26:87:2116] !Reboot 72057594037927937 (actor [26:57:2097]) rebooted! !Reboot 72057594037927937 (actor [26:57:2097]) tablet resolver refreshed! new actor is[26:90:2117] Leader for TabletID 72057594037927937 is [26:90:2117] sender: [26:176:2057] recipient: [26:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:55:2057] recipient: [27:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [27:55:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:58:2057] recipient: [27:51:2095] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:75:2057] recipient: [27:14:2061] !Reboot 72057594037927937 (actor [27:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:86:2057] recipient: [27:36:2083] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:89:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [27:57:2097] sender: [27:90:2057] recipient: [27:88:2116] Leader for TabletID 72057594037927937 is [27:91:2117] sender: [27:92:2057] recipient: [27:88:2116] !Reboot 72057594037927937 (actor [27:57:2097]) rebooted! !Reboot 72057594037927937 (actor [27:57:2097]) tablet resolver refreshed! new actor is[27:91:2117] Leader for TabletID 72057594037927937 is [27:91:2117] sender: [27:177:2057] recipient: [27:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:55:2057] recipient: [28:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [28:55:2057] recipient: [28:50:2095] Leader for TabletID 72057594037927937 is [28:57:2097] sender: [28:58:2057] recipient: [28:50:2095] Leader for TabletID 72057594037927937 is [28:57:2097] sender: [28:75:2057] recipient: [28:14:2061] !Reboot 72057594037927937 (actor [28:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [28:57:2097] sender: [28:89:2057] recipient: [28:36:2083] Leader for TabletID 72057594037927937 is [28:57:2097] sender: [28:92:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [28:57:2097] sender: [28:93:2057] recipient: [28:91:2119] Leader for TabletID 72057594037927937 is [28:94:2120] sender: [28:95:2057] recipient: [28:91:2119] !Reboot 72057594037927937 (actor [28:57:2097]) rebooted! !Reboot 72057594037927937 (actor [28:57:2097]) tablet resolver refreshed! new actor is[28:94:2120] Leader for TabletID 72057594037927937 is [28:94:2120] sender: [28:180:2057] recipient: [28:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:55:2057] recipient: [29:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [29:55:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:58:2057] recipient: [29:51:2095] Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:75:2057] recipient: [29:14:2061] !Reboot 72057594037927937 (actor [29:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:89:2057] recipient: [29:36:2083] Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:92:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [29:57:2097] sender: [29:93:2057] recipient: [29:91:2119] Leader for TabletID 72057594037927937 is [29:94:2120] sender: [29:95:2057] recipient: [29:91:2119] !Reboot 72057594037927937 (actor [29:57:2097]) rebooted! !Reboot 72057594037927937 (actor [29:57:2097]) tablet resolver refreshed! new actor is[29:94:2120] Leader for TabletID 72057594037927937 is [29:94:2120] sender: [29:180:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:58:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:75:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:90:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:93:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:94:2057] recipient: [30:92:2119] Leader for TabletID 72057594037927937 is [30:95:2120] sender: [30:96:2057] recipient: [30:92:2119] !Reboot 72057594037927937 (actor [30:57:2097]) rebooted! !Reboot 72057594037927937 (actor [30:57:2097]) tablet resolver refreshed! new actor is[30:95:2120] Leader for TabletID 72057594037927937 is [30:95:2120] sender: [30:181:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:58:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:75:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:92:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:95:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:96:2057] recipient: [31:94:2121] Leader for TabletID 72057594037927937 is [31:97:2122] sender: [31:98:2057] recipient: [31:94:2121] !Reboot 72057594037927937 (actor [31:57:2097]) rebooted! !Reboot 72057594037927937 (actor [31:57:2097]) tablet resolver refreshed! new actor is[31:97:2122] Leader for TabletID 72057594037927937 is [31:97:2122] sender: [31:183:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:58:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:75:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:92:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:95:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:96:2057] recipient: [32:94:2121] Leader for TabletID 72057594037927937 is [32:97:2122] sender: [32:98:2057] recipient: [32:94:2121] !Reboot 72057594037927937 (actor [32:57:2097]) rebooted! !Reboot 72057594037927937 (actor [32:57:2097]) tablet resolver refreshed! new actor is[32:97:2122] Leader for TabletID 72057594037927937 is [32:97:2122] sender: [32:183:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:58:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:75:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:93:2057] recipient: [33:36:2083] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:95:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:97:2057] recipient: [33:96:2121] Leader for TabletID 72057594037927937 is [33:98:2122] sender: [33:99:2057] recipient: [33:96:2121] !Reboot 72057594037927937 (actor [33:57:2097]) rebooted! !Reboot 72057594037927937 (actor [33:57:2097]) tablet resolver refreshed! new actor is[33:98:2122] Leader for TabletID 72057594037927937 is [33:98:2122] sender: [33:184:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:58:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:75:2057] recipient: [34:14:2061] >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/ut/ut_with_sdk/unittest >> TPersQueueMirrorer::TestBasicRemote [GOOD] Test command err: 2025-06-03T10:28:47.760044Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668139303353058:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:47.760109Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d9a/r3tmp/tmp7ZHXvd/pdisk_1.dat 2025-06-03T10:28:47.802695Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:28:47.831723Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:47.832147Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription 
[1:7511668139303353037:2079] 1748946527759889 != 1748946527759892 TServer::EnableGrpc on GrpcPort 18927, node 1 2025-06-03T10:28:47.844533Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d9a/r3tmp/yandexslnChX.tmp 2025-06-03T10:28:47.844553Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d9a/r3tmp/yandexslnChX.tmp 2025-06-03T10:28:47.844683Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d9a/r3tmp/yandexslnChX.tmp 2025-06-03T10:28:47.844756Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:28:47.850600Z INFO: TTestServer started on Port 11418 GrpcPort 18927 2025-06-03T10:28:47.863357Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:47.863397Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:11418 2025-06-03T10:28:47.864516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected PQClient connected to localhost:18927 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:47.901951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:47.905589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:47.917809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:28:48.168444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143598321139:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.168484Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.168533Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143598321156:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.168937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668143598321182:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.168952Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:28:48.169442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:28:48.171329Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668143598321158:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:28:48.209067Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.216244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:28:48.232547Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:28:48.252800Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668143598321467:2581] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === CheckClustersList. Subcribe to ClusterTracker from [1:7511668143598321517:2608] 2025-06-03T10:28:52.760528Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668139303353058:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:52.760568Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:28:53.493367Z :TopicSplitMerge INFO: TTopicSdkTestSetup started 2025-06-03T10:28:53.497573Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:28:53.498049Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [1:7511668165073158191:2673], Recipient [1:7511668139303353476:2194]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.498067Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:28:53.498071Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046644480 2025-06-03T10:28:53.498081Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [1:7511668165073158187:2670], Recipient [1:7511668139303353476:2194]: {TEvModifySchemeTransaction txid# 281474976715674 TabletId# 72057594046644480} 2025-06-03T10:28:53.498083Z node 1 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:28:53.510593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/Root" OperationType: ESchemeOpCreatePersQueueGroup CreatePersQueueGroup { Name: "test-topic" TotalGroupCount: 1 PartitionPerTablet: 1 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } PartitionStrategy { MinPartitionCount: 1 MaxPartitionCount: 100 ScaleThresholdSeconds: 300 ScaleUpPartitionWriteSpeedThresholdPercent: 90 ScaleDownPartitionWriteSpeedThresholdPercent: 30 PartitionStrategyType: CAN_SPLIT } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 } } } } TxId: 281474976715674 TabletId: 72057594046644480 Owner: "root@builtin" UserToken: "***" PeerName: "" , at schemeshard: 72057594046644480 2025-06-03T10:28:53.510738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_pq.cpp:307: TCreatePQ Propose, path: /Root/test-topic, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:28:53.510827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046644480, LocalPathId: 1], parent name: Root, child name: test-topic, child id: [OwnerId: 72057594046644480, LocalPathId: 13], at schemeshard: 72057594046644480 2025-06-03T10:28:53.510847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 13] was 0 
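The CreatePersQueueGroup transaction above carries a PartitionStrategy with split/merge thresholds (ScaleUpPartitionWriteSpeedThresholdPercent: 90, ScaleDownPartitionWriteSpeedThresholdPercent: 30, MaxPartitionCount: 100) against a quota of WriteSpeedInBytesPerSecond: 1048576. A sketch of how such thresholds could translate into a scaling decision follows; the field values come from the transaction, but the decision rule itself is an illustrative guess, not the actual YDB balancer logic.

#include <cstdio>

// Mirrors the PartitionStrategy fields from the transaction above.
struct TPartitionStrategy {
    int MinPartitionCount = 1;
    int MaxPartitionCount = 100;
    int ScaleUpPercent = 90;   // ScaleUpPartitionWriteSpeedThresholdPercent
    int ScaleDownPercent = 30; // ScaleDownPartitionWriteSpeedThresholdPercent
};

enum class EScaleDecision { None, Split, Merge };

EScaleDecision Decide(const TPartitionStrategy& s, int partitions,
                      double writtenBps, double quotaBps) {
    const double pct = 100.0 * writtenBps / quotaBps;
    if (pct >= s.ScaleUpPercent && partitions < s.MaxPartitionCount)
        return EScaleDecision::Split;
    if (pct <= s.ScaleDownPercent && partitions > s.MinPartitionCount)
        return EScaleDecision::Merge;
    return EScaleDecision::None;
}

int main() {
    TPartitionStrategy s;
    // Quota from the transaction: WriteSpeedInBytesPerSecond: 1048576.
    // 990000 B/s is ~94% of quota, above the 90% threshold -> Split.
    std::printf("decision: %d\n", (int)Decide(s, 1, 990000.0, 1048576.0));
    return 0;
}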
2025-06-03T10:28:53.510862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason new shard created for pat ... sumer shared/some_user session shared/some_user_7_1_4549742877962920367_v1 is DEAD 2025-06-03T10:30:37.798256Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 2 consumer shared/some_user session shared/some_user_7_2_12176559389305687441_v1 grpc read done: success# 0, data# { } 2025-06-03T10:30:37.798269Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 2 consumer shared/some_user session shared/some_user_7_2_12176559389305687441_v1 grpc read failed 2025-06-03T10:30:37.798274Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 2 consumer shared/some_user session shared/some_user_7_2_12176559389305687441_v1 grpc closed 2025-06-03T10:30:37.798285Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 2 consumer shared/some_user session shared/some_user_7_2_12176559389305687441_v1 is DEAD 2025-06-03T10:30:37.798364Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session shared/some_user_7_1_4549742877962920367_v1 2025-06-03T10:30:37.798394Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668607708396436:2536] destroyed 2025-06-03T10:30:37.798415Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/some_user_7_1_4549742877962920367_v1 2025-06-03T10:30:37.798396Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic2] pipe [7:7511668607708396430:2529] disconnected; active server actors: 1 2025-06-03T10:30:37.798408Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic2] pipe [7:7511668607708396430:2529] client some_user disconnected session shared/some_user_7_1_4549742877962920367_v1 2025-06-03T10:30:37.798423Z node 7 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1183: [72075186224037893][rt3.dc1--topic2] consumer some_user rebalancing was scheduled 2025-06-03T10:30:37.798433Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--topic2] pipe [7:7511668607708396432:2531] disconnected; active server actors: 1 2025-06-03T10:30:37.798436Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--topic2] pipe [7:7511668607708396432:2531] client some_user disconnected session shared/some_user_7_2_12176559389305687441_v1 2025-06-03T10:30:37.798470Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session shared/some_user_7_2_12176559389305687441_v1 2025-06-03T10:30:37.798482Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511668607708396437:2537] destroyed 2025-06-03T10:30:37.798489Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/some_user_7_2_12176559389305687441_v1 2025-06-03T10:30:37.798836Z node 7 :PQ_MIRRORER ERROR: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] [] Got error. Status: CLIENT_CANCELLED. Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-03T10:30:37.798926Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] [] In Reconnect, ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-06-03T10:30:37.798935Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] [] New values: ReadSizeBudget = 8388608, ReadSizeServerDelta = 0 2025-06-03T10:30:37.798946Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] [] Closing session to cluster: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-06-03T10:30:37.799017Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:30:37.799033Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] [] Abort session to cluster 2025-06-03T10:30:37.799039Z node 7 :PQ_MIRRORER ERROR: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] [] Got error. Status: CLIENT_CANCELLED. Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-03T10:30:37.799044Z node 8 :PQ_MIRRORER DEBUG: mirrorer.cpp:601: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1] got next reader event: 1 2025-06-03T10:30:37.799067Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] [] In Reconnect, ReadSizeBudget = 0, ReadSizeServerDelta = 8388608 2025-06-03T10:30:37.799074Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] [] New values: ReadSizeBudget = 8388608, ReadSizeServerDelta = 0 2025-06-03T10:30:37.799074Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Closing read session. Close timeout: 0.000000s 2025-06-03T10:30:37.799078Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] [] Closing session to cluster: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-06-03T10:30:37.799086Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:1:1:10:11 2025-06-03T10:30:37.799096Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Counters: { Errors: 1 CurrentSessionLifetimeMs: 994 BytesRead: 251 MessagesRead: 11 BytesReadCompressed: 251 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:30:37.799102Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Closing read session. Close timeout: 0.000000s 2025-06-03T10:30:37.799109Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:1:1:10:11 2025-06-03T10:30:37.799113Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Counters: { Errors: 1 CurrentSessionLifetimeMs: 994 BytesRead: 251 MessagesRead: 11 BytesReadCompressed: 251 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:30:37.799098Z node 8 :PQ_MIRRORER ERROR: partition.cpp:932: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1]: read session closed: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-06-03T10:30:37.799121Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1][reader 1] [] [dc75c73d-db3c02b1-b98ec9ff-d8d1550] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:30:37.799160Z node 8 :PQ_MIRRORER DEBUG: mirrorer.cpp:601: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0] got next reader event: 1 2025-06-03T10:30:37.799137Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:30:37.799153Z node 7 :PQ_MIRRORER DEBUG: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] [] Abort session to cluster 2025-06-03T10:30:37.799178Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Closing read session. Close timeout: 0.000000s 2025-06-03T10:30:37.799189Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:0:1:16:17 2025-06-03T10:30:37.799197Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Counters: { Errors: 1 CurrentSessionLifetimeMs: 993 BytesRead: 534 MessagesRead: 17 BytesReadCompressed: 534 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:30:37.799202Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Closing read session. Close timeout: 0.000000s 2025-06-03T10:30:37.799204Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:/topic2:0:1:16:17 2025-06-03T10:30:37.799207Z node 7 :PQ_MIRRORER INFO: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Counters: { Errors: 1 CurrentSessionLifetimeMs: 993 BytesRead: 534 MessagesRead: 17 BytesReadCompressed: 534 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:30:37.799212Z node 7 :PQ_MIRRORER NOTICE: actor.cpp:37: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0][reader 1] [] [8f8cd4c-2ec5d886-c1518c47-d229f186] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:30:37.799553Z node 8 :PQ_MIRRORER NOTICE: mirrorer.cpp:546: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 1] schedule consumer creation 2025-06-03T10:30:37.799579Z node 8 :PQ_MIRRORER ERROR: partition.cpp:932: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0]: read session closed: SessionClosed { Status: CLIENT_CANCELLED Issues: "
: Error: GRpc error: (1): Cancelled on the server side " } 2025-06-03T10:30:37.799983Z node 8 :PQ_MIRRORER NOTICE: mirrorer.cpp:546: [mirrorer for Topic rt3.dc1--topic1 in dc dc1 in database: Root, partition 0] schedule consumer creation >> TSchemeShardSubDomainTest::DeclareDefineAndDelete >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TwoPartsOnOneNodeTest_Block42 [GOOD] Test command err: RandomSeed# 9261204432618982806 SEND TEvPut with key [1:1:1:0:0:100:0] 2025-06-03T10:30:39.248206Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 6 2025-06-03T10:30:39.248316Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 Node 4: Node 5: Node 6: 2 Node 7: 3 2025-06-03T10:30:39.266675Z 1 00h01m00.011024s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 7 Node 0: 4 Node 1: 5 Node 2: 6 Node 3: 1 2 Node 4: Node 5: 1 Node 6: Node 7: 3 Start compaction 1 Finish compaction 1 >> TChargeBTreeIndex::OneNode_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes >> StoragePool::TestDistributionExactMin [GOOD] >> StoragePool::TestDistributionExactMinWithOverflow [GOOD] >> StoragePool::TestDistributionRandomMin7p >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD] >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD] >> THiveTest::TestLockTabletExecution [GOOD] >> THiveTest::TestLockTabletExecutionBadOwner ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:39.638065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:39.638092Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.638097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:39.638102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:39.638118Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:39.638122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:39.638130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.638146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:39.638283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.638371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.649573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.649600Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.653018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.653134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.653177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.655347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.655425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.655541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.655624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.656274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.656337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.656704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.656721Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.656735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.656744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.656751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.656775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: 
TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.658195Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.672316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.672408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.672476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.672541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.672551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.673316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.673347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.673409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.673418Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.673422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.673426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.673824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.673833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.673838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.674091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.674098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 
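The schemeshard trace above records the operation advancing through numeric states: 2 -> 3 after TCreateParts (no shards to create), 3 -> 128 after TConfigureParts, and later 128 -> 240 once TPropose handles TEvOperationPlan. The sketch below captures that progression; the numeric codes are read directly from the "Change state for txid" lines, while the symbolic names are assumptions.

#include <cstdio>

// State codes inferred from the "Change state for txid" lines above;
// the enumerator names are assumptions, only the values are from the log.
enum ETxState {
    CreateParts    = 2,
    ConfigureParts = 3,
    Propose        = 128,
    Done           = 240,
};

ETxState Next(ETxState s) {
    switch (s) {
        case CreateParts:    return ConfigureParts; // no shards to create
        case ConfigureParts: return Propose;        // parts configured
        case Propose:        return Done;           // TEvOperationPlan handled
        default:             return Done;
    }
}

int main() {
    for (ETxState s = CreateParts; s != Done; s = Next(s))
        std::printf("state %d -> %d\n", (int)s, (int)Next(s));
    return 0;
}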
2025-06-03T10:30:39.674102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.674109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.674624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.674980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.675018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.675195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.675217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.675232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.675294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.675299Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.675330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.675339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.675704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.675714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.675758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:39.713281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:39.713417Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 Forgetting tablet 72075186233409548 2025-06-03T10:30:39.713618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-03T10:30:39.713651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:39.713699Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:39.713726Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-03T10:30:39.713790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:39.713840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:39.713863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409551 2025-06-03T10:30:39.714022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:39.714041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 2025-06-03T10:30:39.714207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:39.714213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:39.714248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 Forgetting tablet 72075186233409549 2025-06-03T10:30:39.714732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:39.714741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath 
for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-06-03T10:30:39.714754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:39.714999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5
2025-06-03T10:30:39.715009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550
2025-06-03T10:30:39.715037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944
2025-06-03T10:30:39.715053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1
2025-06-03T10:30:39.715056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546
2025-06-03T10:30:39.715637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-06-03T10:30:39.715648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548
2025-06-03T10:30:39.715663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6
2025-06-03T10:30:39.715666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551
2025-06-03T10:30:39.715684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944
2025-06-03T10:30:39.715692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-06-03T10:30:39.715695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547
2025-06-03T10:30:39.715741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-06-03T10:30:39.715745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549
2025-06-03T10:30:39.715810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 101, wait until txId: 101
TestWaitNotification wait txId: 100
2025-06-03T10:30:39.716125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion
2025-06-03T10:30:39.716133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100
TestWaitNotification wait txId: 101
2025-06-03T10:30:39.716147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-06-03T10:30:39.716151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
2025-06-03T10:30:39.716211Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult
2025-06-03T10:30:39.716238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:614:2517]
2025-06-03T10:30:39.716259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-06-03T10:30:39.716273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:614:2517]
TestWaitNotification: OK eventTxId 100
TestWaitNotification: OK eventTxId 101
2025-06-03T10:30:39.716336Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716372Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 56us result status StatusPathDoesNotExist
2025-06-03T10:30:39.716421Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716473Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:39.716497Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 26us result status StatusSuccess
2025-06-03T10:30:39.716576Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD]
>> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenMkDir [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:39.752671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:39.752705Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.752711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:39.752719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:39.752740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:39.752745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:39.752756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.752771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10,
DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:39.752895Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.752974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.766758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.766783Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.770086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.770184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.770219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.772023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.772111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.772233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.772299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.773020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.773100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.773484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.773498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.773512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.773523Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.773530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.773554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.775092Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.794881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain 
{ Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.794956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.795059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.795069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795839Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.795901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795914Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.795920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.795926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.796477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.796492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.796497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.796919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.796931Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.796938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.796945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.797499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 
72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.797893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.797940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.798110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.798138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.798156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.798218Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.798224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.798263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.798274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.798707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.798718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.798762Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
pId# 101:0 ProgressState 2025-06-03T10:30:39.812194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:39.812198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:39.812202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:39.812204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:39.812207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:39.812212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:39.812216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:39.812219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:39.812229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:39.812237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:39.812240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-03T10:30:39.812243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:30:39.812370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:39.812378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:39.812382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:39.812385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-03T10:30:39.812388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:39.812454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:39.812461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: 
TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:39.812464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:39.812466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:30:39.812469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:39.812475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:39.813174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:39.813222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:30:39.813266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:39.813273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:39.813365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:39.813389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:335:2325] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:39.813453Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813487Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 44us result status StatusSuccess 2025-06-03T10:30:39.813601Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 
72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813672Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813684Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 15us result status StatusSuccess 2025-06-03T10:30:39.813716Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813746Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/MyDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:39.813755Z node 1 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/MyDir" took 10us result status StatusSuccess
2025-06-03T10:30:39.813777Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/MyDir" PathDescription { Self { Name: "MyDir" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsCreatePq [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:39.702790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:39.702823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.702829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:39.702836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:39.702851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:39.702855Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:39.702870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.702886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s,
IsManualStartup# false 2025-06-03T10:30:39.703013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.703098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.713719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.713744Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.717196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.717278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.717333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.719375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.719440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.719531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.719574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.720094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.720137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.720385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.720393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.720403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.720409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.720413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.720427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.721668Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.738624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.738704Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.738763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.738807Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.738816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.739601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.739624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.739675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.739684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.739688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.739692Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.740080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.740090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.740094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.740383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.740389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.740394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.740400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.740940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.741288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.741354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.741548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.741570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.741583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.741651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.741662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.741699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.741710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.742238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.742246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.742282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
esult> execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TabletId: 72075186233409551 TxId: 104 Status: OK 2025-06-03T10:30:39.917452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:643: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult triggers early, at schemeshard: 72057594046678944 message# TabletId: 72075186233409551 TxId: 104 Status: OK 2025-06-03T10:30:39.917458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:648: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionAttachResult CollectPQConfigChanged: false 2025-06-03T10:30:39.917462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 2 2025-06-03T10:30:39.918468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 2025-06-03T10:30:39.919122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000003 FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:30:39.941012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409550, partId: 0 2025-06-03T10:30:39.941075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-03T10:30:39.941090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 72057594046678944 message# Origin: 72075186233409550 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-03T10:30:39.941103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:5, shard: 72075186233409550, left await: 1, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.941108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: false 2025-06-03T10:30:39.941113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:754: NPQState::TPropose operationId# 104:0 can't persist state: ShardsInProgress is not empty, remain: 1 2025-06-03T10:30:39.941193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409551, partId: 0 2025-06-03T10:30:39.941206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-03T10:30:39.941213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_pq.cpp:624: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult triggers early, at schemeshard: 
72057594046678944 message# Origin: 72075186233409551 Status: COMPLETE TxId: 104 Step: 5000003 2025-06-03T10:30:39.941218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:265: CollectPQConfigChanged accept TEvPersQueue::TEvProposeTransactionResult, operationId: 104:0, shardIdx: 72057594046678944:6, shard: 72075186233409551, left await: 0, txState.State: Propose, txState.ReadyForNotifications: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.941223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_pq.cpp:629: NPQState::TPropose operationId# 104:0 HandleReply TEvProposeTransactionResult CollectPQConfigChanged: true 2025-06-03T10:30:39.941275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 128 -> 240 2025-06-03T10:30:39.941338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:39.941352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:30:39.942431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.942645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.942703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.942713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.942771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:39.942822Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.942830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2312], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-03T10:30:39.942838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2312], at schemeshard: 72057594046678944, txId: 104, path id: 3 2025-06-03T10:30:39.942908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.942918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:30:39.942933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:39.942938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:39.942944Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:39.942948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:39.942954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-03T10:30:39.942960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:39.942967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:30:39.942972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:30:39.943011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-03T10:30:39.943018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-03T10:30:39.943023Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:39.943027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:30:39.943300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:39.943322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:39.943332Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:30:39.943338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:39.943344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:30:39.943490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:39.943507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:30:39.943512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:30:39.943516Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-06-03T10:30:39.943522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5
2025-06-03T10:30:39.943533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0
2025-06-03T10:30:39.946298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104
2025-06-03T10:30:39.946363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104
TestModificationResult got TxId: 104, wait until txId: 104
>> TSchemeShardSubDomainTest::ForceDropTwice
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareDefineAndDelete [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:39.922732Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:39.922766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.922773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:39.922781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:39.922801Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:39.922806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:39.922822Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:39.922838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:39.922962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:39.923049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14:
TxInitSchema.Execute 2025-06-03T10:30:39.938024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.938045Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.941534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.941640Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.941684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.943850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.943944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.944063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.944115Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.944829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.944887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.945205Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.945216Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.945229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.945239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.945246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.945270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.947012Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.969268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.969389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:30:39.969466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.969522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.969536Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.970332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970344Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.970350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.970356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.970940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.971389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.971400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.971407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.971416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.972150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.972603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 
72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.972675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.972875Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.972905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.972927Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.973002Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.973009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.973054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.973067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.973537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.973549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.973602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
chemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:30:40.008277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:40.008282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:30:40.008290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:30:40.008321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:40.008327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-03T10:30:40.008330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:40.008333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:30:40.008424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:40.008436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:40.008442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:40.008447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:40.008453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:40.008544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:40.008553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:40.008556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:40.008558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:40.008561Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:40.008567Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:30:40.008967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:40.008982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:40.008986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:40.009685Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:40.009772Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:30:40.009811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.009874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:40.009965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:40.009990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409546 2025-06-03T10:30:40.010232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409548 2025-06-03T10:30:40.010501Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:40.010576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 Forgetting tablet 72075186233409547 2025-06-03T10:30:40.010648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:40.010697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:40.010826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-06-03T10:30:40.010834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:40.010858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:40.010946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:40.010953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:40.010964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:40.011521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:40.011538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:40.011607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:40.011613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:40.011950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:40.011961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:40.011972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:40.012002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:30:40.012049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:40.012056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:40.012112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:40.012127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:40.012131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:520:2473] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:40.012189Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.012214Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 34us result status StatusPathDoesNotExist 2025-06-03T10:30:40.012254Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> THiveTest::TestHiveBalancerDifferentResources [GOOD] >> THiveTest::TestHiveBalancerDifferentResources2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestDontSendToReadOnlyTest_Block42 [GOOD] Test command err: RandomSeed# 10202138443283635066 SEND TEvPut with key [1:1:1:0:0:100:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 1 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] SEND TEvPut with key [1:1:2:0:0:100:0] 2025-06-03T10:30:39.833049Z 1 00h01m30.060512s :BS_SKELETON ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) Unavailable in read-only Sender# [1:6331:830] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:100:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Setting VDisk read-only to 0 for position 0 Invoking SetVDiskReadOnly for vdisk [82000000:1:0:0:0] Start compaction Finish compaction >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateItemsInsideSubdomainWithStoragePools [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:39.949016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:39.949040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.949044Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: 
StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:39.949049Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:39.949062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:39.949064Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:39.949072Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.949083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:39.949182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.949253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.960539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.960565Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.964707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.964806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.964838Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.967642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.967734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.967844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.967907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.968721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.968776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.969079Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.969088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.969099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.969105Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.969110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.969128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.970440Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.985029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.985110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.985173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.985226Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.985236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.985876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.985914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.985973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.985985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.985991Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.985997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.986499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.986512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.986518Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.986924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.986940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.986947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.986955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.987723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.988168Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.988207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.988342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.988367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.988386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.988447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.988452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.988483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.988492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.989180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:30:39.989191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.989243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 1/1 2025-06-03T10:30:40.183229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:40.183233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:40.183236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:40.183240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:30:40.183255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:486:2440] message: TxId: 103 2025-06-03T10:30:40.183261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:40.183266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:30:40.183270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:30:40.183291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:30:40.183837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:40.183855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:487:2441] TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:40.183977Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184036Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 82us result status StatusSuccess 2025-06-03T10:30:40.184177Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 
CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184270Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184295Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 27us result status StatusSuccess 2025-06-03T10:30:40.184366Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table_0" PathDescription { Self { Name: "table_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_0" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 
Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184413Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184423Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0" took 11us result status StatusSuccess 2025-06-03T10:30:40.184452Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0" PathDescription { Self { Name: "dir_0" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 102 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184485Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dir_0/table_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.184496Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dir_0/table_1" took 12us result status StatusSuccess 2025-06-03T10:30:40.184544Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dir_0/table_1" PathDescription { Self { Name: "table_1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 200 ParentPathId: 4 PathState: EPathStateNoChanges Owner: 
"root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::DeclareAndDelete >> THiveTest::TestLockTabletExecutionBadOwner [GOOD] >> THiveTest::TestLockTabletExecutionDelete >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD] >> THiveTest::TestServerlessComputeResourcesMode [GOOD] >> THiveTest::TestSkipBadNode >> TSchemeShardSubDomainTest::SimultaneousDeclare >> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ForceDropTwice [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:40.657621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:40.657650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:40.657656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:40.657663Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: 
using default configuration 2025-06-03T10:30:40.657679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:40.657683Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:40.657693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:40.657710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:40.657819Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:40.657890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:40.673102Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:40.673133Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:40.678730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:40.678892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:40.678941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:40.683181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:40.683297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:40.683460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.683557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:40.684734Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:40.684820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:40.685259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:40.685276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:40.685309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:40.685321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:30:40.685329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:40.685364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.687433Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:40.710178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:40.710272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.710347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:40.710400Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:40.710411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.711349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.711385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:40.711468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.711479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:40.711486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:40.711493Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:40.712113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.712125Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:40.712129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:40.712541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.712552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.712560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.712570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:40.713224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:40.713746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:40.713809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:40.714054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.714089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:40.714109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.714192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:40.714201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.714248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:40.714264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:40.714810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:40.714821Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-03T10:30:40.714888Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.828403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:40.828459Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 Forgetting tablet 72075186233409546 2025-06-03T10:30:40.828739Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6 TabletID: 72075186233409551 2025-06-03T10:30:40.828846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:40.828875Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:40.829074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944 2025-06-03T10:30:40.829098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:40.829214Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-03T10:30:40.829680Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-03T10:30:40.829771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:40.829813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409551 2025-06-03T10:30:40.829997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:40.830022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-03T10:30:40.830345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 
paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:40.830354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:40.830383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:40.830543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:40.830550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:40.830563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:40.830830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:30:40.830841Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-03T10:30:40.831056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:40.831063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:40.831083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:40.831087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:40.831523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:30:40.831531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:30:40.831546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:40.831551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:40.831561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:40.831567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:40.831643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:40.831876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 102 
2025-06-03T10:30:40.831944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:40.831953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 TestWaitNotification wait txId: 103 2025-06-03T10:30:40.831968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:30:40.831971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:30:40.832049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:40.832077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:40.832083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:672:2572] 2025-06-03T10:30:40.832100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:30:40.832114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:40.832118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:672:2572] TestWaitNotification: OK eventTxId 102 TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:40.832224Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.832269Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 60us result status StatusPathDoesNotExist 2025-06-03T10:30:40.832324Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:40.832395Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:40.832427Z node 1 :SCHEMESHARD_DESCRIBE 
INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 33us result status StatusSuccess 2025-06-03T10:30:40.832533Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> StoragePool::TestDistributionRandomMin7p [GOOD]
>> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine
>> THiveTest::TestLockTabletExecutionDelete [GOOD]
>> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD]
>> THiveTest::TestLockTabletExecutionDeleteReboot
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeclareAndDelete [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:41.060488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.060534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.060541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.060549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.060566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.060571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition,
limit 10000 2025-06-03T10:30:41.060582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.060597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:41.060720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.060790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.075589Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.075621Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.080035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.080178Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.080225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.082654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.082748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.082876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.082946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.083685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.083754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.084119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.084132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.084147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.084156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.084163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.084187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.085795Z 
node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.108147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.108246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.108322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.108378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.108390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.109164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.109201Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.109274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.109286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.109312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.109319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.109966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.109990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.109995Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.110441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.110452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.110457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.110465Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:41.111089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.111524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.111571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.111800Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.111825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.111841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.111900Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.111905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.111941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.111950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.112383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.112392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.112437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
HandleReply TEvOperationPlan, step: 5000003, at schemeshard: 72057594046678944 2025-06-03T10:30:41.126329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5223: ExamineTreeVFS visit path id [OwnerId: 72057594046678944, LocalPathId: 2] name: USER_0 type: EPathTypeSubDomain state: EPathStateDrop stepDropped: 0 droppedTxId: 101 parent: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.126333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5239: ExamineTreeVFS run path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:41.126364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 101:0 128 -> 130 2025-06-03T10:30:41.126387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.126395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.126604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:41.127042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.127051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.127083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:41.127111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.127117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:41.127125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:41.127223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.127231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-03T10:30:41.127238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:41.127242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:41.127245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:41.127247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 
2025-06-03T10:30:41.127251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:41.127254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:41.127258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:41.127261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:41.127274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:41.127279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:41.127281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:41.127284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:30:41.127385Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:41.127394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:41.127398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:41.127402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:41.127405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:41.127509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:41.127517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:41.127522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:41.127525Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], 
version: 18446744073709551615 2025-06-03T10:30:41.127528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.127536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:41.127556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:41.127560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.127587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:41.127798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:41.127804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.127812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.128139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:41.128453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:41.128473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:41.128483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:30:41.128544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:41.128551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:41.128621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:41.128637Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.128643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:340:2330] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:41.128733Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.128775Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 56us result status StatusPathDoesNotExist 2025-06-03T10:30:41.128844Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable
>> TChargeBTreeIndex::FewNodes [GOOD]
>> TChargeBTreeIndex::FewNodes_Groups
>> TSchemeShardSubDomainTest::Create
>> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD]
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> StoragePool::TestDistributionRandomMin7pWithOverflow [GOOD]
Test command err:
(1,1): 1 on 2 (1,1): 1 on 1 RemoveNode 7 (1,1): 1 on 3 (1,3): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 0 (1,2): 1 on 9 RemoveNode 0 (1,3): 1 on 9 RemoveNode 2 (1,3): 1 on 3 (1,3): 1 on 4 (1,1): -1 on 0 (1,2): 1 on 6 RemoveNode 1 (1,3): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 8 (1,2): -1 on 6 (1,2): 1 on 6 (1,2): 1 on 8 (1,1): 1 on 1 (1,1): 1 on 3 AddNode 0 (1,1): 1 on 0 (1,3): 1 on 2 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 5 (1,3): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 4 (1,1): 1 on 1 (1,3): 1 on 8 (1,2): -1 on 6 RemoveNode 6 (1,1): 1 on 3 (1,1): 1 on 4 (1,1): -1 on 3 AddNode 2 (1,2): 1 on 8 RemoveNode 8 (1,1): 1 on 3 (1,2): 1 on 7 (1,1): 1 on 0 (1,1): 1 on 3 RemoveNode 5 (1,3): 1 on 8 RemoveNode 9 (1,3): 1 on 5 (1,1): 1 on 0 AddNode 6 (1,2): -1 on 7 (1,1): 1 on 2 (1,2): 1 on 9 AddNode 1 (1,1): 1 on 3 RemoveNode 6 (1,3): 1 on 7 (1,1): 1 on 3 (1,3): 1 on 0 (1,3): -1 on 5 (1,3): 1 on 3 (1,3): -1 on 3 (1,1): -1 on 1 (1,1): 1 on 0 (1,2): 1 on 7 (1,1): 1 on 3 (1,2): 1 on 8 (1,3): 1 on 8 (1,3): 1 on 3 (1,3): 1 on 0 (1,2): -1 on 7 (1,1): -1 on 4 (1,2): 1 on 7 RemoveNode 0 (1,1): 1 on 4 (1,3): 1 on 2 (1,3): 1 on 2 AddNode 5 (1,2): 1 on 9 (1,3): 1 on 1 (1,1): 1 on 3 RemoveNode 2 (1,3): -1 on 0 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): 1 on 2 RemoveNode 4 (1,3): 1 on 0 (1,1): 1 on 1 (1,2): 1 on 6 (1,2): 1 on 7 (1,1): 1 on 0 (1,1): 1 on 1 AddNode 8 (1,1): -1 on 3 (1,1): -1 on 3 (1,2): 1 on 8 (1,2): 1 on 6 AddNode 2 (1,1): 1 on 1 (1,2): 1 on 6 RemoveNode 5 (1,3): -1 on 5 (1,1): 1 on 3 (1,2): 1 on 9 (1,3): 1 on 6 (1,2): 1 on 8 (1,3): 1 on 2 AddNode 4 (1,2): 1 on 9 (1,1): 1 on 0 (1,2): 1 on 5 (1,1): 1 on 4 AddNode 9 (1,3): -1 on 4 RemoveNode 4 (1,3):
1 on 0 (1,1): -1 on 3 (1,1): 1 on 3 (1,3): 1 on 9 (1,2): -1 on 8 (1,2): 1 on 5 AddNode 7 (1,3): 1 on 1 (1,3): 1 on 3 (1,1): 1 on 0 (1,3): 1 on 0 RemoveNode 1 (1,1): 1 on 2 RemoveNode 9 (1,1): -1 on 1 (1,3): -1 on 8 (1,2): 1 on 9 (1,1): 1 on 4 AddNode 4 (1,1): 1 on 2 AddNode 1 (1,1): 1 on 4 (1,3): -1 on 8 (1,1): 1 on 3 RemoveNode 2 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): -1 on 9 AddNode 2 (1,1): 1 on 3 (1,3): -1 on 8 (1,2): 1 on 6 (1,3): -1 on 1 (1,2): -1 on 8 RemoveNode 7 (1,2): 1 on 5 RemoveNode 4 (1,1): 1 on 4 (1,1): 1 on 0 (1,3): 1 on 8 (1,2): 1 on 8 (1,2): 1 on 7 RemoveNode 2 (1,1): 1 on 4 RemoveNode 3 (1,1): 1 on 0 (1,2): 1 on 5 AddNode 7 (1,1): 1 on 4 (1,1): 1 on 4 (1,1): -1 on 2 (1,3): 1 on 3 (1,1): 1 on 0 (1,3): 1 on 8 (1,2): 1 on 8 AddNode 9 (1,2): 1 on 6 AddNode 4 (1,1): 1 on 3 AddNode 0 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): 1 on 7 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): 1 on 1 (1,1): 1 on 1 (1,3): -1 on 0 AddNode 2 (1,3): -1 on 0 (1,1): 1 on 4 (1,1): 1 on 4 (1,3): 1 on 0 RemoveNode 0 (1,3): 1 on 3 (1,2): -1 on 8 (1,2): 1 on 6 (1,2): 1 on 8 (1,2): 1 on 7 (1,3): 1 on 6 (1,2): 1 on 7 (1,2): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 5 AddNode 6 (1,2): 1 on 7 RemoveNode 9 (1,1): 1 on 1 (1,1): 1 on 1 (1,2): 1 on 7 RemoveNode 8 (1,3): -1 on 6 RemoveNode 7 (1,1): 1 on 2 (1,2): 1 on 9 (1,1): 1 on 1 RemoveNode 4 (1,2): 1 on 5 RemoveNode 1 (1,1): 1 on 1 AddNode 8 (1,3): -1 on 8 (1,3): 1 on 0 AddNode 0 (1,3): 1 on 3 (1,2): 1 on 6 (1,2): -1 on 9 RemoveNode 2 (1,3): 1 on 2 (1,3): 1 on 1 (1,3): 1 on 8 RemoveNode 6 (1,1): -1 on 0 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 3 (1,3): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 8 AddNode 7 (1,3): 1 on 5 AddNode 5 (1,1): -1 on 3 RemoveNode 7 (1,3): -1 on 8 AddNode 7 (1,1): -1 on 3 (1,3): 1 on 3 RemoveNode 7 (1,2): 1 on 7 (1,3): 1 on 7 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 3 (1,2): -1 on 9 (1,1): -1 on 2 (1,2): 1 on 9 AddNode 7 (1,2): -1 on 8 AddNode 0 (1,1): 1 on 2 (1,3): 1 on 0 (1,2): 1 on 9 AddNode 2 (1,3): 1 on 0 RemoveNode 7 (1,3): 1 on 8 RemoveNode 2 (1,1): 1 on 4 (1,2): 1 on 8 (1,2): 1 on 7 (1,3): 1 on 0 (1,3): -1 on 1 AddNode 2 (1,3): 1 on 2 (1,3): -1 on 7 (1,3): 1 on 0 (1,1): 1 on 0 (1,3): 1 on 0 (1,2): 1 on 9 RemoveNode 5 (1,1): -1 on 3 (1,3): 1 on 7 (1,1): 1 on 1 (1,2): 1 on 7 AddNode 9 (1,2): 1 on 6 (1,1): 1 on 1 (1,3): 1 on 3 (1,1): 1 on 1 (1,1): -1 on 2 (1,2): -1 on 7 AddNode 4 (1,2): 1 on 8 (1,3): 1 on 5 (1,1): 1 on 0 (1,1): 1 on 4 (1,1): 1 on 1 (1,2): 1 on 7 (1,3): -1 on 2 (1,2): 1 on 9 (1,3): -1 on 5 (1,1): 1 on 0 (1,2): 1 on 8 (1,3): -1 on 0 (1,3): 1 on 7 (1,1): 1 on 0 (1,3): 1 on 1 (1,2): 1 on 6 (1,2): -1 on 7 (1,1): 1 on 2 (1,2): 1 on 6 (1,2): -1 on 9 RemoveNode 2 (1,3): 1 on 7 (1,3): 1 on 2 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): 1 on 9 (1,2): 1 on 6 (1,3): 1 on 2 (1,2): 1 on 5 (1,2): 1 on 6 RemoveNode 3 (1,1): 1 on 4 (1,2): 1 on 9 (1,2): -1 on 8 (1,3): -1 on 6 (1,3): 1 on 0 (1,1): 1 on 0 (1,3): 1 on 3 AddNode 1 (1,3): 1 on 4 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 0 RemoveNode 4 (1,2): 1 on 9 (1,3): 1 on 7 (1,1): 1 on 1 (1,2): -1 on 6 AddNode 5 (1,3): -1 on 0 (1,2): 1 on 9 (1,1): 1 on 2 (1,2): 1 on 9 AddNode 3 (1,3): -1 on 2 (1,3): 1 on 1 RemoveNode 8 (1,1): 1 on 0 (1,2): -1 on 5 AddNode 4 (1,3): -1 on 5 (1,3): 1 on 0 (1,3): -1 on 0 (1,3): 1 on 7 (1,1): 1 on 0 RemoveNode 9 (1,1): -1 on 4 (1,3): 1 on 0 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 8 (1,2): 1 on 5 RemoveNode 1 (1,3): 1 on 4 (1,3): 1 on 6 (1,1): 1 on 0 (1,1): 1 on 1 AddNode 2 (1,2): -1 on 6 AddNode 1 (1,3): 1 on 6 (1,1): 1 on 4 (1,3): -1 on 8 (1,3): 1 on 3 RemoveNode 2 (1,1): 1 on 1 (1,1): 1 on 0 (1,3): 1 on 0 
AddNode 9 (1,1): -1 on 4 (1,3): 1 on 7 (1,2): -1 on 9 (1,3): 1 on 7 RemoveNode 4 (1,3): -1 on 9 AddNode 8 (1,1): 1 on 1 (1,1): 1 on 0 (1,1): 1 on 4 (1,2): 1 on 5 (1,2): 1 on 9 RemoveNode 8 (1,2): 1 on 9 (1,3): 1 on 8 (1,2): 1 on 5 (1,3): 1 on 1 AddNode 7 (1,3): 1 on 4 AddNode 4 (1,1): 1 on 3 RemoveNode 7 (1,1): 1 on 4 (1,2): 1 on 7 (1,3): 1 on 7 (1,1): 1 on 4 (1,2): 1 on 8 AddNode 6 (1,1): 1 on 2 RemoveNode 6 (1,2): 1 on 6 (1,3): 1 on 1 (1,1): -1 on 3 AddNode 0 (1,1): 1 on 0 (1,1): -1 on 2 (1,3): 1 on 9 (1,2): -1 on 8 (1,1): 1 on 3 RemoveNode 3 (1,3): -1 on 0 (1,2): 1 on 5 RemoveNode 1 (1,2): 1 on 9 AddNode 3 (1,1): -1 on 3 (1,2): 1 on 7 (1,2): 1 on 6 AddNode 8 (1,3): 1 on 6 AddNode 1 (1,3): -1 on 3 (1,1): 1 on 3 (1,3): 1 on 4 (1,1): 1 on 4 (1,2): 1 on 6 (1,1): 1 on 3 (1,3): -1 on 7 (1,1): 1 on 4 (1,2): 1 on 8 RemoveNode 4 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): -1 on 0 (1,1): 1 on 4 (1,1): 1 on 0 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): 1 on 0 RemoveNode 5 (1,2): 1 on 8 (1,2): 1 on 8 RemoveNode 8 (1,1): -1 on 0 (1,3): 1 on 1 (1,2): 1 on 6 RemoveNode 0 (1,3): -1 on 2 (1,1): 1 on 0 (1,2): 1 on 8 (1,3): 1 on 6 (1,2): 1 on 6 (1,3): 1 on 8 RemoveNode 1 (1,2): 1 on 8 (1,1): 1 on 2 (1,1): 1 on 4 AddNode 2 (1,2): 1 on 6 (1,1): -1 on 2 (1,3): 1 on 5 (1,1): 1 on 4 (1,1): 1 on 3 (1,2): 1 on 5 (1,2): 1 on 9 (1,3): 1 on 1 RemoveNode 2 (1,3): 1 on 9 (1,1): 1 on 1 AddNode 2 (1,2): 1 on 8 (1,2): 1 on 6 AddNode 8 (1,2): 1 on 8 (1,3): 1 on 8 AddNode 0 (1,3): 1 on 8 (1,1): 1 on 4 (1,1): -1 on 2 RemoveNode 9 (1,1): 1 on 1 (1,1): 1 on 3 (1,1): -1 on 3 (1,3): 1 on 4 (1,3): 1 on 5 AddNode 1 (1,2): 1 on 6 (1,2): -1 on 9 (1,1): 1 on 4 (1,3): 1 on 9 (1,3): 1 on 1 (1,3): 1 on 7 (1,2): -1 on 8 (1,2): 1 on 6 (1,1): 1 on 0 (1,2): -1 on 9 (1,1): 1 on 1 (1,2): 1 on 5 (1,1): 1 on 3 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): -1 on 6 (1,1): 1 on 0 (1,1): 1 on 4 (1,2): 1 on 9 (1,3): 1 on 5 (1,3): 1 on 2 AddNode 5 (1,3): 1 on 8 (1,2): 1 on 9 (1,1): 1 on 0 RemoveNode 5 (1,2): -1 on 7 (1,2): 1 on 6 (1,2): 1 on 6 (1,2): -1 on 5 (1,1): 1 on 3 (1,3): 1 on 5 (1,3): 1 on 4 (1,3): 1 on 4 (1,3): -1 on 2 (1,2): -1 on 7 (1,1): 1 on 3 (1,3): -1 on 7 (1,2): 1 on 6 (1,1): 1 on 2 AddNode 6 (1,1): -1 on 0 (1,2): -1 on 5 (1,3): 1 on 6 (1,1): 1 on 1 AddNode 9 (1,1): 1 on 4 (1,1): 1 on 1 AddNode 7 (1,3): 1 on 3 (1,2): -1 on 7 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 4 (1,3): 1 on 2 (1,2): 1 on 5 RemoveNode 8 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 6 (1,2): 1 on 7 (1,3): 1 on 3 (1,3): 1 on 8 (1,1): 1 on 3 RemoveNode 4 (1,3): 1 on 4 (1,3): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 2 (1,2): 1 on 8 AddNode 8 (1,3): 1 on 9 (1,1): 1 on 3 (1,2): 1 on 9 AddNode 5 (1,1): 1 on 3 RemoveNode 8 (1,2): 1 on 9 RemoveNode 7 (1,3): 1 on 0 (1,2): -1 on 9 RemoveNode 1 (1,1): -1 on 1 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 2 (1,2): 1 on 7 (1,2): 1 on 8 (1,2): 1 on 9 (1,2): 1 on 7 (1,1): -1 on 4 (1,3): 1 on 1 (1,2): -1 on 5 (1,1): 1 on 3 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 4 (1,2): 1 on 8 RemoveNode 3 (1,2): 1 on 9 AddNode 1 (1,3): -1 on 2 (1,2): -1 on 6 (1,2): 1 on 9 (1,3): -1 on 2 AddNode 2 (1,3): 1 on 0 RemoveNode 0 (1,1): -1 on 3 (1,2): 1 on 6 (1,2): 1 on 9 (1,2): 1 on 9 AddNode 6 (1,2): -1 on 7 RemoveNode 4 (1,2): 1 on 6 AddNode 4 (1,2): 1 on 6 (1,1): 1 on 4 AddNode 0 (1,3): 1 on 4 RemoveNode 9 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): 1 on 6 AddNode 8 (1,1): 1 on 0 (1,1): 1 on 0 AddNode 7 (1,1): 1 on 3 (1,3): 1 on 5 (1,3): -1 on 7 (1,1): -1 on 4 RemoveNode 6 (1,3): 1 on 0 RemoveNode 7 (1,1): 1 on 4 (1,3): 1 on 3 (1,3): 1 on 2 (1,1): 1 on 4 AddNode 9 (1,2): 1 on 8 (1,1): 1 on 0 RemoveNode 0 (1,2): 
-1 on 8 (1,2): 1 on 6 AddNode 7 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 5 (1,2): 1 on 5 AddNode 3 (1,1): 1 on 3 RemoveNode 7 (1,1): 1 on 2 (1,3): 1 on 0 RemoveNode 4 (1,1): 1 on 4 (1,2): -1 on 8 (1,2): 1 on 7 RemoveNode 1 (1,2): 1 on 9 (1,2): 1 on 7 (1,2): 1 on 7 (1,3): -1 on 5 AddNode 6 (1,3): 1 on 8 RemoveNode 9 (1,2): 1 on 5 (1,3): 1 on 9 (1,1): 1 on 2 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 8 (1,2): 1 on 9 (1,3): 1 on 6 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): -1 on 2 RemoveNode 6 (1,2): 1 on 6 RemoveNode 2 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 3 (1,1): 1 on 1 (1,3): 1 on 8 AddNode 0 (1,2): 1 on 8 RemoveNode 5 (1,3): 1 on 6 AddNode 8 (1,1): -1 on 1 (1,1): 1 on 4 AddNode 2 (1,1): 1 on 2 RemoveNode 2 (1,1): -1 on 4 (1,1): 1 on 1 AddNode 9 (1,2): 1 on 6 (1,2): 1 on 5 RemoveNode 8 (1,3): 1 on 3 AddNode 7 (1,2): 1 on 8 (1,2): 1 on 6 (1,2): 1 on 5 RemoveNode 3 (1,3): 1 on 3 AddNode 4 (1,2): 1 on 5 (1,3): 1 on 1 (1,3): -1 on 6 (1,2): 1 on 5 RemoveNode 4 (1,3): -1 on 6 (1,3): 1 on 2 (1,1): -1 on 4 (1,3): 1 on 9 (1,1): -1 on 0 (1,2): 1 on 7 (1,1): 1 on 1 (1,1): -1 on 1 (1,1): 1 on 1 (1,3): 1 on 6 (1,2): 1 on 8 AddNode 2 (1,3): 1 on 0 (1,2): 1 on 8 RemoveNode 7 (1,1): 1 on 3 (1,1): 1 on 1 AddNode 1 (1,1): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 1 (1,3): 1 on 7 AddNode 8 (1,1): 1 on 4 (1,3): -1 on 7 (1,2): 1 on 8 (1,1): 1 on 2 (1,2): 1 on 5 AddNode 7 (1,1): -1 on 3 (1,2): -1 on 7 (1,2): 1 on 5 AddNode 4 (1,2): -1 on 9 (1,2): -1 on 7 (1,1): -1 on 2 (1,2): 1 on 6 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 8 RemoveNode 1 (1,2): 1 on 7 RemoveNode 4 (1,2): 1 on 5 (1,1): -1 on 2 (1,1): 1 on 0 (1,3): -1 on 2 (1,2): 1 on 6 AddNode 6 (1,2): 1 on 5 (1,3): 1 on 6 (1,2): 1 on 5 AddNode 4 (1,3): 1 on 8 (1,2): 1 on 8 (1,3): 1 on 1 (1,3): -1 on 6 (1,2): 1 on 8 (1,3): -1 on 3 (1,2): 1 on 6 (1,1): 1 on 2 (1,3): -1 on 8 (1,2): 1 on 5 (1,3): 1 on 3 AddNode 3 (1,1): 1 on 0 RemoveNode 8 (1,2): 1 on 7 AddNode 8 (1,3): 1 on 3 (1,1): -1 on 0 RemoveNode 0 (1,2): 1 on 8 (1,2): 1 on 9 RemoveNode 3 (1,1): -1 on 2 RemoveNode 8 (1,1): 1 on 0 RemoveNode 7 (1,1): 1 on 4 (1,2): 1 on 8 (1,3): 1 on 9 (1,1): 1 on 2 (1,3): 1 on 3 AddNode 3 (1,2): 1 on 8 AddNode 1 (1,2): 1 on 7 RemoveNode 6 (1,2): 1 on 5 (1,2): -1 on 6 RemoveNode 9 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 0 (1,1): 1 on 4 AddNode 7 (1,3): 1 on 3 (1,1): 1 on 4 (1,3): 1 on 5 (1,1): 1 on 0 (1,3): 1 on 6 (1,3): -1 on 2 RemoveNode 0 (1,3): 1 on 7 AddNode 0 (1,1): 1 on 1 (1,1): -1 on 2 AddNode 6 (1,1): 1 on 3 (1,2): 1 on 5 RemoveNode 2 (1,3): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 1 (1,1): 1 on 0 (1,2): 1 on 7 AddNode 2 (1,2): 1 on 6 (1,3): 1 on 0 RemoveNode 4 (1,1): 1 on 0 (1,1): 1 on 3 (1,3): 1 on 6 (1,2): 1 on 8 (1,3): -1 on 8 (1,2): -1 on 7 (1,1): -1 on 1 (1,1): 1 on 0 (1,1): 1 on 1 RemoveNode 0 (1,1): 1 on 4 RemoveNode 7 (1,2): 1 on 5 RemoveNode 1 (1,1): 1 on 2 (1,2): -1 on 7 (1,3): -1 on 9 (1,1): -1 on 4 (1,1): 1 on 4 (1,3): 1 on 5 (1,1): 1 on 4 RemoveNode 3 (1,1): 1 on 4 (1,1): 1 on 4 (1,1): 1 on 4 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 3 (1,3): 1 on 0 (1,3): 1 on 1 (1,1): 1 on 2 (1,1): 1 on 1 (1,1): 1 on 0 AddNode 4 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 4 (1,1): 1 on 3 (1,2): 1 on 5 (1,2): 1 on 6 AddNode 7 (1,3): 1 on 8 (1,1): 1 on 2 (1,3): 1 on 3 AddNode 8 (1,1 ... 
4 (1,3): 1 on 3 (1,2): 1 on 8 AddNode 5 (1,2): 1 on 7 (1,2): -1 on 8 RemoveNode 9 (1,2): -1 on 6 (1,2): 1 on 6 (1,3): 1 on 9 RemoveNode 6 (1,1): 1 on 2 (1,3): -1 on 2 (1,1): -1 on 4 RemoveNode 5 (1,2): 1 on 8 (1,2): 1 on 5 AddNode 2 (1,1): 1 on 4 (1,3): 1 on 3 RemoveNode 2 (1,3): 1 on 6 (1,2): 1 on 6 (1,3): 1 on 2 RemoveNode 7 (1,1): 1 on 3 AddNode 2 (1,2): 1 on 6 (1,2): 1 on 9 (1,3): 1 on 0 (1,2): 1 on 7 RemoveNode 4 (1,2): 1 on 5 (1,2): 1 on 9 AddNode 4 (1,3): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 3 (1,2): 1 on 7 (1,1): 1 on 4 (1,3): 1 on 6 (1,3): 1 on 4 (1,1): 1 on 1 (1,3): 1 on 5 (1,1): -1 on 0 RemoveNode 3 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): -1 on 1 RemoveNode 4 (1,3): 1 on 1 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 0 (1,3): 1 on 2 (1,2): 1 on 5 AddNode 1 (1,2): 1 on 7 (1,2): 1 on 9 AddNode 4 (1,1): 1 on 2 (1,3): 1 on 0 AddNode 9 (1,3): -1 on 6 AddNode 7 (1,2): 1 on 8 (1,1): 1 on 2 RemoveNode 4 (1,3): 1 on 6 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 8 (1,2): 1 on 6 (1,2): 1 on 9 RemoveNode 7 (1,3): 1 on 9 (1,3): 1 on 3 (1,3): 1 on 5 (1,3): 1 on 5 (1,2): 1 on 6 (1,3): 1 on 7 (1,3): -1 on 2 (1,2): -1 on 9 (1,1): -1 on 4 (1,2): 1 on 7 RemoveNode 9 (1,3): 1 on 0 RemoveNode 1 (1,1): 1 on 0 AddNode 7 (1,3): 1 on 8 (1,2): 1 on 6 (1,1): 1 on 1 RemoveNode 2 (1,3): -1 on 0 (1,2): -1 on 6 (1,3): 1 on 5 AddNode 3 (1,1): -1 on 3 AddNode 4 (1,3): 1 on 1 (1,1): 1 on 2 (1,2): 1 on 5 AddNode 9 (1,1): 1 on 4 (1,2): 1 on 6 RemoveNode 7 (1,3): -1 on 5 (1,1): 1 on 1 (1,3): 1 on 6 RemoveNode 9 (1,3): 1 on 9 RemoveNode 8 (1,1): 1 on 2 AddNode 6 (1,1): 1 on 2 (1,3): 1 on 7 (1,2): 1 on 7 AddNode 8 (1,2): 1 on 5 AddNode 5 (1,2): 1 on 7 (1,2): 1 on 6 (1,2): 1 on 5 (1,3): 1 on 5 (1,1): 1 on 4 (1,2): -1 on 5 RemoveNode 4 (1,2): 1 on 5 (1,3): 1 on 2 (1,1): 1 on 1 (1,3): 1 on 3 (1,2): -1 on 9 (1,2): -1 on 6 AddNode 4 (1,3): 1 on 9 RemoveNode 4 (1,3): -1 on 1 RemoveNode 0 (1,3): 1 on 8 (1,2): 1 on 7 AddNode 2 (1,3): 1 on 1 (1,2): 1 on 6 AddNode 7 (1,2): 1 on 9 AddNode 1 (1,2): 1 on 9 (1,2): 1 on 8 (1,1): 1 on 0 (1,3): 1 on 9 RemoveNode 6 (1,2): 1 on 8 AddNode 6 (1,3): -1 on 7 (1,2): 1 on 8 (1,3): -1 on 5 (1,2): 1 on 8 AddNode 0 (1,1): 1 on 2 (1,1): 1 on 1 (1,2): 1 on 5 RemoveNode 0 (1,3): -1 on 9 (1,3): 1 on 0 AddNode 0 (1,3): 1 on 8 RemoveNode 7 (1,2): -1 on 5 (1,1): 1 on 1 (1,1): -1 on 3 RemoveNode 2 (1,1): 1 on 0 (1,2): -1 on 7 (1,3): 1 on 2 (1,1): 1 on 2 (1,3): 1 on 1 (1,1): -1 on 1 (1,2): 1 on 6 (1,3): 1 on 4 (1,2): 1 on 9 (1,3): -1 on 4 RemoveNode 3 (1,2): 1 on 6 (1,3): 1 on 4 RemoveNode 5 (1,1): 1 on 0 (1,3): 1 on 3 RemoveNode 1 (1,3): -1 on 0 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 0 (1,2): -1 on 5 AddNode 3 (1,2): 1 on 9 (1,1): 1 on 1 AddNode 2 (1,2): 1 on 8 RemoveNode 0 (1,3): 1 on 8 RemoveNode 2 (1,3): 1 on 0 RemoveNode 6 (1,2): 1 on 6 (1,3): 1 on 5 (1,1): 1 on 1 AddNode 7 (1,1): 1 on 0 (1,2): 1 on 6 (1,1): 1 on 3 (1,2): 1 on 5 RemoveNode 8 (1,2): 1 on 8 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 4 (1,2): 1 on 8 AddNode 8 (1,2): 1 on 6 RemoveNode 8 (1,3): 1 on 9 AddNode 9 (1,2): 1 on 9 RemoveNode 9 (1,2): 1 on 9 AddNode 8 (1,2): 1 on 8 (1,3): -1 on 5 AddNode 6 (1,3): 1 on 3 RemoveNode 7 (1,3): 1 on 8 (1,2): 1 on 7 AddNode 0 (1,2): -1 on 9 (1,3): 1 on 4 (1,2): 1 on 7 (1,3): -1 on 5 (1,1): -1 on 1 (1,1): 1 on 2 (1,3): 1 on 6 AddNode 5 (1,3): 1 on 7 RemoveNode 3 (1,2): 1 on 8 (1,2): 1 on 5 (1,1): 1 on 3 (1,3): 1 on 5 (1,1): 1 on 4 (1,3): 1 on 8 (1,3): 1 on 7 (1,2): -1 on 8 AddNode 3 (1,1): 1 on 0 RemoveNode 0 (1,2): 1 on 6 (1,1): 1 on 2 (1,3): 1 on 8 RemoveNode 4 (1,3): 1 on 2 (1,2): -1 on 6 (1,3): 1 on 3 AddNode 2 (1,3): 1 on 
5 (1,1): 1 on 2 (1,3): 1 on 2 RemoveNode 3 (1,3): 1 on 3 (1,2): 1 on 6 RemoveNode 5 (1,2): 1 on 9 (1,3): -1 on 9 (1,2): 1 on 6 (1,2): -1 on 6 AddNode 0 (1,2): 1 on 5 AddNode 3 (1,3): -1 on 4 (1,3): 1 on 7 RemoveNode 2 (1,1): 1 on 1 (1,3): 1 on 5 RemoveNode 8 (1,1): 1 on 2 (1,2): 1 on 7 (1,2): 1 on 9 RemoveNode 0 (1,1): -1 on 2 RemoveNode 6 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 4 (1,1): 1 on 2 (1,1): 1 on 2 (1,2): 1 on 5 (1,1): 1 on 0 (1,2): 1 on 6 (1,3): -1 on 8 (1,3): 1 on 5 (1,3): 1 on 1 (1,1): 1 on 3 AddNode 4 (1,1): -1 on 3 (1,1): 1 on 2 (1,3): -1 on 5 RemoveNode 4 (1,3): 1 on 2 (1,1): 1 on 0 (1,3): -1 on 6 (1,1): 1 on 1 (1,2): 1 on 6 (1,2): -1 on 6 (1,2): 1 on 7 (1,3): -1 on 3 AddNode 7 (1,2): 1 on 7 RemoveNode 0 (1,3): 1 on 0 (1,1): 1 on 2 (1,2): 1 on 6 (1,3): 1 on 7 (1,1): 1 on 2 (1,2): 1 on 9 RemoveNode 7 (1,2): -1 on 6 AddNode 7 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): -1 on 6 RemoveNode 3 (1,3): 1 on 9 (1,2): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 5 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 1 (1,2): 1 on 5 RemoveNode 7 (1,3): -1 on 7 AddNode 7 (1,1): 1 on 1 (1,2): 1 on 9 (1,2): -1 on 6 AddNode 5 (1,1): 1 on 1 (1,3): 1 on 2 (1,3): 1 on 2 (1,2): 1 on 9 AddNode 4 (1,3): 1 on 9 (1,3): -1 on 2 RemoveNode 4 (1,3): -1 on 4 RemoveNode 7 (1,1): -1 on 4 (1,3): 1 on 2 (1,3): -1 on 2 (1,1): 1 on 4 (1,1): 1 on 0 (1,2): 1 on 9 (1,3): 1 on 0 (1,2): -1 on 7 AddNode 9 (1,1): 1 on 4 (1,3): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 3 AddNode 2 (1,3): 1 on 2 RemoveNode 0 (1,2): -1 on 7 (1,1): 1 on 2 (1,3): 1 on 7 (1,2): 1 on 6 (1,2): -1 on 6 AddNode 6 (1,1): 1 on 3 (1,1): 1 on 4 (1,2): -1 on 6 (1,3): 1 on 0 AddNode 7 (1,1): 1 on 0 (1,3): -1 on 8 RemoveNode 9 (1,2): 1 on 7 (1,2): 1 on 5 (1,1): 1 on 2 (1,1): 1 on 0 (1,3): 1 on 4 (1,1): 1 on 0 AddNode 0 (1,1): 1 on 3 RemoveNode 7 (1,3): 1 on 0 (1,2): 1 on 7 (1,2): 1 on 9 (1,2): 1 on 5 AddNode 7 (1,3): 1 on 8 (1,1): 1 on 1 RemoveNode 0 (1,2): 1 on 9 (1,2): -1 on 5 AddNode 1 (1,2): 1 on 5 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): 1 on 3 (1,3): 1 on 0 (1,2): 1 on 9 (1,1): 1 on 1 (1,1): 1 on 1 AddNode 9 (1,3): 1 on 2 RemoveNode 7 (1,2): 1 on 5 RemoveNode 1 (1,1): -1 on 1 (1,1): -1 on 3 (1,3): 1 on 2 AddNode 0 (1,2): 1 on 7 (1,3): -1 on 0 (1,1): 1 on 3 AddNode 8 (1,2): 1 on 7 (1,3): 1 on 5 (1,2): 1 on 6 (1,3): 1 on 2 (1,3): 1 on 2 RemoveNode 0 (1,2): 1 on 5 AddNode 0 (1,1): -1 on 1 RemoveNode 2 (1,1): 1 on 2 (1,1): -1 on 2 (1,3): 1 on 8 (1,2): 1 on 9 (1,3): -1 on 6 (1,3): -1 on 8 (1,1): 1 on 2 RemoveNode 0 (1,1): -1 on 3 (1,2): 1 on 9 (1,1): 1 on 4 (1,1): 1 on 0 (1,1): 1 on 0 (1,2): -1 on 8 (1,2): 1 on 5 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 6 (1,2): 1 on 9 (1,1): 1 on 2 (1,2): 1 on 8 (1,3): 1 on 9 RemoveNode 5 (1,2): 1 on 7 (1,2): 1 on 5 (1,2): -1 on 9 (1,3): 1 on 5 (1,2): 1 on 8 (1,3): 1 on 8 RemoveNode 6 (1,2): -1 on 6 (1,3): 1 on 6 (1,3): 1 on 3 (1,2): 1 on 8 (1,1): 1 on 1 (1,3): 1 on 1 (1,1): 1 on 1 AddNode 6 (1,1): 1 on 4 AddNode 3 (1,2): 1 on 8 (1,1): 1 on 2 RemoveNode 9 (1,3): 1 on 1 AddNode 2 (1,1): 1 on 0 (1,3): 1 on 7 AddNode 9 (1,1): -1 on 2 AddNode 1 (1,1): -1 on 1 (1,2): 1 on 8 RemoveNode 2 (1,1): 1 on 3 (1,2): 1 on 7 (1,2): 1 on 7 (1,2): 1 on 9 AddNode 2 (1,2): 1 on 8 (1,2): 1 on 9 (1,3): 1 on 3 RemoveNode 2 (1,1): 1 on 4 AddNode 7 (1,1): 1 on 1 RemoveNode 9 (1,2): 1 on 9 (1,3): 1 on 7 AddNode 4 (1,2): 1 on 6 (1,3): -1 on 7 (1,2): -1 on 6 (1,3): 1 on 5 (1,2): -1 on 8 (1,1): 1 on 3 AddNode 2 (1,1): 1 on 1 (1,2): 1 on 8 (1,3): 1 on 2 (1,1): 1 on 4 (1,3): -1 on 8 (1,1): 1 on 3 (1,1): 1 on 4 RemoveNode 8 (1,1): 1 on 3 RemoveNode 4 (1,2): 1 on 8 (1,2): 1 on 9 (1,3): -1 on 2 (1,1): 
-1 on 0 (1,2): 1 on 5 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 3 (1,1): 1 on 4 (1,1): -1 on 1 (1,1): 1 on 0 (1,1): -1 on 4 (1,2): 1 on 9 (1,3): 1 on 7 (1,3): 1 on 8 (1,1): 1 on 1 (1,3): -1 on 4 (1,1): 1 on 0 (1,1): 1 on 3 (1,1): 1 on 3 RemoveNode 1 (1,3): 1 on 3 (1,3): 1 on 0 (1,1): 1 on 3 RemoveNode 2 (1,3): 1 on 5 (1,1): -1 on 2 (1,2): 1 on 8 (1,1): 1 on 1 RemoveNode 7 (1,3): -1 on 2 (1,1): 1 on 2 (1,1): 1 on 0 (1,1): 1 on 3 (1,1): 1 on 3 (1,1): 1 on 0 AddNode 9 (1,3): -1 on 7 (1,1): 1 on 1 RemoveNode 0 (1,3): -1 on 9 AddNode 2 (1,1): 1 on 3 (1,1): -1 on 0 (1,1): 1 on 0 (1,3): -1 on 1 (1,2): 1 on 8 (1,2): -1 on 8 (1,2): 1 on 9 (1,1): -1 on 4 RemoveNode 2 (1,3): 1 on 2 (1,3): 1 on 3 (1,2): 1 on 8 (1,3): 1 on 5 (1,2): 1 on 9 AddNode 2 (1,2): -1 on 8 RemoveNode 9 (1,3): -1 on 3 (1,3): -1 on 1 RemoveNode 3 (1,1): 1 on 0 AddNode 5 (1,3): 1 on 4 RemoveNode 6 (1,2): 1 on 7 (1,1): 1 on 2 AddNode 1 (1,1): 1 on 1 RemoveNode 2 (1,3): -1 on 1 AddNode 2 (1,3): -1 on 7 (1,2): -1 on 5 (1,1): -1 on 1 (1,1): -1 on 1 (1,1): 1 on 1 AddNode 8 (1,1): 1 on 3 AddNode 3 (1,2): 1 on 9 (1,3): 1 on 5 (1,1): 1 on 2 (1,2): 1 on 6 (1,2): -1 on 7 AddNode 6 (1,3): 1 on 9 (1,1): 1 on 0 AddNode 0 (1,2): 1 on 9 AddNode 7 (1,2): 1 on 7 (1,3): 1 on 1 (1,3): 1 on 1 (1,2): -1 on 9 (1,1): -1 on 3 RemoveNode 7 (1,2): 1 on 8 (1,3): 1 on 0 RemoveNode 0 (1,3): 1 on 1 (1,1): -1 on 3 RemoveNode 1 (1,1): 1 on 4 (1,3): 1 on 0 (1,3): 1 on 5 (1,1): 1 on 0 (1,3): 1 on 8 (1,1): 1 on 2 AddNode 9 (1,3): 1 on 6 (1,3): 1 on 6 (1,1): -1 on 1 (1,1): 1 on 4 AddNode 7 (1,2): 1 on 9 (1,2): -1 on 8 (1,2): 1 on 8 (1,2): -1 on 5 AddNode 4 (1,1): 1 on 0 (1,3): 1 on 5 (1,3): 1 on 2 RemoveNode 4 (1,3): -1 on 9 (1,1): 1 on 4 (1,2): 1 on 9 RemoveNode 9 (1,2): 1 on 7 (1,1): 1 on 2 (1,1): 1 on 3 (1,2): 1 on 7 RemoveNode 6 (1,3): 1 on 2 (1,1): 1 on 2 (1,1): 1 on 0 (1,1): 1 on 0 (1,2): -1 on 5 (1,1): 1 on 2 (1,2): 1 on 9 (1,1): -1 on 3 (1,1): 1 on 1 RemoveNode 5 (1,1): 1 on 4 (1,2): 1 on 7 (1,2): 1 on 7 (1,3): 1 on 1 (1,2): 1 on 9 (1,1): 1 on 1 (1,3): 1 on 4 (1,3): 1 on 4 RemoveNode 2 (1,1): 1 on 0 (1,3): 1 on 0 AddNode 4 (1,1): 1 on 0 (1,1): 1 on 3 RemoveNode 4 (1,2): 1 on 8 (1,2): -1 on 9 (1,2): -1 on 7 AddNode 5 (1,1): 1 on 0 AddNode 0 (1,2): 1 on 7 (1,2): -1 on 5 (1,1): 1 on 2 (1,3): 1 on 8 (1,1): -1 on 2 RemoveNode 0 (1,2): 1 on 5 (1,2): -1 on 7 RemoveNode 5 (1,3): 1 on 5 AddNode 2 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 3 (1,1): 1 on 4 (1,2): 1 on 5 (1,3): 1 on 3 AddNode 9 (1,1): 1 on 0 (1,2): 1 on 6 RemoveNode 0 (1,1): 1 on 1 (1,3): 1 on 2 (1,1): -1 on 4 (1,3): 1 on 3 AddNode 5 (1,2): 1 on 9 (1,3): 1 on 2 (1,2): -1 on 5 (1,2): 1 on 6 AddNode 1 (1,1): -1 on 0 RemoveNode 1 (1,2): -1 on 7 AddNode 4 (1,1): 1 on 0 (1,2): 1 on 9 (1,1): 1 on 2 RemoveNode 3 (1,3): -1 on 6 RemoveNode 8 (1,2): 1 on 8 (1,3): 1 on 7 (1,3): 1 on 0 RemoveNode 5 (1,2): -1 on 7 (1,3): 1 on 3 AddNode 6 (1,1): 1 on 2 AddNode 5 (1,2): 1 on 6 AddNode 3 (1,3): 1 on 2 RemoveNode 4 (1,3): 1 on 3 (1,2): 1 on 9 (1,3): 1 on 4 AddNode 0 (1,1): -1 on 4 RemoveNode 0 (1,2): 1 on 6 RemoveNode 5 (1,1): 1 on 0 (1,1): -1 on 4 (1,3): 1 on 1 (1,1): 1 on 0 AddNode 8 (1,1): -1 on 2 (1,3): -1 on 0 (1,3): 1 on 6 (1,1): 1 on 2 (1,2): 1 on 7 AddNode 5 (1,2): 1 on 9 (1,1): 1 on 2 (1,3): 1 on 5 (1,3): 1 on 1 RemoveNode 9 (1,2): 1 on 6 AddNode 0 (1,1): 1 on 1 (1,3): 1 on 3 (1,2): 1 on 6 (1,1): -1 on 0 (1,2): 1 on 9 (1,3): 1 on 1 (1,2): 1 on 8 (1,1): 1 on 3 (1,1): 1 on 3 (1,3): 1 on 4 RemoveNode 5 (1,2): -1 on 6 (1,3): 1 on 4 (1,1): -1 on 1 (1,1): 1 on 3 AddNode 5 (1,1): 1 on 4 (1,3): 1 on 3 (1,1): -1 on 2 (1,3): -1 on 1 
(1,1): 1 on 1 (1,2): 1 on 9 (1,2): 1 on 7 (1,1): 1 on 0 (1,3): 1 on 1 RemoveNode 0 (1,2): 1 on 5 (1,3): -1 on 8 (1,2): 1 on 6 (1,1): 1 on 4 (1,1): -1 on 3 RemoveNode 6 (1,3): 1 on 9 AddNode 9 (1,1): 1 on 0 RemoveNode 5 (1,3): 1 on 0 RemoveNode 3 (1,3): -1 on 4 (1,2): 1 on 8 (1,2): 1 on 7 (1,2): -1 on 7 (1,3): -1 on 6
Final state: 403 387 397 417 400 0 0 0 0 0 0 0 0 0 0 359 427 442 433 410 192 199 174 233 198 205 200 154 185 175 - - + - - - - + + +
Took 2.637083 seconds
avg = 4800 min = 4800 max = 4800 std-dev = 0 ch.0 avg = 1600 ch.0 min = 1520 ch.0 max = 1674 ch.0 std-dev = 30.49098227 ch.1 avg = 1600 ch.1 min = 1523 ch.1 max = 1680 ch.1 std-dev = 32.76888768 ch.2 avg = 1600 ch.2 min = 1522 ch.2 max = 1695 ch.2 std-dev = 30.79090775 avg = 1250 std-dev = 0
avg = 4800 min = 4800 max = 4800 std-dev = 0 ch.0 avg = 1600 ch.0 min = 1600 ch.0 max = 1600 ch.0 std-dev = 0 ch.1 avg = 1600 ch.1 min = 1600 ch.1 max = 1600 ch.1 std-dev = 0 ch.2 avg = 1600 ch.2 min = 1600 ch.2 max = 1600 ch.2 std-dev = 0 avg = 1250 std-dev = 0
avg = 4800 min = 4799 max = 4801 std-dev = 0.2449489743 ch.0 avg = 1600 ch.0 min = 1497 ch.0 max = 1684 ch.0 std-dev = 31.67806812 ch.1 avg = 1600 ch.1 min = 1525 ch.1 max = 1717 ch.1 std-dev = 32.6762911 ch.2 avg = 1600 ch.2 min = 1523 ch.2 max = 1671 ch.2 std-dev = 31.69353246 avg = 1250 std-dev = 0
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest
>> TSchemeShardSubDomainTest::SimultaneousDeclare [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:41.365194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.365224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.365230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.365237Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.365252Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.365257Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:41.365266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.365286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:41.365425Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.365508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.375926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.375951Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.379713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.379824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.379858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.382018Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.382116Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.382228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.382282Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.382984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.383030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.383287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.383295Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.383306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.383312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.383316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.383335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.384470Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.400722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at 
schemeshard: 72057594046678944 2025-06-03T10:30:41.400816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.400879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.400925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.400935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.401851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.401885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.401945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.401954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.401959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.401964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.402535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.402547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.402551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.402918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.402928Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.402933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.402939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:41.403500Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 
IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.403961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.404005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.404162Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.404185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.404200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.404272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.404278Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.404312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.404323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.404802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.404810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.404851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
0:30:41.412999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.413025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.413313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-03T10:30:41.413341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-03T10:30:41.413402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.413417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.413421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-03T10:30:41.413474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 100:0 128 -> 240 2025-06-03T10:30:41.413481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-03T10:30:41.413505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.413516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.413524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:41.413990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.413997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.414030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 
72057594046678944, LocalPathId: 2] 2025-06-03T10:30:41.414042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.414046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:41.414050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-03T10:30:41.414057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.414062Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:41.414073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.414076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.414081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.414083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.414086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:41.414090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.414094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:41.414097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:41.414107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:41.414114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-03T10:30:41.414119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:41.414124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:41.414292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.414302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.414306Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.414309Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:41.414312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:41.414390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.414400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.414405Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.414409Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:41.414413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.414422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-03T10:30:41.414428Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:275:2265] 2025-06-03T10:30:41.415261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:41.415347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:41.415360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.415365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:276:2266] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:41.415455Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.415488Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 38us result status StatusSuccess 2025-06-03T10:30:41.415591Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD]
>> TSchemeShardSubDomainTest::Create [GOOD]
>> TSchemeShardSubDomainTest::CreateAlterNbsChannels
>> THiveTest::TestSkipBadNode [GOOD]
>> THiveTest::TestStopTenant
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest
>> TSchemeShardSubDomainTest::SimultaneousCreateTenantTableForceDrop [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:41.496372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.496402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.496408Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.496416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.496431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.496436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:41.496447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.496461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:41.496600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.496672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.513391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.513419Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.518164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.518291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.518331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.520866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.520948Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.521076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.521133Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.522027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.522089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.522437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.522451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.522464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.522481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.522489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.522514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.524044Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.548151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.548234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.548301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.548357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.548368Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.549245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.549274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.549363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.549376Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.549382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.549388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.549926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.549939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.549946Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.550355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.550368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.550375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.550384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:30:41.551165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.551629Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.551673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.551871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.551898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.551915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.551982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.551990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.552029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.552042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.552510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.552520Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.552562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:41.614283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.614320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:41.614645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:30:41.614665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-03T10:30:41.614724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409550, at schemeshard: 72057594046678944 2025-06-03T10:30:41.614759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:30:41.615912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:41.615954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:41.615962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.615983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.616029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:41.616038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:41.616053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:41.616059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:41.616079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:30:41.616083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:30:41.616097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:41.616101Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:41.616120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:41.616126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 
72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:41.616169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409551, at schemeshard: 72057594046678944 2025-06-03T10:30:41.616195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:41.616213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:30:41.616773Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 100 2025-06-03T10:30:41.616858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:41.616868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-03T10:30:41.616886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:41.616890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-03T10:30:41.616902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:41.616906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:41.617006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.617063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:627:2528] 2025-06-03T10:30:41.617100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.617116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:627:2528] 2025-06-03T10:30:41.617140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.617144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:627:2528] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 
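[Editor's note] The trace above ends with TTxNotificationSubscriber parking waiters per txId and satisfying them when EvNotifyTxCompletionResult arrives (including the immediate satisfaction after "unknown transaction" for already-completed txIds 100-102). Below is a minimal, self-contained sketch of that subscribe/notify pattern. It is illustrative only: the class and method names are invented for the example and are not the YDB test-framework API.

```cpp
// Standalone sketch (not YDB code) of the per-txId waiter bookkeeping
// visible in the log above.
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <vector>

using TTxId = std::uint64_t;
using TWaiterId = int;

class TNotificationSubscriber {
public:
    // Register a waiter for a transaction. If the transaction already
    // completed, satisfy the waiter immediately (mirrors the log's
    // "unknown transaction" warning followed by an instant
    // EvNotifyTxCompletionResult).
    void Wait(TTxId txId, TWaiterId waiter) {
        if (Completed.count(txId)) {
            Satisfy(txId, waiter);
        } else {
            Waiters[txId].push_back(waiter);
        }
    }

    // Called when a completion notification for txId arrives: record it
    // and satisfy everyone who was parked on this txId.
    void OnCompleted(TTxId txId) {
        Completed.insert(txId);
        for (TWaiterId w : Waiters[txId]) {
            Satisfy(txId, w);
        }
        Waiters.erase(txId);
    }

private:
    void Satisfy(TTxId txId, TWaiterId waiter) {
        std::cout << "txId " << txId << ": satisfy waiter " << waiter << "\n";
    }

    std::set<TTxId> Completed;                      // txIds already done
    std::map<TTxId, std::vector<TWaiterId>> Waiters; // parked waiters per txId
};

int main() {
    TNotificationSubscriber subscriber;
    subscriber.OnCompleted(100); // tx 100 finished before anyone waited
    subscriber.Wait(100, 1);     // satisfied immediately
    subscriber.Wait(102, 2);     // parked until completion
    subscriber.OnCompleted(102); // satisfies waiter 2
}
```

Either ordering (wait-then-complete or complete-then-wait) ends with the waiter satisfied, which is why the test can issue TestWaitNotification for txIds 100, 101, and 102 after the operations have already finished.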
2025-06-03T10:30:41.617248Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617323Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 64us result status StatusPathDoesNotExist 2025-06-03T10:30:41.617392Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617505Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617536Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 34us result status StatusPathDoesNotExist 2025-06-03T10:30:41.617556Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617628Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.617668Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 40us result status StatusSuccess 2025-06-03T10:30:41.617760Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> THiveTest::TestHiveFollowersWithChangingDC [GOOD]
>> THiveTest::TestHiveBalancerWithSystemTablets
>> THiveTest::TestLockTabletExecutionDeleteReboot [GOOD]
>> THiveTest::TestLockTabletExecutionRebootReconnect
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest
>> TSchemeShardSubDomainTest::SimultaneousDeclareAndDefine [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:41.645874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.645907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.645913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.645920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.645935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.645941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:41.645951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.645971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:41.646108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.646187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.662351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.662379Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.667327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.667487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.667541Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.670040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.670129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.670267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.670333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.671026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.671084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.671449Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.671464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.671479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.671488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.671494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.671517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.673215Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.695767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.695870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.695958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.696020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.696032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.697080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.697116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.697194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.697206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.697212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.697219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.698011Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.698034Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.698043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.698584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.698604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.698612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.698622Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:30:41.699274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.699852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.699908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.700171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.700208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.700233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.700330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.700341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.700418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.700436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.701014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.701025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.701087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
ationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.711237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.711243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-03T10:30:41.711303Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 100:0 128 -> 240 2025-06-03T10:30:41.711310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 100:0, at tablet# 72057594046678944 2025-06-03T10:30:41.711351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.711360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.711368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.711966Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.711987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.712031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:41.712052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.712059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:41.712065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:41.712160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.712170Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:41.712186Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.712193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone 
TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.712199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.712203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.712209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:41.712215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.712222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:41.712227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:41.712242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:41.712249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-03T10:30:41.712254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:41.712258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:41.712415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.712430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.712440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.712445Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:41.712450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:41.712892Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.712912Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.712918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, 
at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.712924Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:41.712930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.712947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:30:41.713536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:41.713919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-03T10:30:41.714012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:41.714021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-03T10:30:41.714040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:41.714044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:41.714140Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:41.714179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.714186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:313:2303] 2025-06-03T10:30:41.714223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:41.714245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.714248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:313:2303] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:41.714320Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.714376Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 74us result status StatusSuccess 2025-06-03T10:30:41.714513Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousDeclareAndCreateTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:41.685435Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.685469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.685476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.685483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.685501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.685507Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:41.685518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.685540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:41.685670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.685767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.701821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.701851Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.706815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.706936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.706979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.709586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.709686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.709834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.709886Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.710695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.710761Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.711149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.711162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.711176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.711186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.711193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.711214Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.712827Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.736736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.736846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.736939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.737004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.737018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.738150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.738187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.738265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.738276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.738283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.738290Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.738812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.738824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.738830Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.739199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.739207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.739214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.739224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:41.740014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.740421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.740470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.740707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.740734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.740754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.740836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.740844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.740891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.740905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.741469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.741481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.741539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
enant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:41.751497Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.751505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.751543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:41.751559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.751564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:41.751569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:41.751650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.751656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:41.751670Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.751675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.751680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:41.751684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.751689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:41.751695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:41.751700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:41.751704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:41.751714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:41.751720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-03T10:30:41.751727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, 
LocalPathId: 1], 5 2025-06-03T10:30:41.751731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:41.751834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.751844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.751849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.751854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:41.751858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:41.752108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.752118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:41.752122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:41.752127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:41.752131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:41.752139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:30:41.752527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:41.752802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-03T10:30:41.752866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:41.752873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-03T10:30:41.752888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:41.752894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:41.752974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:41.753000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.753006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:313:2303] 2025-06-03T10:30:41.753033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:41.753050Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:41.753054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:313:2303] TestWaitNotification: OK eventTxId 100 TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:41.753119Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.753153Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 45us result status StatusSuccess 2025-06-03T10:30:41.753280Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.753368Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:41.753391Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 25us result status StatusPathDoesNotExist 2025-06-03T10:30:41.753410Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/USER_0\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/USER_0" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] >> TSchemeShardSubDomainTest::DeleteAdd >> TSchemeShardSubDomainTest::Delete >> THiveTest::TestHiveBalancerDifferentResources2 [GOOD] >> THiveTest::TestFollowersCrossDC_Easy >> TSchemeShardSubDomainTest::Delete [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice >> THiveTest::TestStopTenant [GOOD] >> THiveTest::TestTabletsStartingCounter >> TSchemeShardSubDomainTest::DeleteAdd [GOOD] |66.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAlterNbsChannels [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:41.786097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:41.786122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.786126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:41.786131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:41.786143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:41.786146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:41.786154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:41.786173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:41.786260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.786316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.795912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.795938Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.799164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.799250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.799278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.801163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.801239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.801353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at 
schemeshard: 72057594046678944 2025-06-03T10:30:41.801406Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.802132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.802197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.802474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.802481Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.802491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.802497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.802502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.802519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.803709Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.820405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.820483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.820559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.820601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.820611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.821385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.821409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: 
//MyRoot 2025-06-03T10:30:41.821460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.821468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.821472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.821477Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.821934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.821945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.821949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.822281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.822291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.822296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.822302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:41.822828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.823188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.823224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.823375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.823395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.823411Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.823464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.823469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.823501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.823510Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.823907Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.823915Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.823951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 42.335928Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 105, subscribers: 0 2025-06-03T10:30:42.336273Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:42.336283Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:42.336286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:42.336291Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:42.336554Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:42.336747Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:42.336866Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.336934Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-03T10:30:42.337364Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 
72075186233409548 2025-06-03T10:30:42.337471Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:42.337619Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:42.337752Z node 2 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 2025-06-03T10:30:42.337780Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:42.338306Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 Forgetting tablet 72075186233409548 2025-06-03T10:30:42.338963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:42.339039Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 2025-06-03T10:30:42.339206Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:42.339241Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409549 2025-06-03T10:30:42.339479Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:42.339487Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:30:42.339505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:42.339565Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:42.339572Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:42.339602Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:42.339638Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 105 2025-06-03T10:30:42.340253Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:42.340270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:42.340286Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:42.340289Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:42.340297Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:42.340303Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:42.340744Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:42.340755Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:42.340793Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:42.340809Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:42.340819Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:42.340824Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:42.340839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:42.341278Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-03T10:30:42.341381Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-03T10:30:42.341388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-03T10:30:42.341458Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-03T10:30:42.341477Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:30:42.341482Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [2:654:2604] TestWaitNotification: OK eventTxId 105 2025-06-03T10:30:42.341588Z node 2 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.341629Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/BSVolume" took 58us result status StatusPathDoesNotExist 2025-06-03T10:30:42.341679Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:42.341754Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.341764Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 12us result status StatusPathDoesNotExist 2025-06-03T10:30:42.341776Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::Redefine >> TSchemeShardSubDomainTest::SchemeQuotas >> THiveTest::TestLockTabletExecutionRebootReconnect [GOOD] >> THiveTest::TestLockTabletExecutionBadUnlock >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD] >> TSchemeShardSubDomainTest::DeleteAndRestart >> TSchemeShardSubDomainTest::SetSchemeLimits >> THiveTest::TestTabletsStartingCounter [GOOD] >> THiveTest::TestTabletsStartingCounterExternalBoot >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOk [GOOD] >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Delete [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:42.775022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:42.775051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:42.775057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:42.775065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:42.775084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:42.775089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:42.775100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:42.775120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:42.775242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:42.775312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:42.787675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:42.787705Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:42.792170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:42.792290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:42.792331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:42.795491Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:42.795584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:42.795714Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.795782Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:42.796575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:42.796641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:42.797001Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:42.797012Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:42.797025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:42.797040Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:42.797047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:42.797070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.798786Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:42.815448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:42.815535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.815603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:42.815652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:42.815661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.816462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.816502Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:42.816566Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.816575Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:42.816579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:42.816584Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:42.817085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.817097Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:42.817102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:42.817620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.817640Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.817648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.817659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:42.818290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:42.818814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:42.818860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:42.819036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.819063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:42.819080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply 
TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.819155Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:42.819164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.819201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:42.819212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:42.819657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:42.819666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:42.819712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:42.844321Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409546 2025-06-03T10:30:42.844658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.844710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:42.844804Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:30:42.844876Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:30:42.844892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:42.844909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409548 2025-06-03T10:30:42.845030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:42.845044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason 
shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:42.845094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 Forgetting tablet 72075186233409547 2025-06-03T10:30:42.845182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:42.845187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:42.845204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:42.845288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:42.845312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:42.845322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:42.845646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:42.845658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:42.845919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:42.845926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:42.845940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:42.845944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:42.845989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:42.846155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:30:42.846196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:42.846201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:42.846262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 
2025-06-03T10:30:42.846277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:42.846280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:492:2445] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:42.846341Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.846376Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 49us result status StatusPathDoesNotExist 2025-06-03T10:30:42.846417Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:42.846474Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.846497Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 24us result status StatusSuccess 2025-06-03T10:30:42.846549Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { 
ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted 2025-06-03T10:30:42.846606Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:30:42.846631Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:30:42.846638Z node 1 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 2025-06-03T10:30:42.846698Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.846715Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 21us result status StatusSuccess 2025-06-03T10:30:42.846743Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAdd [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:42.580059Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, 
InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:42.580096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:42.580103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:42.580110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:42.580131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:42.580136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:42.580149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:42.580170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:42.580304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:42.580404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:42.596927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:42.596954Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:42.601392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:42.601525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:42.601568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:42.603357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:42.603447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:42.603579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.603646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:42.604397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:42.604459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:42.604849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:30:42.604860Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:42.604873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:42.604884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:42.604891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:42.604915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.606344Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:42.630122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:42.630230Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.630315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:42.630379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:42.630392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.631306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.631336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:42.631408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.631420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:42.631427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:42.631435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:42.631915Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.631926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:42.631933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:42.632254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.632266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.632273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.632283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:42.633223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:42.633754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:42.633805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:42.634051Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:42.634080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:42.634104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.634191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:42.634201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:42.634252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:42.634271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:42.634750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:42.634759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:42.634834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ode 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:30:42.781708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:42.781720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:42.781776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:42.781796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:42.781802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-03T10:30:42.781809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-03T10:30:42.781822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:30:42.781834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:30:42.781851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:42.781857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:42.781863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:30:42.781866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:42.781872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:30:42.781877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:30:42.781884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:30:42.781889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:30:42.781935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 9 2025-06-03T10:30:42.781943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-03T10:30:42.781948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:30:42.781951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:30:42.782188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:42.782205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:42.782212Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:42.782217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:30:42.782222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:42.782349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:42.782360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:30:42.782365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:30:42.782372Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:30:42.782377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 8 2025-06-03T10:30:42.782387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:30:42.783165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:42.783408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:30:42.783471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:42.783478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:42.783554Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:42.783572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:42.783579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:980:2803] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:42.783696Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.783747Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 71us result status StatusSuccess 2025-06-03T10:30:42.783829Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409552 Coordinators: 72075186233409553 Coordinators: 72075186233409554 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409555 Mediators: 72075186233409556 Mediators: 72075186233409557 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 
72057594046678944 2025-06-03T10:30:42.783903Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:42.783915Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 13us result status StatusSuccess 2025-06-03T10:30:42.783953Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:40.098652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:40.098679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:40.098685Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:40.098693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 
2025-06-03T10:30:40.098710Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:40.098715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:40.098726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:40.098741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:40.098859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:40.098933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:40.112804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:40.112831Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:40.116606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:40.116774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:40.116820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:40.119344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:40.119444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:40.119579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:40.119655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:40.120478Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:40.120561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:40.120939Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:40.120955Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:40.120970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:40.120980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:40.120987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:40.121015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.122770Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:40.138700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:40.138784Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.138851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:40.138897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:40.138906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.139643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:40.139665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:40.139720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.139729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:40.139736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:40.139742Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:40.140129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.140138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:40.140142Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:40.140389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.140395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:40.140400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:40.140406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:40.140868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:40.141187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:40.141222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:40.141439Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:40.141464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:40.141484Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:40.141564Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:40.141572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:40.141613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:40.141626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:40.142183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:40.142194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:40.142239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T1 ... : 72057594046678944, LocalPathId: 2] was 4
2025-06-03T10:30:42.835157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-03T10:30:42.835672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:30:42.835705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:30:42.835992Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:42.836000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-06-03T10:30:42.836048Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-06-03T10:30:42.836074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:42.836078Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2
2025-06-03T10:30:42.836082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3
2025-06-03T10:30:42.836167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:30:42.836173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944
2025-06-03T10:30:42.836187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:30:42.836191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944
2025-06-03T10:30:42.836196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 129 -> 240
2025-06-03T10:30:42.836339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:30:42.836348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:30:42.836352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103
2025-06-03T10:30:42.836356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8
2025-06-03T10:30:42.836360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-06-03T10:30:42.836501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:30:42.836513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:30:42.836518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103
2025-06-03T10:30:42.836522Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615
2025-06-03T10:30:42.836525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4
2025-06-03T10:30:42.836533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true
2025-06-03T10:30:42.837137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:30:42.837147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:42.837219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-03T10:30:42.837255Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-06-03T10:30:42.837260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:30:42.837263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-06-03T10:30:42.837266Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:30:42.837269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true
2025-06-03T10:30:42.837284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:405:2370] message: TxId: 103
2025-06-03T10:30:42.837288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:30:42.837311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0
2025-06-03T10:30:42.837318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0
2025-06-03T10:30:42.837337Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-06-03T10:30:42.837474Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:42.837479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2]
2025-06-03T10:30:42.837599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-06-03T10:30:42.837918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-06-03T10:30:42.838199Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:42.838206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2
2025-06-03T10:30:42.838217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-06-03T10:30:42.838223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:727:2659]
2025-06-03T10:30:42.838364Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0
TestWaitNotification: OK eventTxId 103
2025-06-03T10:30:42.838602Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:42.838634Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 39us result status StatusSuccess
2025-06-03T10:30:42.838711Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::Redefine [GOOD]
>> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD]
>> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD]
>> THiveTest::TestTabletsStartingCounterExternalBoot [GOOD]
>> TScaleRecommenderTest::BasicTest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateForceDropTwice [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:43.047649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:43.047682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:43.047689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:43.047697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:43.047715Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:43.047720Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:43.047736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:43.047752Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:43.047877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:43.047962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:43.060472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:43.060518Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:43.063914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:43.063994Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:43.064026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:43.065751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:43.065841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:43.065972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.066028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:43.066781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:43.066837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:43.067115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:43.067122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:43.067132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:43.067138Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:43.067143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:43.067161Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.068254Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:43.084843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:43.084940Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.085017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:43.085069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:43.085079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.085999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.086027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:43.086089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.086098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:43.086102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:43.086107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:43.086774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.086782Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:43.086786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:43.087109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.087121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.087126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.087133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:43.087690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:43.088014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:43.088049Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:43.088219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.088239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:43.088256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.088315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:43.088321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.088356Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:43.088365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:43.088750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:43.088757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:43.088799Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... 155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 1
2025-06-03T10:30:43.099959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:276:2266]
2025-06-03T10:30:43.100404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:6 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.100575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-06-03T10:30:43.100974Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5
2025-06-03T10:30:43.101053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 7
2025-06-03T10:30:43.101177Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1
2025-06-03T10:30:43.101196Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3
2025-06-03T10:30:43.101221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-06-03T10:30:43.101247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-06-03T10:30:43.101254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:277:2267]
2025-06-03T10:30:43.101276Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6
2025-06-03T10:30:43.101355Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 6 TxId_Deprecated: 6
2025-06-03T10:30:43.101398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-06-03T10:30:43.101453Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2
2025-06-03T10:30:43.101489Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 6 ShardOwnerId: 72057594046678944 ShardLocalIdx: 6, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-06-03T10:30:43.101580Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4
2025-06-03T10:30:43.101621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-06-03T10:30:43.101696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-06-03T10:30:43.101781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-06-03T10:30:43.101788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-06-03T10:30:43.101823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1
2025-06-03T10:30:43.102034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-06-03T10:30:43.102042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-06-03T10:30:43.102057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:43.102317Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5
2025-06-03T10:30:43.102651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1
2025-06-03T10:30:43.102670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-06-03T10:30:43.102682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6
2025-06-03T10:30:43.103245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-06-03T10:30:43.103263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4
2025-06-03T10:30:43.103294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-06-03T10:30:43.103331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestWaitNotification: OK eventTxId 100
TestWaitNotification: OK eventTxId 101
TestWaitNotification: OK eventTxId 102
2025-06-03T10:30:43.103456Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:43.103506Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 62us result status StatusPathDoesNotExist
2025-06-03T10:30:43.103588Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-06-03T10:30:43.103678Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:43.103716Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 40us result status StatusSuccess
2025-06-03T10:30:43.103817Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Redefine [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:43.243574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:43.243600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:43.243604Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:43.243609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:43.243621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:43.243624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:43.243631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:43.243647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:43.243738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:43.243804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:43.254745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:43.254770Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:43.258293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:43.258393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:43.258425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:43.260362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:43.260438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:43.260546Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.260589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:43.261140Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:43.261186Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:43.261503Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:43.261518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:43.261533Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:43.261542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:43.261550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:43.261573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.262895Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:43.278619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:43.278700Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.278763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:43.278810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:43.278820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.279620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.279644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:43.279698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.279707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:43.279711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:43.279716Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:43.281513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.281538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:43.281547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:43.282219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.282236Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:43.282243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.282253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:43.283102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:43.283745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:43.283797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:43.284011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.284046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:43.284063Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.284146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:43.284155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:43.284198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:43.284212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:43.284813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:43.284824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:43.284879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... heme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 104
2025-06-03T10:30:43.329095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7
2025-06-03T10:30:43.329100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2025-06-03T10:30:43.329224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104
2025-06-03T10:30:43.329236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104
2025-06-03T10:30:43.329240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104
2025-06-03T10:30:43.329245Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615
2025-06-03T10:30:43.329249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5
2025-06-03T10:30:43.329258Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0
2025-06-03T10:30:43.329862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.329877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.329885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944
2025-06-03T10:30:43.330327Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546
2025-06-03T10:30:43.330388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104
Forgetting tablet 72075186233409546
2025-06-03T10:30:43.330615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:43.330702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4
2025-06-03T10:30:43.330839Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548
2025-06-03T10:30:43.330903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104
2025-06-03T10:30:43.331040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944
2025-06-03T10:30:43.331075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-06-03T10:30:43.331130Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547
Forgetting tablet 72075186233409548
Forgetting tablet 72075186233409547
2025-06-03T10:30:43.331431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944
2025-06-03T10:30:43.331471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-06-03T10:30:43.331555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-06-03T10:30:43.331563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-06-03T10:30:43.331592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1
2025-06-03T10:30:43.331688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944
2025-06-03T10:30:43.331695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944
2025-06-03T10:30:43.331709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:43.332216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1
2025-06-03T10:30:43.332235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546
2025-06-03T10:30:43.332571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3
2025-06-03T10:30:43.332579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548
2025-06-03T10:30:43.332638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2
2025-06-03T10:30:43.332645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547
2025-06-03T10:30:43.332993Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
2025-06-03T10:30:43.333012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944
TestModificationResult got TxId: 104, wait until txId: 104
TestWaitNotification wait txId: 104
2025-06-03T10:30:43.333095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion
2025-06-03T10:30:43.333104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104
2025-06-03T10:30:43.333196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944
2025-06-03T10:30:43.333222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult
2025-06-03T10:30:43.333228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:576:2529]
TestWaitNotification: OK eventTxId 104
2025-06-03T10:30:43.333352Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:43.333398Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 64us result status StatusPathDoesNotExist
2025-06-03T10:30:43.333456Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
2025-06-03T10:30:43.333527Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:43.333558Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 32us result status StatusSuccess
2025-06-03T10:30:43.333634Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56:
TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:32.893878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:32.893914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.893921Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:32.893929Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:32.893949Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:32.893954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:32.893966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:32.893994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-03T10:30:32.894097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:32.894171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:32.909498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:32.909530Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:32.914872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:32.915033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:32.915079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:32.919255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:32.919378Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:32.919557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.919640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:32.920669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.920742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:32.921163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.921176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:32.921191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:32.921202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.921210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:32.921239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.923380Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:32.950468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:32.950571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.950657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:32.950721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:32.950734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.951646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.951681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:32.951754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.951766Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:32.951774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:32.951781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:32.952236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.952248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:32.952254Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:32.952659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.952669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:32.952677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.952687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:32.953573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:32.954038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:32.954088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:32.954320Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:32.954351Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:32.954373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.954454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:32.954463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:32.954514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:32.954529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:32.955020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:32.955030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:32.955085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
n publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-06-03T10:30:43.371115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:43.372012Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372179Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372243Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:43.372312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-03T10:30:43.372351Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-03T10:30:43.372364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-03T10:30:43.372458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-03T10:30:43.372511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-03T10:30:43.372524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 129 -> 240 2025-06-03T10:30:43.372814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:43.372830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:43.372839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, 
txId: 104 2025-06-03T10:30:43.372846Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 11 2025-06-03T10:30:43.372854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-03T10:30:43.373129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:43.373142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:43.373146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-03T10:30:43.373151Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:43.373156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-03T10:30:43.373170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-03T10:30:43.374091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:43.374110Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-03T10:30:43.374233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:43.374283Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:43.374289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:43.374296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:43.374300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:43.374306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-03T10:30:43.374328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:548:2486] message: TxId: 104 2025-06-03T10:30:43.374335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:43.374341Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:30:43.374350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:30:43.374375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-03T10:30:43.374547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:43.374556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:43.374936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:43.375019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:43.375468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:43.375483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-03T10:30:43.375621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:30:43.375630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:1419:3326] 2025-06-03T10:30:43.375765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 12 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-03T10:30:43.376591Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-03T10:30:43.376658Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 79us result status StatusSuccess 2025-06-03T10:30:43.376782Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 12 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 12 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 
72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 >> THiveTest::TestLockTabletExecutionBadUnlock [GOOD] >> THiveTest::TestLockTabletExecutionGoodUnlock >> TSchemeShardSubDomainTest::RmDir ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SetSchemeLimits [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:43.429697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:43.429735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.429744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:43.429751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:43.429771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:43.429776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:43.429788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.429806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, 
IsManualStartup# false 2025-06-03T10:30:43.429939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:43.430033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:43.442310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:43.442346Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:43.446822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:43.446940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:43.446981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:43.449156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:43.449225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:43.449349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.449403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:43.449972Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.450027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:43.450292Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.450300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.450310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:43.450316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.450322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:43.450337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.451879Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:43.477172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:43.477284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.477396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:43.477466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:43.477481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.478554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.478592Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:43.478673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.478687Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:43.478695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:43.478702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:43.479244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.479256Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:43.479263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:43.479646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.479656Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.479664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.479675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:43.480473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:43.480937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:43.480990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:43.481225Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.481255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:43.481276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.481380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:43.481391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.481438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:43.481453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:43.482015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.482027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.482098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
lish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:43.560167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:43.560646Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.560656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.560714Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:43.560735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.560742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2312], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:43.560749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:337:2312], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-03T10:30:43.560842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.560852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:43.560868Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:43.560874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:43.560881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:43.560885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:43.560891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:43.560897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:43.560904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:43.560910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:43.560950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:43.560957Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, 
subscribers: 0 2025-06-03T10:30:43.560962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:43.560967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:43.561093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:43.561107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:43.561114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:43.561122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:43.561127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:43.561231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:43.561243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 3 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:43.561248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:43.561253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:43.561257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:43.561267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:30:43.562022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:43.562054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:30:43.562122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send 
EvNotifyTxCompletion 2025-06-03T10:30:43.562130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:30:43.562207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:43.562226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:43.562233Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:477:2425] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:43.562336Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:43.562388Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 65us result status StatusSuccess 2025-06-03T10:30:43.562494Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 3 ShardsInside: 2 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 DatabaseQuotas { data_stream_shards_quota: 3 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:43.562572Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:43.562586Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 17us result status StatusSuccess 2025-06-03T10:30:43.562646Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 
CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 3 ShardsInside: 0 ShardsLimit: 3 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 300 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] >> TSchemeShardSubDomainTest::RmDir [GOOD] >> TScaleRecommenderTest::BasicTest [GOOD] >> TPartBtreeIndexIteration::FewNodes_Groups_History_Slices_Sticky [GOOD] >> TPartGroupBtreeIndexIter::NoNodes [GOOD] >> TPartGroupBtreeIndexIter::OneNode [GOOD] >> TPartGroupBtreeIndexIter::FewNodes >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DeleteAndRestart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:43.427251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:43.427289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.427295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:43.427302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:43.427319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:43.427324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:43.427342Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.427358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:43.427492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:43.427587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:43.445376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:43.445405Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:43.449809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:43.449953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:43.450001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:43.452604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:43.452688Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:43.452805Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.452882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:43.453769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.453836Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:43.454219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.454237Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.454249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:43.454261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.454268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:43.454297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.456094Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: 
[72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:43.481225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:43.481351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.481437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:43.481506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:43.481527Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.482516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.482558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:43.482648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.482663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:43.482669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:43.482676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:43.483413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.483431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:43.483438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:43.483945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.483960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.483968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose 
ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.483978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:43.484865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:43.485471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:43.485534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:43.485776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.485812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:43.485837Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.485912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:43.485923Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.485967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:43.485984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:43.486686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.486698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.486748Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
r TabletID 72057594046678944 is [1:628:2542] sender: [1:629:2058] recipient: [1:626:2541] 2025-06-03T10:30:43.611434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:43.611461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.611467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:43.611472Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:43.611476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:43.611480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:43.611491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.611500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:43.611590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:43.611641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:43.612546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:43.612892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:43.612928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:43.612951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:43.612955Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:43.612976Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:43.613042Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613115Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:30:43.613135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613172Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613248Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613388Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613482Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.613505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 
2025-06-03T10:30:43.613512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.614904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:43.615508Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.615527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.615540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:43.615549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.615555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:43.615834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:628:2542] sender: [1:687:2058] recipient: [1:15:2062] 2025-06-03T10:30:43.646639Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:43.646694Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 73us result status StatusPathDoesNotExist 2025-06-03T10:30:43.646729Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:43.646818Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:43.646842Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 25us result status StatusSuccess 2025-06-03T10:30:43.646894Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> THiveTest::TestHiveBalancerWithSystemTablets [GOOD] >> THiveTest::TestHiveBalancerWithFollowers >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop >> TSchemeShardSubDomainTest::CopyRejects >> TPartGroupBtreeIndexIter::FewNodes [GOOD] >> TPartMulti::Basics [GOOD] >> TPartMulti::BasicsReverse [GOOD] >> TPartSlice::SimpleMerge [GOOD] >> TPartSlice::ComplexMerge [GOOD] >> TPartSlice::LongTailMerge [GOOD] >> TPartSlice::CutSingle [GOOD] >> TPartSlice::CutMulti [GOOD] >> TPartSlice::LookupBasics [GOOD] >> TPartSlice::LookupFull [GOOD] >> TPartSlice::EqualByRowId [GOOD] >> TPartSlice::ParallelCompactions [GOOD] >> BuildStatsHistogram::Many_Mixed [GOOD] >> BuildStatsHistogram::Many_Serial >> THiveTest::TestLockTabletExecutionGoodUnlock [GOOD] >> THiveTest::TestLockTabletExecutionLocalGone ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-false [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:39.769565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:39.769598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.769605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:39.769611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 
2025-06-03T10:30:39.769626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:39.769631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:39.769646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.769661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:39.769782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.769855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.785839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.785867Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.790515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.790643Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.790686Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.793825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.793938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.794050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.794114Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.795084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.795495Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795510Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.795523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.795532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.795539Z node 
1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.795566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.797034Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.814150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.814245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.814314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.814372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.814386Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.815205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.815246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.815312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.815328Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.815334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.815341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.815883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.815898Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.815903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.816340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-03T10:30:39.816355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.816363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.816373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.816931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.817371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.817414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.817611Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.817635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.817654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.817718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.817725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.817759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.817774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.818380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.818396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:30:39.818454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... pp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:43.977217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:43.977748Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978015Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978058Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:43.978095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:43.978119Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:30:43.978127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-03T10:30:43.978217Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:43.978246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-03T10:30:43.978264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 129 -> 240 2025-06-03T10:30:43.978410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:43.978422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:43.978425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:30:43.978430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-03T10:30:43.978434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:43.978524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:43.978532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:43.978535Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:30:43.978538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:30:43.978551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:30:43.978559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:30:43.979236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.979246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:43.979347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:43.979386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:43.979390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:43.979394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:43.979399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:43.979403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:30:43.979415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send 
TEvNotifyTxCompletionResult to actorId: [1:405:2370] message: TxId: 103 2025-06-03T10:30:43.979422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:43.979428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:30:43.979433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:30:43.979452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:43.979537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.979542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:43.979656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:43.979932Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:43.980182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.980192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-03T10:30:43.980205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:43.980208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:657:2588] 2025-06-03T10:30:43.980350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:43.980520Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:43.980549Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 33us result status StatusSuccess 2025-06-03T10:30:43.980618Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: 
EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DisableWritesToDatabase-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:39.937030Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:39.937060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.937067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:39.937074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:39.937093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:39.937098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:39.937113Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:39.937128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:39.937253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:39.937361Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:39.951930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:39.951953Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:39.955528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:39.955604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:39.955634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:39.957374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:39.957453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:39.957574Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.957629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:39.958128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.958174Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:39.958431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.958439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:39.958448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:39.958454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.958459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:39.958476Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.959511Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:39.974181Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:39.974244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.974295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:39.974336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:39.974344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.974850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.974874Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:39.974916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.974923Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:39.974927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:39.974932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:39.975273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.975282Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:39.975285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:39.975541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.975549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:39.975554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.975560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:39.975996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 
72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:39.976369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:39.976403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:39.976573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:39.976593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:39.976610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.976666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:39.976672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:39.976700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:39.976709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:39.977093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:39.977100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:39.977137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-06-03T10:30:44.074237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:44.074910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:44.075524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-03T10:30:44.075552Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-03T10:30:44.075562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-03T10:30:44.075623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075630Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-03T10:30:44.075655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-03T10:30:44.075668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 129 -> 240 2025-06-03T10:30:44.075883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.075894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.075898Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-03T10:30:44.075902Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-03T10:30:44.075907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-03T10:30:44.076020Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.076027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.076030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-03T10:30:44.076033Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:44.076035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-03T10:30:44.076043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-03T10:30:44.076854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.076874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-03T10:30:44.076966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:44.077008Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:44.077013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:44.077019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:44.077026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:44.077032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-03T10:30:44.077053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:548:2486] message: TxId: 104 2025-06-03T10:30:44.077058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:44.077062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:30:44.077066Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:30:44.077080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-03T10:30:44.077223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:44.077231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:44.077478Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:44.077504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:44.077844Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:44.077858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-03T10:30:44.077880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.077889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:743:2658] 2025-06-03T10:30:44.078118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-03T10:30:44.078374Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-03T10:30:44.078425Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 63us result status StatusSuccess 2025-06-03T10:30:44.078506Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 
SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "quoted_storage_pool" Kind: "quoted_storage_pool_kind" } StoragePools { Name: "unquoted_storage_pool" Kind: "unquoted_storage_pool_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "unquoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "quoted_storage_pool_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { storage_quotas { unit_kind: "quoted_storage_pool_kind" data_size_hard_quota: 1 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RmDir [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.072123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.072157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.072163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.072169Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.072187Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.072192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.072206Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.072221Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.072335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: 
ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.072417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.083449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.083479Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.087585Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.087719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.087762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.090265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.090362Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.090488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.090562Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.091314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.091383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.091722Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.091736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.091750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.091760Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.091765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.091792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.093143Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.113139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.113254Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.113384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.113454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.113469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.114590Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.114630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.114719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.114734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.114741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.114749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.115407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.115420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.115426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.115845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.115859Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.115866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.115876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:44.116508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 
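The repeated "Change state for txid" records above trace the sub-operation state machine that schemeshard drives for ESchemeOpAlterSubDomain: 2 -> 3 (TCreateParts done, no shards to create), 3 -> 128 (TConfigureParts done, ready to propose), and, once the coordinator plans the step, 128 -> 240. The sketch below is a minimal, hypothetical model of that progression: the numeric values are copied from the log, while the enum names and the Next() helper are illustrative assumptions, not YDB's real definitions.

    #include <cstdint>
    #include <iostream>

    // Hypothetical names; only the numeric values come from the log above.
    enum class ETxState : uint32_t {
        CreateParts    = 2,   // "TCreateParts opId# 1:0 ProgressState"
        ConfigureParts = 3,   // "NSubDomainState::TConfigureParts"
        Propose        = 128, // "NSubDomainState::TPropose"
        Done           = 240, // reached via "HandleReply TEvOperationPlan"
    };

    // Advances one step of the fixed progression seen in the log.
    ETxState Next(ETxState s) {
        switch (s) {
            case ETxState::CreateParts:    return ETxState::ConfigureParts;
            case ETxState::ConfigureParts: return ETxState::Propose;
            case ETxState::Propose:        return ETxState::Done;
            case ETxState::Done:           return ETxState::Done; // terminal
        }
        return s; // unreachable; silences -Wreturn-type
    }

    int main() {
        ETxState s = ETxState::CreateParts;
        while (s != ETxState::Done) {
            ETxState n = Next(s);
            std::cout << "Change state for txid 1:0 "
                      << static_cast<uint32_t>(s) << " -> "
                      << static_cast<uint32_t>(n) << '\n';
            s = n;
        }
    }

Run as-is, this prints the same three transitions the log records for txId 1 (2 -> 3, 3 -> 128, 128 -> 240).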
2025-06-03T10:30:44.116919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.116956Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.117141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.117163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.117180Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.117259Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.117265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.117323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.117336Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:44.117728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.117735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.117781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
rationId 100:0, at tablet# 72057594046678944 2025-06-03T10:30:44.214695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.214707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-03T10:30:44.214719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:44.215148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.215157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.215212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:44.215232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.215239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:44.215246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-03T10:30:44.215332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.215341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:44.215357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:44.215362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:44.215369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:44.215373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:44.215379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:44.215386Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:44.215398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:44.215404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 
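The Increment/DecrementPathDbRefCount records that run through this whole dump follow one bookkeeping pattern: every reason that pins a path (a transaction targeting it, a pending scheme-board publication, live table data) adjusts a per-path counter. The counter values in the log are consistent with "was N" reporting the value before the change (0 -> 1 on the first increment at propose time; 9 -> 8 on "remove txstate target path" just above). A minimal sketch of that pattern follows, with hypothetical names; it is not the actual schemeshard_impl.cpp code.

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a schemeshard path entry.
    struct TPathEntry {
        uint64_t DbRefCount = 0;
    };

    // Logs the value *before* the change, matching the "was N" lines above.
    void IncrementPathDbRefCount(TPathEntry& p, const std::string& reason) {
        std::cout << "IncrementPathDbRefCount reason " << reason
                  << " was " << p.DbRefCount << '\n';
        ++p.DbRefCount;
    }

    void DecrementPathDbRefCount(TPathEntry& p, const std::string& reason) {
        std::cout << "DecrementPathDbRefCount reason " << reason
                  << " was " << p.DbRefCount << '\n';
        if (p.DbRefCount > 0) --p.DbRefCount; // never underflow
    }

    int main() {
        TPathEntry path; // starts unpinned, as in "was 0" at propose time
        IncrementPathDbRefCount(path, "transaction target path");    // 0 -> 1
        IncrementPathDbRefCount(path, "publish path");               // 1 -> 2
        DecrementPathDbRefCount(path, "remove publishing");          // 2 -> 1
        DecrementPathDbRefCount(path, "remove txstate target path"); // 1 -> 0
    }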
2025-06-03T10:30:44.215449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 9 2025-06-03T10:30:44.215457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 1 2025-06-03T10:30:44.215462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:44.215467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:44.215601Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:44.215615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:44.215622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:44.215628Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:44.215634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:44.215733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:44.215745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:44.215750Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:44.215755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:44.215760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 8 2025-06-03T10:30:44.215769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 1 2025-06-03T10:30:44.215775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [1:565:2473] 
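The tail of the txId 100 flow above shows how completion is gated on scheme-board publication: the operation finishes its last part but reports "Publication still in progress, tx: 100, publications: 2, subscribers: 1", then each TEvUpdateAck drops the in-flight count (2 -> 1 -> 0) until "Publication complete, notify & remove" fires TEvNotifyTxCompletionResult at the waiting subscriber. The sketch below models only that countdown; the type and function names are assumptions for illustration, not YDB's actual API.

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Hypothetical model of a transaction waiting on path publications.
    struct TPublication {
        uint32_t InFlight;                // "Publication in-flight, count: N"
        std::function<void()> NotifyDone; // fires once, when the count hits zero
    };

    // One TEvUpdateAck for one published path.
    void HandleUpdateAck(TPublication& pub) {
        if (pub.InFlight == 0) return;    // late or duplicate ack, nothing to do
        std::cout << "Publication in-flight, count: " << pub.InFlight << '\n';
        if (--pub.InFlight == 0) {
            std::cout << "Publication complete, notify & remove\n";
            pub.NotifyDone();             // -> TEvNotifyTxCompletionResult
        }
    }

    int main() {
        TPublication tx100{2, [] {
            std::cout << "satisfy waiter for txId 100\n";
        }};
        HandleUpdateAck(tx100); // ack for LocalPathId: 1
        HandleUpdateAck(tx100); // ack for LocalPathId: 2 -> subscriber notified
    }

As in the log, nothing is sent to the subscriber until the last published path is acknowledged, which is why the test's TTxNotificationSubscriber only reports "satisfy waiter" after both TEvUpdateAck records.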
2025-06-03T10:30:44.216453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:44.216685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:44.216705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.216712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:566:2474] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:44.216848Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.216898Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 61us result status StatusSuccess 2025-06-03T10:30:44.217046Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 Coordinators: 72075186233409547 Coordinators: 72075186233409548 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409549 Mediators: 72075186233409550 Mediators: 72075186233409551 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-03T10:30:44.217902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpRmDir Drop { Name: "USER_0" } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.217946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_rmdir.cpp:29: TRmDir Propose, path: /MyRoot/USER_0, pathId: 0, opId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.217976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPathIsNotDirectory, reason: 
Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37, at schemeshard: 72057594046678944 2025-06-03T10:30:44.218655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPathIsNotDirectory Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.218697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPathIsNotDirectory, reason: Check failed: path: '/MyRoot/USER_0', error: path is not a directory (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_rmdir.cpp:37, operation: DROP DIRECTORY, path: /MyRoot/USER_0 TestModificationResult got TxId: 101, wait until txId: 101 >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD] >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] >> TSchemeShardSubDomainTest::Restart ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> TPartSlice::ParallelCompactions [GOOD] Test command err: ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 755b + FlatIndex{4} Label{3 rev 3, 172b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, b} | 2 6 86b {2, NULL} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 208b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, b} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, NULL} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 346b 12r} data 777b + FlatIndex{4} Label{3 rev 3, 179b} 5 rec | Page Row Bytes (Uint32, String) | 0 0 86b {1, aaa} | 1 3 88b {1, baaaa} | 2 6 86b {2, aaa} | 3 9 86b {2, ccx} | 3 11 86b {2, cxz} + BTreeIndex{PageId: 5 RowCount: 12 DataSize: 346 ErasedRowCount: 0} Label{13 rev 1, 223b} | PageId: 0 RowCount: 3 DataSize: 86 ErasedRowCount: 0 | > {1, baaaa} | PageId: 1 RowCount: 6 DataSize: 174 ErasedRowCount: 0 | > {2, aaa} | PageId: 2 RowCount: 9 DataSize: 260 ErasedRowCount: 0 | > {2, ccx} | PageId: 3 RowCount: 12 DataSize: 346 ErasedRowCount: 0 ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 
0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= SLICES ======= { [0, 2), [2, 4), [4, 5), [5, 7), [7, 8), [8, 9), [9, 9] } ======= CUT ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1347b + FlatIndex{10} Label{3 rev 3, 362b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, ab} | 2 2 42b {1, ac} | 3 3 42b {1, b} | 4 4 42b {1, bb} | 5 5 42b {2, NULL} | 6 6 42b {2, ab} | 7 7 42b {2, ac} | 8 8 42b {2, b} | 9 9 42b {2, bb} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 536b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, ab} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, ac} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, b} | PageId: 3 RowCount: 4 DataSize: 168 ErasedRowCount: 0 | > {1, bb} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, NULL} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, ab} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, ac} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, b} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bb} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 ======= FULL ======= Part{[1:2:3:0:0:0:0] eph 0, 420b 10r} data 1381b + FlatIndex{10} Label{3 rev 3, 375b} 11 rec | Page Row Bytes (Uint32, String) | 0 0 42b {1, aaa} | 1 1 42b {1, aba} | 2 2 42b {1, aca} | 3 3 42b {1, baa} | 4 4 42b {1, bba} | 5 5 42b {2, aaa} | 6 6 42b {2, aba} | 7 7 42b {2, aca} | 8 8 42b {2, baa} | 9 9 42b {2, bba} | 9 9 42b {2, bba} + BTreeIndex{PageId: 11 RowCount: 10 DataSize: 420 ErasedRowCount: 0} Label{13 rev 1, 557b} | PageId: 0 RowCount: 1 DataSize: 42 ErasedRowCount: 0 | > {1, aba} | PageId: 1 RowCount: 2 DataSize: 84 ErasedRowCount: 0 | > {1, aca} | PageId: 2 RowCount: 3 DataSize: 126 ErasedRowCount: 0 | > {1, baa} | PageId: 3 RowCount: 4 DataSize: 168 
ErasedRowCount: 0 | > {1, bba} | PageId: 4 RowCount: 5 DataSize: 210 ErasedRowCount: 0 | > {2, aaa} | PageId: 5 RowCount: 6 DataSize: 252 ErasedRowCount: 0 | > {2, aba} | PageId: 6 RowCount: 7 DataSize: 294 ErasedRowCount: 0 | > {2, aca} | PageId: 7 RowCount: 8 DataSize: 336 ErasedRowCount: 0 | > {2, baa} | PageId: 8 RowCount: 9 DataSize: 378 ErasedRowCount: 0 | > {2, bba} | PageId: 9 RowCount: 10 DataSize: 420 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccccd} | 1 1 41b {ccccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 83b 2r} data 320b + FlatIndex{2} Label{3 rev 3, 109b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 43b {ccccccd} | 1 1 43b {ccccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 83 ErasedRowCount: 0} Label{13 rev 1, 109b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccccd} | PageId: 1 RowCount: 2 DataSize: 83 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccccd} | 1 1 40b {cccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 82b 2r} data 316b + FlatIndex{2} Label{3 rev 3, 107b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 42b {cccccd} | 1 1 42b {cccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 82 ErasedRowCount: 0} Label{13 rev 1, 108b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccccd} | PageId: 1 RowCount: 2 DataSize: 82 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 79b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 39b {ccccd} | 1 1 39b {ccccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 79 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 79 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 81b 2r} data 312b + FlatIndex{2} Label{3 rev 3, 105b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 41b {ccccd} | 1 1 41b {ccccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 81 ErasedRowCount: 0} Label{13 rev 1, 107b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {ccccd} | PageId: 1 RowCount: 2 DataSize: 81 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 78b 2r} data 304b + FlatIndex{2} Label{3 rev 3, 101b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 38b {cccd} | 1 1 38b {cccd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 78 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 78 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 80b 2r} data 308b + FlatIndex{2} Label{3 rev 3, 103b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 40b {cccd} | 1 1 40b {cccddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 80 ErasedRowCount: 0} Label{13 rev 1, 106b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {cccd} | PageId: 1 RowCount: 2 DataSize: 80 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 75b 2r} data 292b + FlatIndex{2} 
Label{3 rev 3, 95b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 75 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 75 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 77b 2r} data 296b + FlatIndex{2} Label{3 rev 3, 97b} 3 rec | Page Row Bytes (String) | 0 0 40b {cccccc} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 77 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 40 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 77 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 69b 2r} data 280b + FlatIndex{2} Label{3 rev 3, 89b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 35b {d} | 1 1 35b {d} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 69 ErasedRowCount: 0} Label{13 rev 1, 103b} | PageId: 0 RowCount: 1 DataSize: 34 ErasedRowCount: 0 | > {d} | PageId: 1 RowCount: 2 DataSize: 69 ErasedRowCount: 0 Part{[1:2:3:0:0:0:0] eph 0, 71b 2r} data 284b + FlatIndex{2} Label{3 rev 3, 91b} 3 rec | Page Row Bytes (String) | 0 0 34b {} | 1 1 37b {d} | 1 1 37b {ddd} + BTreeIndex{PageId: 3 RowCount: 2 DataSize: 71 ErasedRowCount: 0} Label{13 rev 1, 10 ... owOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{6} Label{64 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{7} Label{74 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{8} Label{84 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{9} Label{94 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{10} Label{104 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{11} Label{114 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{12} Label{124 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : 
xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{13} Label{134 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{14} Label{144 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{15} Label{154 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{16} Label{164 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{17} Label{174 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{18} Label{184 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{19} Label{194 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } Part{[1:2:3:0:0:0:0] eph 0, 2430b 40r} data 4441b + FlatIndex{26} Label{3 rev 3, 558b} 21 rec | Page Row Bytes (Uint32, Uint32) | 0 0 120b {0, 1} | 1 2 120b {0, 4} | 2 4 120b {0, 7} | 3 6 120b {0, 10} | 4 8 120b {1, 3} | 5 10 122b {1, 6} | 7 12 122b {1, 8} | 8 14 122b {2, NULL} | 9 16 122b {2, 4} | 11 18 122b {2, 7} | 12 20 122b {2, 10} | 13 22 122b {3, 3} | 15 24 122b {3, 6} | 16 26 122b {3, 8} | 17 28 122b {4, NULL} | 19 30 122b {4, 4} | 20 32 122b {4, 7} | 21 34 122b {4, 10} | 24 36 122b {5, 3} | 25 38 122b {5, 6} | 25 39 122b {5, 7} + BTreeIndex{PageId: 29 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 102b} | + BTreeIndex{PageId: 23 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + BTreeIndex{PageId: 6 RowCount: 6 DataSize: 360 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 0 RowCount: 2 DataSize: 120 ErasedRowCount: 0 | | | > {0, 4} | | | PageId: 1 RowCount: 4 DataSize: 240 ErasedRowCount: 0 | | | > {0, 7} | | | PageId: 2 RowCount: 6 DataSize: 360 ErasedRowCount: 0 | | > {0, 10} | | + BTreeIndex{PageId: 10 RowCount: 12 DataSize: 722 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 3 RowCount: 8 DataSize: 480 ErasedRowCount: 0 | | | > {1, 3} | | | PageId: 4 RowCount: 10 DataSize: 600 ErasedRowCount: 0 | | | > {1, 6} | | | PageId: 5 RowCount: 12 DataSize: 722 ErasedRowCount: 0 | | > {1, 8} | | + BTreeIndex{PageId: 14 RowCount: 18 DataSize: 1088 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 7 RowCount: 14 DataSize: 844 ErasedRowCount: 0 | | | > {2, NULL} | | | PageId: 8 RowCount: 16 DataSize: 966 ErasedRowCount: 0 | | | > {2, 4} | | | PageId: 9 RowCount: 18 DataSize: 1088 ErasedRowCount: 0 | > {2, 7} | + BTreeIndex{PageId: 28 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 151b} | | + 
BTreeIndex{PageId: 18 RowCount: 24 DataSize: 1454 ErasedRowCount: 0} Label{13 rev 1, 151b} | | | PageId: 11 RowCount: 20 DataSize: 1210 ErasedRowCount: 0 | | | > {2, 10} | | | PageId: 12 RowCount: 22 DataSize: 1332 ErasedRowCount: 0 | | | > {3, 3} | | | PageId: 13 RowCount: 24 DataSize: 1454 ErasedRowCount: 0 | | > {3, 6} | | + BTreeIndex{PageId: 22 RowCount: 30 DataSize: 1820 ErasedRowCount: 0} Label{13 rev 1, 147b} | | | PageId: 15 RowCount: 26 DataSize: 1576 ErasedRowCount: 0 | | | > {3, 8} | | | PageId: 16 RowCount: 28 DataSize: 1698 ErasedRowCount: 0 | | | > {4, NULL} | | | PageId: 17 RowCount: 30 DataSize: 1820 ErasedRowCount: 0 | | > {4, 4} | | + BTreeIndex{PageId: 27 RowCount: 40 DataSize: 2430 ErasedRowCount: 0} Label{13 rev 1, 249b} | | | PageId: 19 RowCount: 32 DataSize: 1942 ErasedRowCount: 0 | | | > {4, 7} | | | PageId: 20 RowCount: 34 DataSize: 2064 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 21 RowCount: 36 DataSize: 2186 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 24 RowCount: 38 DataSize: 2308 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 25 RowCount: 40 DataSize: 2430 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 120b}, [0, +2)row | ERowOp 1: {0, 1} {Set 2 Uint32 : 0}, {Set 3 Uint64 : 0}, {Set 4 String : xxxxxxxxxx_0} | ERowOp 1: {0, 3} {Set 2 Uint32 : 1}, {Set 3 Uint64 : 1}, {Set 4 String : xxxxxxxxxx_1} + Rows{1} Label{14 rev 1, 120b}, [2, +2)row | ERowOp 1: {0, 4} {Set 2 Uint32 : 2}, {Set 3 Uint64 : 2}, {Set 4 String : xxxxxxxxxx_2} | ERowOp 1: {0, 6} {Set 2 Uint32 : 3}, {Set 3 Uint64 : 3}, {Set 4 String : xxxxxxxxxx_3} + Rows{2} Label{24 rev 1, 120b}, [4, +2)row | ERowOp 1: {0, 7} {Set 2 Uint32 : 4}, {Set 3 Uint64 : 4}, {Set 4 String : xxxxxxxxxx_4} | ERowOp 1: {0, 8} {Set 2 Uint32 : 5}, {Set 3 Uint64 : 5}, {Set 4 String : xxxxxxxxxx_5} + Rows{3} Label{34 rev 1, 120b}, [6, +2)row | ERowOp 1: {0, 10} {Set 2 Uint32 : 6}, {Set 3 Uint64 : 6}, {Set 4 String : xxxxxxxxxx_6} | ERowOp 1: {1, 1} {Set 2 Uint32 : 7}, {Set 3 Uint64 : 7}, {Set 4 String : xxxxxxxxxx_7} + Rows{4} Label{44 rev 1, 120b}, [8, +2)row | ERowOp 1: {1, 3} {Set 2 Uint32 : 8}, {Set 3 Uint64 : 8}, {Set 4 String : xxxxxxxxxx_8} | ERowOp 1: {1, 4} {Set 2 Uint32 : 9}, {Set 3 Uint64 : 9}, {Set 4 String : xxxxxxxxxx_9} + Rows{5} Label{54 rev 1, 122b}, [10, +2)row | ERowOp 1: {1, 6} {Set 2 Uint32 : 10}, {Set 3 Uint64 : 10}, {Set 4 String : xxxxxxxxxx_10} | ERowOp 1: {1, 7} {Set 2 Uint32 : 11}, {Set 3 Uint64 : 11}, {Set 4 String : xxxxxxxxxx_11} + Rows{7} Label{74 rev 1, 122b}, [12, +2)row | ERowOp 1: {1, 8} {Set 2 Uint32 : 12}, {Set 3 Uint64 : 12}, {Set 4 String : xxxxxxxxxx_12} | ERowOp 1: {1, 10} {Set 2 Uint32 : 13}, {Set 3 Uint64 : 13}, {Set 4 String : xxxxxxxxxx_13} + Rows{8} Label{84 rev 1, 122b}, [14, +2)row | ERowOp 1: {2, 1} {Set 2 Uint32 : 14}, {Set 3 Uint64 : 14}, {Set 4 String : xxxxxxxxxx_14} | ERowOp 1: {2, 3} {Set 2 Uint32 : 15}, {Set 3 Uint64 : 15}, {Set 4 String : xxxxxxxxxx_15} + Rows{9} Label{94 rev 1, 122b}, [16, +2)row | ERowOp 1: {2, 4} {Set 2 Uint32 : 16}, {Set 3 Uint64 : 16}, {Set 4 String : xxxxxxxxxx_16} | ERowOp 1: {2, 6} {Set 2 Uint32 : 17}, {Set 3 Uint64 : 17}, {Set 4 String : xxxxxxxxxx_17} + Rows{11} Label{114 rev 1, 122b}, [18, +2)row | ERowOp 1: {2, 7} {Set 2 Uint32 : 18}, {Set 3 Uint64 : 18}, {Set 4 String : xxxxxxxxxx_18} | ERowOp 1: {2, 8} {Set 2 Uint32 : 19}, {Set 3 Uint64 : 19}, {Set 4 String : xxxxxxxxxx_19} + Rows{12} Label{124 rev 1, 122b}, [20, +2)row | ERowOp 1: {2, 10} {Set 2 Uint32 : 20}, {Set 3 Uint64 : 20}, {Set 4 String : xxxxxxxxxx_20} | ERowOp 1: {3, 1} {Set 2 
Uint32 : 21}, {Set 3 Uint64 : 21}, {Set 4 String : xxxxxxxxxx_21} + Rows{13} Label{134 rev 1, 122b}, [22, +2)row | ERowOp 1: {3, 3} {Set 2 Uint32 : 22}, {Set 3 Uint64 : 22}, {Set 4 String : xxxxxxxxxx_22} | ERowOp 1: {3, 4} {Set 2 Uint32 : 23}, {Set 3 Uint64 : 23}, {Set 4 String : xxxxxxxxxx_23} + Rows{15} Label{154 rev 1, 122b}, [24, +2)row | ERowOp 1: {3, 6} {Set 2 Uint32 : 24}, {Set 3 Uint64 : 24}, {Set 4 String : xxxxxxxxxx_24} | ERowOp 1: {3, 7} {Set 2 Uint32 : 25}, {Set 3 Uint64 : 25}, {Set 4 String : xxxxxxxxxx_25} + Rows{16} Label{164 rev 1, 122b}, [26, +2)row | ERowOp 1: {3, 8} {Set 2 Uint32 : 26}, {Set 3 Uint64 : 26}, {Set 4 String : xxxxxxxxxx_26} | ERowOp 1: {3, 10} {Set 2 Uint32 : 27}, {Set 3 Uint64 : 27}, {Set 4 String : xxxxxxxxxx_27} + Rows{17} Label{174 rev 1, 122b}, [28, +2)row | ERowOp 1: {4, 1} {Set 2 Uint32 : 28}, {Set 3 Uint64 : 28}, {Set 4 String : xxxxxxxxxx_28} | ERowOp 1: {4, 3} {Set 2 Uint32 : 29}, {Set 3 Uint64 : 29}, {Set 4 String : xxxxxxxxxx_29} + Rows{19} Label{194 rev 1, 122b}, [30, +2)row | ERowOp 1: {4, 4} {Set 2 Uint32 : 30}, {Set 3 Uint64 : 30}, {Set 4 String : xxxxxxxxxx_30} | ERowOp 1: {4, 6} {Set 2 Uint32 : 31}, {Set 3 Uint64 : 31}, {Set 4 String : xxxxxxxxxx_31} + Rows{20} Label{204 rev 1, 122b}, [32, +2)row | ERowOp 1: {4, 7} {Set 2 Uint32 : 32}, {Set 3 Uint64 : 32}, {Set 4 String : xxxxxxxxxx_32} | ERowOp 1: {4, 8} {Set 2 Uint32 : 33}, {Set 3 Uint64 : 33}, {Set 4 String : xxxxxxxxxx_33} + Rows{21} Label{214 rev 1, 122b}, [34, +2)row | ERowOp 1: {4, 10} {Set 2 Uint32 : 34}, {Set 3 Uint64 : 34}, {Set 4 String : xxxxxxxxxx_34} | ERowOp 1: {5, 1} {Set 2 Uint32 : 35}, {Set 3 Uint64 : 35}, {Set 4 String : xxxxxxxxxx_35} + Rows{24} Label{244 rev 1, 122b}, [36, +2)row | ERowOp 1: {5, 3} {Set 2 Uint32 : 36}, {Set 3 Uint64 : 36}, {Set 4 String : xxxxxxxxxx_36} | ERowOp 1: {5, 4} {Set 2 Uint32 : 37}, {Set 3 Uint64 : 37}, {Set 4 String : xxxxxxxxxx_37} + Rows{25} Label{254 rev 1, 122b}, [38, +2)row | ERowOp 1: {5, 6} {Set 2 Uint32 : 38}, {Set 3 Uint64 : 38}, {Set 4 String : xxxxxxxxxx_38} | ERowOp 1: {5, 7} {Set 2 Uint32 : 39}, {Set 3 Uint64 : 39}, {Set 4 String : xxxxxxxxxx_39} Slices{ [0, 39] } >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] >> TSchemeShardSubDomainTest::CopyRejects [GOOD] >> TSchemeShardSubDomainTest::ConsistentCopyRejects >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] >> TSchemeShardSubDomainTest::RedefineErrors >> THiveTest::TestFollowersCrossDC_Easy [GOOD] >> THiveTest::TestFollowers_LocalNodeOnly >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeDatabaseQuotaRejects [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.365498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.365529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.365535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.365543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.365559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.365564Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.365575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.365595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.365713Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.365786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.381904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.381933Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.387088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.387221Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.387268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.389699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.389793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.389943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.390012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.390900Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.390961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.391272Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.391284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.391294Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.391301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.391305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.391321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.392640Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.416955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.417057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.417135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.417200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.417215Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.418182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.418217Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.418292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.418306Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.418313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.418320Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.418918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.418934Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.418940Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.419407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.419420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.419427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.419435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:44.420231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:44.420786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.420839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.421053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.421084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.421104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.421188Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.421196Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.421236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.421249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing 
txId 1 2025-06-03T10:30:44.421829Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.421842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.421894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 0:30:44.593866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:5 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:44.593878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:44.593883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:44.593888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:44.593895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:30:44.594352Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 5 TxId_Deprecated: 5 TabletID: 72075186233409550 2025-06-03T10:30:44.598896Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:30:44.598963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046678944 ShardLocalIdx: 5, at schemeshard: 72057594046678944 2025-06-03T10:30:44.599103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 Forgetting tablet 72075186233409550 2025-06-03T10:30:44.599701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:30:44.599988Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:30:44.600037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.600096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 Forgetting tablet 72075186233409546 2025-06-03T10:30:44.600551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:30:44.601124Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: 
[72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409548 2025-06-03T10:30:44.601713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:44.601778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:44.601970Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409547 2025-06-03T10:30:44.602062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:44.602087Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409549 2025-06-03T10:30:44.602286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:44.602310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:44.602373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 106 2025-06-03T10:30:44.602394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.602398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:30:44.602408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:44.602434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.602441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.602462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:44.603062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:30:44.603082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 
72075186233409550 2025-06-03T10:30:44.603100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:44.603104Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:44.603115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:44.603120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:44.603678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:44.603689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:44.603701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:44.603705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:44.603736Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:44.603750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:44.603757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.603762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.603776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.604128Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 106, wait until txId: 106 TestWaitNotification wait txId: 106 2025-06-03T10:30:44.604201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 106: send EvNotifyTxCompletion 2025-06-03T10:30:44.604207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-03T10:30:44.604266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-03T10:30:44.604284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.604288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [1:902:2801] TestWaitNotification: OK 
eventTxId 106
2025-06-03T10:30:44.604364Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:44.604406Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 55us result status StatusSuccess
2025-06-03T10:30:44.604504Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeQuotas [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:43.262325Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:43.262346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:43.262350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:43.262355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:43.262369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:43.262372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: 
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:43.262378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:43.262393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:43.262481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:43.262538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:43.272100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:43.272126Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:43.275523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:43.275618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:43.275652Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:43.277118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:43.277189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:43.277277Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.277358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:43.278065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.278130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:43.278485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.278496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:43.278511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:43.278522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.278529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:43.278556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at 
schemeshard: 72057594046678944 2025-06-03T10:30:43.282109Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:43.304189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:43.304289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.304371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:43.304433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:43.304446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.305439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.305467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:43.305535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.305546Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:43.305553Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:43.305559Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:43.306237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.306262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:43.306271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:43.306872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.306889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:43.306896Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.306905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:43.307738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:43.308476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:43.308576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:43.308872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:43.308922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:43.308945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.309049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:43.309062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:43.309114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:43.309132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:43.310006Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:43.310022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:43.310082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
CHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 137:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.482353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 11 2025-06-03T10:30:44.482364Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 3 2025-06-03T10:30:44.483032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 137, response: Status: StatusAccepted TxId: 137 SchemeshardId: 72057594046678944 PathId: 10, at schemeshard: 72057594046678944 2025-06-03T10:30:44.483081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 137, database: /MyRoot/USER_0, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /MyRoot/USER_0/Table11 2025-06-03T10:30:44.483148Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.483157Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:44.483222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 137, path id: [OwnerId: 72057594046678944, LocalPathId: 10] 2025-06-03T10:30:44.483240Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.483246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1021:2880], at schemeshard: 72057594046678944, txId: 137, path id: 2 2025-06-03T10:30:44.483253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:1021:2880], at schemeshard: 72057594046678944, txId: 137, path id: 10 2025-06-03T10:30:44.483268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.483281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 137:0 ProgressState, operation type: TxCreateTable, at tablet# 72057594046678944 2025-06-03T10:30:44.483338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:357: TCreateParts opId# 137:0 CreateRequest Event to Hive: 72057594037968897 msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-03T10:30:44.483725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-03T10:30:44.483754Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 2 Version: 18 PathOwnerId: 72057594046678944, cookie: 137 2025-06-03T10:30:44.483760Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-03T10:30:44.483767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18 2025-06-03T10:30:44.483774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 12 2025-06-03T10:30:44.483915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-03T10:30:44.483931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 10 Version: 1 PathOwnerId: 72057594046678944, cookie: 137 2025-06-03T10:30:44.483936Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 137 2025-06-03T10:30:44.483941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 137, pathId: [OwnerId: 72057594046678944, LocalPathId: 10], version: 1 2025-06-03T10:30:44.483947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-03T10:30:44.483962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 137, ready parts: 0/1, is published: true 2025-06-03T10:30:44.484813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72057594037968897 cookie: 72057594046678944:10 msg type: 268697601 2025-06-03T10:30:44.484855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72057594037968897 2025-06-03T10:30:44.484864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1780: TOperation RegisterRelationByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-03T10:30:44.484964Z node 1 :HIVE INFO: tablet_helpers.cpp:1181: [72057594037968897] TEvCreateTablet, msg: Owner: 72057594046678944 OwnerIdx: 10 TabletType: DataShard ObjectDomain { SchemeShard: 72057594046678944 PathId: 2 } ObjectId: 10 BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } BindedChannels { StoragePoolName: "pool-1" } AllowedDomains { SchemeShard: 72057594046678944 PathId: 2 } 2025-06-03T10:30:44.485031Z node 1 :HIVE INFO: tablet_helpers.cpp:1245: [72057594037968897] TEvCreateTablet, Owner 72057594046678944, OwnerIdx 10, type DataShard, boot OK, tablet id 72075186233409555 2025-06-03T10:30:44.485055Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard_impl.cpp:5872: Handle TEvCreateTabletReply at schemeshard: 72057594046678944 message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-03T10:30:44.485062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1794: TOperation FindRelatedPartByShardIdx, TxId: 137, shardIdx: 72057594046678944:10, partId: 0 2025-06-03T10:30:44.485081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 137:0, at schemeshard: 72057594046678944, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-03T10:30:44.485089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:175: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046678944 2025-06-03T10:30:44.485096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:178: TCreateParts opId# 137:0 HandleReply TEvCreateTabletReply, message: Status: OK Owner: 72057594046678944 OwnerIdx: 10 TabletID: 72075186233409555 Origin: 72057594037968897 2025-06-03T10:30:44.485120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 137:0 2 -> 3 2025-06-03T10:30:44.485366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-03T10:30:44.485839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 137 2025-06-03T10:30:44.486616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.486656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 137:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.486662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_table.cpp:200: TCreateTable TConfigureParts operationId# 137:0 ProgressState at tabletId# 72057594046678944 2025-06-03T10:30:44.486676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:220: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 seqNo: 4:5 2025-06-03T10:30:44.486758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_table.cpp:236: TCreateTable TConfigureParts operationId# 137:0 ProgressState Propose modify scheme on datashard datashardId: 72075186233409555 message: TxKind: TX_KIND_SCHEME SourceDeprecated { RawX1: 969 RawX2: 4294970135 } TxBody: "\n\236\004\n\007Table11\020\n\032\r\n\003key\030\002 \001(\000@\000\032\020\n\005Value\030\200$ \002(\000@\000(\001:\262\003\022\253\003\010\200\200\200\002\020\254\002\030\364\003 \200\200\200\010(\0000\200\200\200 8\200\200\200\010@\2008H\000RX\010\000\020\000\030\010 \010(\200\200\200@0\377\377\377\377\0178\001B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen1P\nX\200\200\001`nh\000p\000Rb\010\001\020\200\200\200\024\030\005 \020(\200\200\200\200\0020\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen2P\nX\200\200\001`nh\200\200\200\004p\200\200\200\004Rc\010\002\020\200\200\200\310\001\030\005 
\020(\200\200\200\200@0\377\377\377\377\0178\000B$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionJ\017compaction_gen3P\nX\200\200\001`nh\200\200\200(p\200\200\200(X\001`\005j$\010e\020d\031\000\000\000\000\000\000\360?*\025background_compactionr\017compaction_gen0z\017compaction_gen0\202\001\004scan\210\001\200\200\200\010\220\001\364\003\230\0012\270\001\2008\300\001\006R\002\020\001J\026/MyRoot/USER_0/Table11\242\001\006\001\000\000\000\000\200\252\001\000\260\001\001\270\001\000\210\002\001\222\002\013\t\240\207\205\000\000\000\000\001\020\n:\004\010\004\020\005" TxId: 137 ExecLevel: 0 Flags: 0 SchemeShardId: 72057594046678944 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } SubDomainPathId: 2
2025-06-03T10:30:44.487611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 137:0 from tablet: 72057594046678944 to tablet: 72075186233409555 cookie: 72057594046678944:10 msg type: 269549568
2025-06-03T10:30:44.487669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 137, partId: 0, tablet: 72075186233409555
TestModificationResult got TxId: 137, wait until txId: 137
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-IsExternalSubdomain [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:41.887386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:41.887423Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:41.887429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:41.887436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:41.887452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:41.887457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:41.887468Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:41.887483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:41.887618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:41.887699Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:41.906661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:41.906692Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:41.911830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:41.911987Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:41.912037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:41.915497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:41.915615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:41.915774Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.915844Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:41.916835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.916896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:41.917278Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.917323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:41.917341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:41.917351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.917359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:41.917386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.919335Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:41.945538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:41.945633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.945707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:41.945765Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:41.945778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.946731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.946764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:41.946832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.946843Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:41.946849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:41.946855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:41.947370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.947382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:41.947387Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:41.947781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.947793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:41.947800Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.947809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:41.948611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:41.949194Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:41.949244Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:41.949471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:41.949501Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:41.949520Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.949599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:41.949608Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:41.949651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:41.949666Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:41.950194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:41.950205Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:41.950257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
eason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 4 2025-06-03T10:30:44.624633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:44.625029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625783Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:44.625853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 104, path id: [OwnerId: 72075186233409546, LocalPathId: 2] 2025-06-03T10:30:44.625883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 1 2025-06-03T10:30:44.625893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 104, path id: 2 2025-06-03T10:30:44.625967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 104:0 ProgressState at tablet: 72075186233409546 2025-06-03T10:30:44.625990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.625997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 104:0, datashard: 72075186233409549, at schemeshard: 72075186233409546 2025-06-03T10:30:44.626003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 129 -> 240 2025-06-03T10:30:44.626198Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.626209Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.626213Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 
72075186233409546, txId: 104 2025-06-03T10:30:44.626219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 1], version: 9 2025-06-03T10:30:44.626226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 1] was 5 2025-06-03T10:30:44.626425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.626436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72075186233409546, cookie: 104 2025-06-03T10:30:44.626440Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72075186233409546, txId: 104 2025-06-03T10:30:44.626443Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72075186233409546, txId: 104, pathId: [OwnerId: 72075186233409546, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:44.626447Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 4 2025-06-03T10:30:44.626457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 0/1, is published: true 2025-06-03T10:30:44.627273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72075186233409546 2025-06-03T10:30:44.627288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 104:0 ProgressState, at schemeshard: 72075186233409546 2025-06-03T10:30:44.627408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 3 2025-06-03T10:30:44.627464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:44.627472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:44.627479Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:30:44.627486Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:30:44.627492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: true 2025-06-03T10:30:44.627512Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:548:2486] message: TxId: 104 2025-06-03T10:30:44.627519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 
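Note on the completion bookkeeping traced above: the schemeshard drives each suboperation part through its state machine and declares the whole operation finished only when the ready-parts counter reaches N/N ("progress is 1/1", "ready parts: 1/1"), at which point TEvNotifyTxCompletionResult goes to the waiting actor. A minimal illustrative sketch of that pattern (hypothetical types, not the real TOperation code):

    #include <cstddef>
    #include <vector>

    // Sketch only: an operation with N parts is "ready to done" once every
    // part has reported completion, which is what the "ready parts: 1/1"
    // lines above are tracking for TxId 104.
    struct Operation {
        std::vector<bool> PartDone;                  // one flag per suboperation part

        void MarkPartDone(std::size_t partId) { PartDone.at(partId) = true; }

        bool IsReadyToDone() const {
            for (bool done : PartDone) {
                if (!done) return false;             // some part still in flight
            }
            return true;                             // now notify the waiters
        }
    };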
2025-06-03T10:30:44.627525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:30:44.627531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:30:44.627554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72075186233409546, LocalPathId: 2] was 2 2025-06-03T10:30:44.627767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72075186233409546 2025-06-03T10:30:44.627776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72075186233409546, txId: 0, path id: [OwnerId: 72075186233409546, LocalPathId: 1] 2025-06-03T10:30:44.628031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:44.628106Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72075186233409546, cookie: 104 2025-06-03T10:30:44.628501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72075186233409546 2025-06-03T10:30:44.628515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:444:2395], at schemeshard: 72075186233409546, txId: 0, path id: 1 2025-06-03T10:30:44.628651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.628659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:811:2727] 2025-06-03T10:30:44.628769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72075186233409546, msg: Owner: 72075186233409546 Generation: 2 LocalPathId: 1 Version: 10 PathOwnerId: 72075186233409546, cookie: 0 TestWaitNotification: OK eventTxId 104 2025-06-03T10:30:44.629109Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72075186233409546 2025-06-03T10:30:44.629155Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72075186233409546 describe path "/MyRoot/SomeDatabase" took 58us result status StatusSuccess 2025-06-03T10:30:44.629247Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "MyRoot/SomeDatabase" PathId: 1 SchemeshardId: 72075186233409546 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 10 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 10 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 2 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { 
SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 2 ProcessingParams { Version: 2 PlanResolution: 50 Coordinators: 72075186233409547 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409548 SchemeShard: 72075186233409546 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { Audience: "/MyRoot/SomeDatabase" } } } PathId: 1 PathOwnerId: 72075186233409546, at schemeshard: 72075186233409546 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SimultaneousCreateTableForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.406317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.406340Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.406344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.406349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.406364Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.406367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.406374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.406385Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.406480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.406543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.416364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.416383Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.419502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.419588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.419626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.421345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.421419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.421535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.421589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.422233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.422294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.422569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.422576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.422586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.422593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.422598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.422614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.423788Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.438951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 
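A small aside on the constant 18446744073709551615 that recurs in these logs (MaxStep in the coordinator proposals above, and Version on the just-dropped path): it is exactly 2^64 - 1, the maximum uint64, which reads as a sentinel for "no upper bound on the plan step" / "final version". This can be checked at compile time:

    #include <cstdint>
    #include <limits>

    // 18446744073709551615 == 2^64 - 1 == std::numeric_limits<uint64_t>::max().
    static_assert(std::numeric_limits<std::uint64_t>::max() == 18446744073709551615ULL,
                  "the MaxStep/Version value seen in the logs is the uint64 sentinel");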
2025-06-03T10:30:44.439032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.439086Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.439132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.439141Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.439760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.439787Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.439840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.439848Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.439853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.439857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.440254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.440264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.440269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.440591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.440603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.440610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.440618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:44.441316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 
72057594046316545 2025-06-03T10:30:44.441705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.441739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.441909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.441928Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.441945Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.442000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.442005Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.442033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.442042Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:44.442414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.442422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.442459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
06-03T10:30:44.603834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 Forgetting tablet 72075186233409547 Forgetting tablet 72075186233409549 2025-06-03T10:30:44.604163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:44.604193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:44.604353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:30:44.604528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.604538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.604572Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:44.605235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:5 2025-06-03T10:30:44.605251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:5 tabletId 72075186233409550 2025-06-03T10:30:44.605280Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:7 2025-06-03T10:30:44.605285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:7 tabletId 72075186233409552 2025-06-03T10:30:44.605336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5744: Failed to connect, to tablet: 72075186233409552, at schemeshard: 72057594046678944 2025-06-03T10:30:44.605358Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:30:44.605362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:30:44.606068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:44.606111Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:30:44.606119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:30:44.606137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.606144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath 
for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.606167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.606222Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:6 2025-06-03T10:30:44.606227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:6 tabletId 72075186233409551 2025-06-03T10:30:44.606238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:30:44.606242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:30:44.606716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:30:44.606727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:30:44.606766Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607080Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-03T10:30:44.607136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:44.607142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-03T10:30:44.607153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:44.607155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:44.607212Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.607237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:731:2617] 2025-06-03T10:30:44.607250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607262Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.607264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:731:2617] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 
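The TestWaitNotification exchange above follows a simple subscriber pattern: the test harness registers a waiter per txId and sends EvNotifyTxCompletion; because txIds 101 and 102 are already gone, schemeshard answers "unknown transaction" with an immediate completion result, and the waiter is satisfied at once. A hedged sketch of that registry (illustrative types, not the real test_env.cpp code):

    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <vector>

    class TxNotificationSubscriber {
        // txId -> callbacks to run once the transaction is known to be complete
        std::unordered_map<std::uint64_t, std::vector<std::function<void()>>> Waiters;
    public:
        void Wait(std::uint64_t txId, std::function<void()> waiter) {
            Waiters[txId].push_back(std::move(waiter));  // and send EvNotifyTxCompletion
        }
        // Called on EvNotifyTxCompletionResult -- including the immediate reply
        // schemeshard sends for an already-forgotten ("unknown") transaction.
        void OnCompletionResult(std::uint64_t txId) {
            for (auto& w : Waiters[txId]) w();           // "satisfy waiter"
            Waiters.erase(txId);
        }
    };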
2025-06-03T10:30:44.607331Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607376Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 61us result status StatusPathDoesNotExist 2025-06-03T10:30:44.607438Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607506Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607524Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table_0" took 21us result status StatusPathDoesNotExist 2025-06-03T10:30:44.607539Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0/table_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0/table_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607579Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.607601Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 23us result status StatusSuccess 2025-06-03T10:30:44.607659Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TSchemeShardSubDomainTest::Restart [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> TScaleRecommenderTest::BasicTest [GOOD] Test command err: 2025-06-03T10:30:29.749444Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:29.750235Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:29.750299Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:29.750456Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:29.750747Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:29.750761Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:29.750920Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:27:2074] ControllerId# 72057594037932033 2025-06-03T10:30:29.750925Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:29.750967Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:29.750983Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:29.754229Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 
TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:29.754246Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:29.754529Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:35:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754563Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754583Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754606Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754627Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754665Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754688Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.754691Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:29.754705Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:27:2074] 2025-06-03T10:30:29.754710Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:27:2074] 2025-06-03T10:30:29.754717Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:29.754724Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:29.754842Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:29.754935Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.757990Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.758020Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.758029Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:29.758376Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:29.758400Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.758408Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:29.759088Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:29.759204Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:29.759210Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:29.759261Z 
node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.759270Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.759730Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:29.759744Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:29.759748Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:29.759788Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:52:2092] 2025-06-03T10:30:29.759808Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:29.760016Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-03T10:30:29.760025Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-03T10:30:29.760029Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-03T10:30:29.760035Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.760915Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:31:2063] 2025-06-03T10:30:29.760930Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:31:2063] 2025-06-03T10:30:29.760961Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.761012Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:30:29.761020Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:29.761053Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\371$\224\316I\335\243.)W\014\261m\013\346Osy\0160" } 2025-06-03T10:30:29.761085Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:52:2092] 2025-06-03T10:30:29.761089Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:52:2092] 2025-06-03T10:30:29.761097Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.761143Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 
VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:29.761175Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:30:29.761183Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:29.761190Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.002749s 2025-06-03T10:30:29.761721Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:52:2092] 2025-06-03T10:30:29.761852Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.761896Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.761934Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-03T10:30:29.761940Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-03T10:30:29.762062Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:27:2074] 2025-06-03T10:30:29.762070Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:27:2074] 2025-06-03T10:30:29.762080Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:29.762847Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:29.762878Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.762920Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-03T10:30:29.762926Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-03T10:30:29.762930Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 720575 ... 
71: TClient[72075186224037888]::SendEvent [23:566:2485] 2025-06-03T10:30:44.139822Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [23:566:2485] 2025-06-03T10:30:44.139834Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [23:566:2485] 2025-06-03T10:30:44.139838Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [23:566:2485] 2025-06-03T10:30:44.139840Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [23:566:2485] 2025-06-03T10:30:44.139843Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [23:566:2485] 2025-06-03T10:30:44.139846Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [23:566:2485] 2025-06-03T10:30:44.139850Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [23:565:2484] EventType# 268697612 2025-06-03T10:30:44.139862Z node 23 :HIVE TRACE: hive_impl.cpp:114: HIVE#72075186224037888 Handle TEvTabletPipe::TEvServerConnected([23:566:2485]) [23:567:2486] 2025-06-03T10:30:44.139889Z node 23 :HIVE TRACE: hive_impl.cpp:755: HIVE#72075186224037888 THive::Handle::TEvTabletMetrics, NodeId 23 TotalNodeCpuUsage: 0.95 2025-06-03T10:30:44.139903Z node 23 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} queued, type NKikimr::NHive::TTxUpdateTabletMetrics 2025-06-03T10:30:44.139911Z node 23 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:30:44.139925Z node 23 :HIVE TRACE: tx__update_tablet_metrics.cpp:66: HIVE#72075186224037888 THive::TTxUpdateTabletMetrics UpdateResourceTotalUsage node 23 value (0,0,0,0) accumulated to (0,0,0,0) 2025-06-03T10:30:44.139970Z node 23 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} hope 1 -> done Change{15, redo 82b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-03T10:30:44.139976Z node 23 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:12} Tx{22, NKikimr::NHive::TTxUpdateTabletMetrics} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:30:44.150374Z node 23 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [8b41b9ac8186ade8] bootstrap ActorId# [23:569:2488] Group# 2147483648 BlobCount# 1 BlobIDs# [[72075186224037888:1:12:0:0:92:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-03T10:30:44.150430Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [8b41b9ac8186ade8] Id# [72075186224037888:1:12:0:0:92:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:30:44.150439Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [8b41b9ac8186ade8] restore Id# [72075186224037888:1:12:0:0:92:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:30:44.150450Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [8b41b9ac8186ade8] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG33 2025-06-03T10:30:44.150458Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [8b41b9ac8186ade8] Sending missing VPut part# 0 to# 0 blob Id# [72075186224037888:1:12:0:0:92:1] Marker# BPG32 
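The blob IDs in this put trace, e.g. [72075186224037888:1:12:0:0:92:0], appear to decode as [TabletId:Generation:Step:Channel:Cookie:BlobSize:PartId]: tablet 72075186224037888, generation 1, log step 12, channel 0, cookie 0, a 92-byte blob, part 0 for the whole blob (the VPut above carries data part 1, the only part since the group was created with ErasureSpecies: 0 and a single VDisk). An illustrative decoder under that assumed field order (not the real TLogoBlobID API):

    #include <cstdint>
    #include <cstdio>

    // Assumed layout of the bracketed blob IDs printed in the log.
    struct LogoBlobId {
        std::uint64_t TabletId;
        std::uint32_t Generation, Step, Channel, Cookie, BlobSize, PartId;
    };

    int main() {
        LogoBlobId id{72075186224037888ULL, 1, 12, 0, 0, 92, 0};
        // Prints "[72075186224037888:1:12:0:0:92:0]", matching the trace above.
        std::printf("[%llu:%u:%u:%u:%u:%u:%u]\n",
                    static_cast<unsigned long long>(id.TabletId), id.Generation,
                    id.Step, id.Channel, id.Cookie, id.BlobSize, id.PartId);
        return 0;
    }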
2025-06-03T10:30:44.150484Z node 23 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [23:420:2367] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72075186224037888:1:12:0:0:92:1] FDS# 92 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:30:44.150964Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [8b41b9ac8186ade8] received {EvVPutResult Status# OK ID# [72075186224037888:1:12:0:0:92:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 18 } Cost# 80724 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 19 }}}} from# [80000000:1:0:0:0] Marker# BPP01 2025-06-03T10:30:44.150991Z node 23 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [8b41b9ac8186ade8] Result# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 2147483648 Marker# BPP12 2025-06-03T10:30:44.150998Z node 23 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [8b41b9ac8186ade8] SendReply putResult# TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:30:44.151019Z node 23 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 2147483648 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.159 sample PartId# [72075186224037888:1:12:0:0:92:1] QueryCount# 1 VDiskId# [80000000:1:0:0:0] NodeId# 23 } TEvVPutResult{ TimestampMs# 0.651 VDiskId# [80000000:1:0:0:0] NodeId# 23 Status# OK } ] } 2025-06-03T10:30:44.151042Z node 23 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72075186224037888:1:12:0:0:92:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-03T10:30:44.151067Z node 23 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:13} commited cookie 1 for step 12 2025-06-03T10:30:44.151151Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [23:571:2490] 2025-06-03T10:30:44.151155Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [23:571:2490] 2025-06-03T10:30:44.151172Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:44.151181Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 23 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [23:414:2364] 2025-06-03T10:30:44.151188Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [23:571:2490] 2025-06-03T10:30:44.151194Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [23:571:2490] 2025-06-03T10:30:44.151200Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [23:571:2490] 2025-06-03T10:30:44.151204Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [23:571:2490] 2025-06-03T10:30:44.151214Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [23:571:2490] 2025-06-03T10:30:44.151236Z node 23 
:PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [23:571:2490] 2025-06-03T10:30:44.151240Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [23:571:2490] 2025-06-03T10:30:44.151243Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [23:571:2490] 2025-06-03T10:30:44.151246Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [23:571:2490] 2025-06-03T10:30:44.151250Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [23:571:2490] 2025-06-03T10:30:44.151256Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [23:570:2489] EventType# 2146435094 2025-06-03T10:30:44.151265Z node 23 :HIVE TRACE: hive_impl.cpp:114: HIVE#72075186224037888 Handle TEvTabletPipe::TEvServerConnected([23:571:2490]) [23:572:2491] 2025-06-03T10:30:44.151278Z node 23 :HIVE DEBUG: hive_impl.cpp:3579: HIVE#72075186224037888 [MSR] Started 2025-06-03T10:30:44.151285Z node 23 :HIVE TRACE: hive_impl.cpp:3619: HIVE#72075186224037888 [MSR] Node 23 is ready, avg CPU usage: 0.95 2025-06-03T10:30:44.151289Z node 23 :HIVE TRACE: hive_impl.cpp:3627: HIVE#72075186224037888 [MSR] Total avg CPU usage: 0.95, ready nodes: 1 2025-06-03T10:30:44.151293Z node 23 :HIVE TRACE: hive_impl.cpp:3636: HIVE#72075186224037888 [MSR] Avg CPU usage history: [0.95] 2025-06-03T10:30:44.151302Z node 23 :HIVE TRACE: domain_info.cpp:74: HIVE#0 [TargetTracking] [MSR] Scale in window: [0.95], bottom threshold: 0.5 2025-06-03T10:30:44.151307Z node 23 :HIVE TRACE: domain_info.cpp:99: HIVE#0 [TargetTracking] [MSR] Scale out window: [0.95], target: 0.6 2025-06-03T10:30:44.151309Z node 23 :HIVE TRACE: domain_info.cpp:113: HIVE#0 [TargetTracking] [MSR] Need scale out, rounded recommended nodes: 2 2025-06-03T10:30:44.151312Z node 23 :HIVE TRACE: hive_impl.cpp:3649: HIVE#72075186224037888 [MSR] Recommended nodes: 2, current nodes: 1 2025-06-03T10:30:44.151347Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [23:574:2493] 2025-06-03T10:30:44.151350Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [23:574:2493] 2025-06-03T10:30:44.151355Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:44.151361Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 23 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [23:414:2364] 2025-06-03T10:30:44.151366Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72075186224037888] queue send [23:574:2493] 2025-06-03T10:30:44.151372Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72075186224037888] received pending shutdown [23:574:2493] 2025-06-03T10:30:44.151375Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [23:574:2493] 2025-06-03T10:30:44.151378Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [23:574:2493] 2025-06-03T10:30:44.151399Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: 
[72075186224037888] Accept Connect Originator# [23:574:2493] 2025-06-03T10:30:44.151411Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [23:574:2493] 2025-06-03T10:30:44.151414Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [23:574:2493] 2025-06-03T10:30:44.151417Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [23:574:2493] 2025-06-03T10:30:44.151419Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72075186224037888] shutdown pipe due to pending shutdown request [23:574:2493] 2025-06-03T10:30:44.151422Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72075186224037888] notify reset [23:574:2493] 2025-06-03T10:30:44.151425Z node 23 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72075186224037888] HandleSend Sender# [23:573:2492] EventType# 268697642 2025-06-03T10:30:44.151430Z node 23 :HIVE TRACE: hive_impl.cpp:114: HIVE#72075186224037888 Handle TEvTabletPipe::TEvServerConnected([23:574:2493]) [23:575:2494] 2025-06-03T10:30:44.151450Z node 23 :HIVE DEBUG: hive_impl.cpp:3668: HIVE#72075186224037888 Handle TEvHive::TEvRequestScaleRecommendation(DomainKey { SchemeShard: 72057594046678944 PathId: 2 }) >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD] >> TSchemeShardSubDomainTest::CreateAndWait >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] >> TSchemeShardSubDomainTest::RedefineErrors [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutTabletsThenForceDrop [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.688306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.688335Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.688341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.688348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.688363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.688367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.688378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.688391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.688542Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.688634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.703835Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.703864Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.708177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.708309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.708352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.710210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.710281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.710373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.710413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.710967Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.711011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.711257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.711264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.711273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.711278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.711283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.711298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.712344Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.737252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.737362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.737434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.737495Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.737508Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.738471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.738505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.738577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.738590Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.738596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.738602Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.739184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.739200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.739206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.739794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.739811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.739818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.739827Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 
1/1 2025-06-03T10:30:44.740697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:44.741192Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.741241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.741483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.741516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.741534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.741616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.741625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.741663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.741676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:44.742281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.742293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.742340Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
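Strung together, the "Change state for txid 1:0" lines above trace one sub-operation through the schemeshard state machine: 2 (create parts) -> 3 (configure parts) -> 128 (propose to the coordinator) -> 240 (done) once TEvOperationPlan arrives and the paths are published. A compilable sketch of that progression; the numeric values are verbatim from the log, while the enumerator names paraphrase the surrounding ProgressState messages and are not copied from schemeshard sources.

#include <cstdio>

enum class EOpState : int {
    CreateParts    = 2,     // TCreateParts: make shards ("no shards to create" here)
    ConfigureParts = 3,     // TConfigureParts: push config to the new shards
    Propose        = 128,   // TPropose: ask the coordinator for a plan step
    Done           = 240,   // TEvOperationPlan received: publish and finish
};

EOpState Next(EOpState s) {
    switch (s) {
        case EOpState::CreateParts:    return EOpState::ConfigureParts;
        case EOpState::ConfigureParts: return EOpState::Propose;
        case EOpState::Propose:        return EOpState::Done;
        case EOpState::Done:           return EOpState::Done;
    }
    return EOpState::Done;  // unreachable, keeps compilers quiet
}

int main() {
    // Replays the transitions logged above: 2 -> 3, 3 -> 128, 128 -> 240.
    for (EOpState s = EOpState::CreateParts; s != EOpState::Done; s = Next(s))
        std::printf("%d -> %d\n", static_cast<int>(s), static_cast<int>(Next(s)));
}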
2] 2025-06-03T10:30:44.760226Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.760231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:44.760237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:44.760300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.760307Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:416: [72057594046678944] TDeleteParts opId# 101:0 ProgressState 2025-06-03T10:30:44.760318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:44.760323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:44.760329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:44.760332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:44.760339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:44.760344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:44.760348Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:44.760352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:44.760365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:44.760371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:44.760375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:30:44.760379Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:30:44.760512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:44.760528Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:44.760533Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:44.760538Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:30:44.760543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:44.760644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:44.760656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:44.760661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:44.760665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:44.760670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:44.760681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:44.760885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:205: TTxCleanDroppedSubDomains Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.760893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:224: TTxCleanDroppedPaths: PersistRemoveSubDomain for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.760914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:44.760958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:44.760963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:44.760973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.761423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:44.761845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: 
TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:44.761873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:245: TTxCleanDroppedSubDomains Complete, done PersistRemoveSubDomain for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:44.761885Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:30:44.761933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:44.761940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:44.762008Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:44.762024Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:44.762029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:341:2331] TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:44.762105Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.762132Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 41us result status StatusPathDoesNotExist 2025-06-03T10:30:44.762192Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:44.762268Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:44.762289Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 22us result status StatusSuccess 2025-06-03T10:30:44.762367Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
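Note how describing the force-dropped "/MyRoot/USER_0" fails softly: the reply carries StatusPathDoesNotExist, the sentinel PathId 18446744073709551615 (max uint64, i.e. "no such path") for the requested path, and the nearest still-resolved prefix "/MyRoot" with its real id. A toy resolver producing that shape of answer; TDescribeReply and ResolveNearest are invented for illustration, not YDB's describe API.

#include <cstdint>
#include <iostream>
#include <limits>
#include <map>
#include <string>

constexpr uint64_t InvalidPathId =
    std::numeric_limits<uint64_t>::max();  // 18446744073709551615

struct TDescribeReply {
    uint64_t PathId = InvalidPathId;       // the requested path does not exist
    std::string LastExistedPrefixPath;
    uint64_t LastExistedPrefixPathId = InvalidPathId;
};

TDescribeReply ResolveNearest(const std::map<std::string, uint64_t>& paths,
                              std::string path) {
    // Walk up the hierarchy until some prefix still resolves.
    while (true) {
        auto slash = path.rfind('/');
        if (slash == std::string::npos)
            break;
        path = (slash == 0) ? "/" : path.substr(0, slash);
        if (auto it = paths.find(path); it != paths.end())
            return {InvalidPathId, path, it->second};
        if (path == "/")
            break;
    }
    return {};
}

int main() {
    std::map<std::string, uint64_t> paths{{"/MyRoot", 1}};  // USER_0 already dropped
    auto reply = ResolveNearest(paths, "/MyRoot/USER_0");
    std::cout << "PathId=" << reply.PathId                        // max uint64
              << " LastExistedPrefix=" << reply.LastExistedPrefixPath
              << " (id " << reply.LastExistedPrefixPathId << ")\n";  // /MyRoot (id 1)
}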
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::Restart [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.918240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.918270Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.918274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.918280Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.918294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.918298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.918305Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.918322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.918422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.918485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.930335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.930366Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.935020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.935152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.935195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.938400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.938485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.938608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.938669Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.939659Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.939731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.940115Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.940132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.940144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.940154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.940161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.940187Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.941959Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.960639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: 
"pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.960719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.960782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.960825Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.960834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.961783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.961823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.961901Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.961913Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.961920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.961926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.962499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.962512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.962519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.962950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.962960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.962967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.962976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:44.963744Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet 
{ TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:44.964261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.964309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.964537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.964564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.964583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.964662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.964670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.964712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.964726Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:44.965279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.965307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.965365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
EMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.998897Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:45.000250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:45.000667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:45.000715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:45.000770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:45.000776Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:45.000836Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:45.000945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:45.000968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: USER_0, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:45.000980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.000990Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-03T10:30:45.001079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:30:45.001132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:45.001169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:45.001173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:45.001197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: 
TTxInit for TablePartitions, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.001629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.002850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:45.003592Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.003606Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.003635Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 
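The wall of "TTxInit for ..." records is the substance of the Restart test: after the leader tablet is restarted, nothing survives in memory, so the schemeshard re-reads every persisted table (Paths: 2, Shards: 3, ChannelsBinding: 9, the rest empty here) and re-derives parent/child links ("AttachChild") and per-path ref-counts rather than storing them. A small sketch of that rebuild; the row layout is invented, only the mechanism is taken from the log.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct TPathRow { uint64_t Id; uint64_t ParentId; std::string Name; };

struct TPathState {
    std::string Name;
    std::vector<uint64_t> Children;
    int RefCount = 0;
};

int main() {
    // "TTxInit for Paths, read records: 2" -> MyRoot and USER_0.
    std::vector<TPathRow> rows{{1, 1, "MyRoot"}, {2, 1, "USER_0"}};
    std::map<uint64_t, TPathState> tree;
    for (const auto& r : rows)
        tree[r.Id].Name = r.Name;
    for (const auto& r : rows)
        if (r.Id != r.ParentId)                         // root is its own parent
            tree[r.ParentId].Children.push_back(r.Id);  // AttachChild

    // "TTxInit for Shards, read records: 3": each shard owned by USER_0 bumps
    // its ref-count, hence the run of "was 1", "was 2", "was 3" above.
    std::vector<uint64_t> shardOwners{2, 2, 2};
    for (uint64_t owner : shardOwners)
        ++tree[owner].RefCount;

    std::cout << tree[2].Name << " children of root=" << tree[1].Children.size()
              << " refCount=" << tree[2].RefCount << '\n';  // USER_0 1 3
}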
2025-06-03T10:30:45.003644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.003651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:45.003690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:463:2412] sender: [1:522:2058] recipient: [1:15:2062] 2025-06-03T10:30:45.065077Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:45.065149Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 100us result status StatusSuccess 2025-06-03T10:30:45.065244Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.065328Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:45.065343Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 18us result status StatusSuccess 2025-06-03T10:30:45.065395Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 
ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::QuoteNonexistentPool-IsExternalSubdomain-true [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:45.078060Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:45.078093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.078100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:45.078107Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:45.078126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:45.078131Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:45.078141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.078156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:45.078297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:45.078374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:45.095598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:45.095628Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:45.100678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:45.100782Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:45.100815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:45.102741Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:45.102812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:45.102904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.102948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:45.103512Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.103566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:45.103833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.103841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.103852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:45.103858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.103863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:45.103878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.104968Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:45.119718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:45.119792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.119850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:45.119893Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:45.119902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.120705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.120731Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:45.120785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.120793Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:45.120798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:45.120803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:45.121248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.121261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:45.121265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:45.121668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.121682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.121689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.121697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:45.122246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:45.122618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:45.122665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:45.122837Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.122859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.122876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.122932Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:45.122937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.122968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:45.122977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:45.123319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.123326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.123365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:45.130289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:30:45.130551Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.130557Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.130588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:45.130599Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.130603Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 1 2025-06-03T10:30:45.130606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2 2025-06-03T10:30:45.130657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.130663Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState 2025-06-03T10:30:45.130675Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:45.130679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:45.130683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1 2025-06-03T10:30:45.130685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:45.130691Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false 2025-06-03T10:30:45.130695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1 2025-06-03T10:30:45.130699Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0 2025-06-03T10:30:45.130702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0 2025-06-03T10:30:45.130712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:45.130717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0 2025-06-03T10:30:45.130720Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:45.130722Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:45.130792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:45.130799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:45.130802Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:45.130806Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:45.130808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:45.130893Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:45.130900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:30:45.130903Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:30:45.130906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:45.130908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:30:45.130914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:30:45.131712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:30:45.131750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 2025-06-03T10:30:45.132679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: 
ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:45.132729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1102: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, feature flag EnableAlterDatabaseCreateHiveFirst 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpAlterExtSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Mediators: 1 Name: "SomeDatabase" TimeCastBucketsPerMediator: 2 ExternalSchemeShard: true DatabaseQuotas { storage_quotas { unit_kind: "nonexistent_storage_kind" data_size_hard_quota: 1 } } } 2025-06-03T10:30:45.132737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_extsubdomain.cpp:1108: [72057594046678944] CreateCompatibleAlterExtSubDomain, opId 102:0, path /MyRoot/SomeDatabase 2025-06-03T10:30:45.132782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 102:0, explain: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-03T10:30:45.132789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: , at schemeshard: 72057594046678944 2025-06-03T10:30:45.133319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusInvalidParameter Reason: "Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. Existing storage kinds are: " TxId: 102 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.133354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Invalid AlterExtSubDomain request: Invalid ExtSubDomain request: Malformed subdomain request: cannot set storage quotas of the following kinds: nonexistent_storage_kind, because no storage pool in the subdomain SomeDatabase has the specified kinds. 
Existing storage kinds are: , operation: ALTER DATABASE, path: /MyRoot/SomeDatabase TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-03T10:30:45.133419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:45.133427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-03T10:30:45.133446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:30:45.133450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:30:45.133534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:45.133562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:45.133568Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:307:2297] 2025-06-03T10:30:45.133598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:30:45.133616Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:45.133620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:307:2297] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102
>> TSchemeShardSubDomainTest::CreateWithNoEqualName
>> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables
>> THiveTest::TestLockTabletExecutionLocalGone [GOOD]
>> THiveTest::TestLocalRegistrationInSharedHive
>> TSchemeShardSubDomainTest::CreateAndWait [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::RedefineErrors [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:45.046977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:45.047004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.047010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:45.047016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:45.047031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE:
schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:45.047036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:45.047047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.047067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:45.047183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:45.047252Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:45.058597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:45.058618Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:45.062085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:45.062160Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:45.062188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:45.063749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:45.063804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:45.063880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.063917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:45.064545Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.064590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:45.064803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.064810Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.064819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:45.064824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.064829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:45.064842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.066526Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:45.086524Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:45.086594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.086650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:45.086692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:45.086700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.087474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.087505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:45.087563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.087572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:45.087576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:45.087580Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:45.088046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.088057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:45.088061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:45.088427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 
2025-06-03T10:30:45.088437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.088441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.088448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:45.088982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:45.089401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:45.089437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:45.089584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.089607Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.089623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.089676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:45.089682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.089713Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:45.089723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:45.090133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.090144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 
2025-06-03T10:30:45.090191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ration.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.135958Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 108:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:45.135964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 108:0 ProgressState no shards to create, do next state 2025-06-03T10:30:45.135969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 2 -> 3 2025-06-03T10:30:45.136395Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.136413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 108:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:45.136420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 3 -> 128 2025-06-03T10:30:45.136866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.136880Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.136887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 108:0, at tablet# 72057594046678944 2025-06-03T10:30:45.136894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 108 ready parts: 1/1 2025-06-03T10:30:45.136924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 108 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:45.137357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 108:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:108 msg type: 269090816 2025-06-03T10:30:45.137392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 108, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 108 at step: 5000007 FAKE_COORDINATOR: advance: minStep5000007 State->FrontStep: 5000006 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 108 at step: 5000007 2025-06-03T10:30:45.137473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000007, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.137496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 108 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000007 MediatorID: 0 
TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.137504Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-03T10:30:45.137569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 108:0 128 -> 240 2025-06-03T10:30:45.137578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 108:0, at tablet# 72057594046678944 2025-06-03T10:30:45.137606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:45.137620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 108 2025-06-03T10:30:45.138078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.138089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 108, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:45.138129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.138136Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 108, path id: 2 2025-06-03T10:30:45.138199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.138207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 108:0 ProgressState 2025-06-03T10:30:45.138220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:30:45.138226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:45.138232Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#108:0 progress is 1/1 2025-06-03T10:30:45.138237Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:45.138242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 108, ready parts: 1/1, is published: false 2025-06-03T10:30:45.138248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 108 ready parts: 1/1 2025-06-03T10:30:45.138254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 108:0 2025-06-03T10:30:45.138258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 108:0 2025-06-03T10:30:45.138270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:45.138276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 108, publications: 1, subscribers: 0 2025-06-03T10:30:45.138281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 108, [OwnerId: 72057594046678944, LocalPathId: 2], 8 2025-06-03T10:30:45.138375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:45.138388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 8 PathOwnerId: 72057594046678944, cookie: 108 2025-06-03T10:30:45.138393Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 108 2025-06-03T10:30:45.138399Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 108, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 8 2025-06-03T10:30:45.138404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:45.138417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 108, subscribers: 0 2025-06-03T10:30:45.139102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 108 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-03T10:30:45.139190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-03T10:30:45.139197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-03T10:30:45.139276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-03T10:30:45.139295Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:30:45.139300Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:591:2544] TestWaitNotification: OK eventTxId 108 2025-06-03T10:30:45.139419Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:45.139455Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 47us result status StatusSuccess 2025-06-03T10:30:45.139542Z node 1 
:SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 6 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 6 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } StoragePools { Name: "pool-hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-hdd-2" Kind: "hdd-1" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::ConsistentCopyRejects [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:44.407274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:44.407308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.407314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:44.407321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:44.407337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:44.407341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:44.407352Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:44.407374Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created:
Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:44.407493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:44.407582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:44.422860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:44.422892Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:44.427201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:44.427314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:44.427365Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:44.429421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:44.429499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:44.429631Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.429681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:44.430387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.430447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:44.430768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.430781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:44.430795Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:44.430804Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.430810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:44.430831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.432138Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:44.459203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" 
OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:44.459299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.459380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:44.459451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:44.459467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.460324Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.460360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:44.460429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.460442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:44.460449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:44.460457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:44.461134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.461152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:44.461160Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:44.461712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.461733Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:44.461740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.461747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:44.462551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:44.463145Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:44.463198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:44.463433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:44.463467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:44.463490Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.463568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:44.463577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:44.463618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:44.463633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:44.464225Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:44.464236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:44.464286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
rd: 72057594046678944
2025-06-03T10:30:45.151521Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 106:0 129 -> 240
2025-06-03T10:30:45.152131Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 106:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.152270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.152284Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:306: TCopyTable TCopyTableBarrier operationId: 106:0ProgressState, operation type TxCopyTable
2025-06-03T10:30:45.152294Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 106:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1
2025-06-03T10:30:45.152301Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 106, done: 0, blocked: 1
2025-06-03T10:30:45.152315Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 106:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 106 Name: CopyTableBarrier }, at tablet# 72057594046678944
2025-06-03T10:30:45.152322Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 106:0 240 -> 240
2025-06-03T10:30:45.152837Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 106:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.152854Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 106:0 ProgressState
2025-06-03T10:30:45.152871Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1
2025-06-03T10:30:45.152877Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1
2025-06-03T10:30:45.152883Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#106:0 progress is 1/1
2025-06-03T10:30:45.152887Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1
2025-06-03T10:30:45.152894Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 106, ready parts: 1/1, is published: true
2025-06-03T10:30:45.152914Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:646:2567] message: TxId: 106
2025-06-03T10:30:45.152923Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 106 ready parts: 1/1
2025-06-03T10:30:45.152930Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 106:0
2025-06-03T10:30:45.152935Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 106:0
2025-06-03T10:30:45.152975Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3
2025-06-03T10:30:45.152981Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-03T10:30:45.153697Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult
2025-06-03T10:30:45.153716Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [2:823:2719]
TestWaitNotification: OK eventTxId 106
2025-06-03T10:30:45.153892Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.153959Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/table" took 81us result status StatusSuccess
2025-06-03T10:30:45.154091Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/table" PathDescription { Self { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "table" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.154203Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0/dst" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.154230Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0/dst" took 31us result status StatusSuccess
2025-06-03T10:30:45.154302Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0/dst" PathDescription { Self { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 250 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "dst" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.154386Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.154411Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 28us result status StatusSuccess
2025-06-03T10:30:45.154488Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } } Children { Name: "dst" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 106 CreateStep: 250 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 150 ParentPathId: 2 PathState: EPathStateCopying Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateWithoutPlanResolution
>> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateAndWait [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:45.410286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:45.410310Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.410314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:45.410319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:45.410331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:45.410334Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:45.410342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.410358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:45.410450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:45.410515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:45.420359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:45.420381Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:45.424535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:45.424645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:45.424679Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:45.427334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:45.427412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:45.427509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.427559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:45.428176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.428227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:45.428511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.428519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.428526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:45.428536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.428541Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:45.428557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.430038Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:45.446448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:45.446536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.446615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:45.446673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:45.446686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.447606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.447637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:45.447700Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.447712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:45.447719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:45.447725Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:45.448207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.448221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:45.448228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:45.448689Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.448700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.448711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.448720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:45.449523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:45.449966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:45.450009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:45.450218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.450246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.450265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.450345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:45.450353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.450410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:45.450425Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:45.450872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.450882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.450929Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ...
hToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 101, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-06-03T10:30:45.461428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.461434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 2
2025-06-03T10:30:45.461440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 101, path id: 3
FAKE_COORDINATOR: Erasing txId 101
2025-06-03T10:30:45.461506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.461513Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState
2025-06-03T10:30:45.461526Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-06-03T10:30:45.461531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:30:45.461537Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-06-03T10:30:45.461544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:30:45.461549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false
2025-06-03T10:30:45.461554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:30:45.461559Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0
2025-06-03T10:30:45.461563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0
2025-06-03T10:30:45.461576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-03T10:30:45.461582Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0
2025-06-03T10:30:45.461586Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 5
2025-06-03T10:30:45.461589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 3], 3
2025-06-03T10:30:45.461698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:30:45.461710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:30:45.461714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101
2025-06-03T10:30:45.461717Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5
2025-06-03T10:30:45.461721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-06-03T10:30:45.461800Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:30:45.461807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:30:45.461809Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101
2025-06-03T10:30:45.461812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3
2025-06-03T10:30:45.461815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-06-03T10:30:45.461820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0
2025-06-03T10:30:45.462435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
2025-06-03T10:30:45.462457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
TestModificationResult got TxId: 100, wait until txId: 101
TestModificationResults wait txId: 101
TestModificationResult got TxId: 101, wait until txId: 101
TestWaitNotification wait txId: 100
2025-06-03T10:30:45.462523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion
2025-06-03T10:30:45.462531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100
TestWaitNotification wait txId: 101
2025-06-03T10:30:45.462546Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-06-03T10:30:45.462550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
2025-06-03T10:30:45.462625Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944
2025-06-03T10:30:45.462650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult
2025-06-03T10:30:45.462655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:331:2321]
2025-06-03T10:30:45.462684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-06-03T10:30:45.462702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-06-03T10:30:45.462707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:331:2321]
TestWaitNotification: OK eventTxId 100
TestWaitNotification: OK eventTxId 101
2025-06-03T10:30:45.462778Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.462813Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/USER_0" took 47us result status StatusSuccess
2025-06-03T10:30:45.462935Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/USER_0" PathDescription { Self { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 0 TimeCastBucketsPerMediator: 0 } DomainKey { SchemeShard: 72057594046678944 PathId: 3 } StoragePools { Name: "/dc-1/users/tenant-1:hdd" Kind: "hdd" } StoragePools { Name: "/dc-1/users/tenant-1:hdd-1" Kind: "hdd-1" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 3 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.463009Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.463024Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir" took 18us result status StatusSuccess
2025-06-03T10:30:45.463087Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir" PathDescription { Self { Name: "dir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "USER_0" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets
>> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD]
>> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD]
>> TSchemeShardSubDomainTest::LS
|66.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
|66.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
|67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
>> TExternalTableTestReboots::CreateExternalTableWithReboots
>> THiveTest::TestLocalRegistrationInSharedHive [GOOD]
>> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithNoEqualName [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:45.627850Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:45.627880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.627884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:45.627890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:45.627905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:45.627908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:45.627923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.627935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:45.628029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:45.628090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:45.639104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:45.639137Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:45.642971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:45.643075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:45.643111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:45.645577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:45.645674Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:45.645813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.645884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:45.646821Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.646889Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:45.647196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.647204Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.647213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:45.647220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.647225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:45.647244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.648592Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:45.665654Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:45.665747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.665816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:45.665861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:45.665871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.666570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.666596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:45.666650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.666658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:45.666662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:45.666666Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:45.666976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.666984Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:45.666987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:45.667268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.667279Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.667283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.667290Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:45.667774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:45.668084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:45.668117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:45.668289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.668313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.668330Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.668402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:45.668410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.668443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:45.668452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:45.668825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.668834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.668876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... : schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-06-03T10:30:45.802027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: true
2025-06-03T10:30:45.802046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:627:2559] message: TxId: 102
2025-06-03T10:30:45.802053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1
2025-06-03T10:30:45.802058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0
2025-06-03T10:30:45.802065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0
2025-06-03T10:30:45.802089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3
2025-06-03T10:30:45.802552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-06-03T10:30:45.802564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:628:2560]
TestWaitNotification: OK eventTxId 102
TestModificationResults wait txId: 108
2025-06-03T10:30:45.803239Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpMkDir MkDir { Name: "USER_3" } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:45.803306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/USER_3, operationId: 108:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.803337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, at schemeshard: 72057594046678944
2025-06-03T10:30:45.803839Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusAlreadyExists Reason: "Check failed: path: \'/MyRoot/USER_3\', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155" TxId: 108 SchemeshardId: 72057594046678944 PathId: 5 PathCreateTxId: 106, at schemeshard: 72057594046678944
2025-06-03T10:30:45.803870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusAlreadyExists, reason: Check failed: path: '/MyRoot/USER_3', error: path exist, request accepts it (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeSubDomain, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:155, operation: CREATE DIRECTORY, path: /MyRoot/USER_3
TestModificationResult got TxId: 108, wait until txId: 108
2025-06-03T10:30:45.803971Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804009Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 40us result status StatusSuccess
2025-06-03T10:30:45.804089Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804143Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804163Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 21us result status StatusSuccess
2025-06-03T10:30:45.804229Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000005 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "USER_1" Columns { Name: "RowId" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "RowId" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804281Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804291Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 11us result status StatusSuccess
2025-06-03T10:30:45.804314Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_2" PathDescription { Self { Name: "USER_2" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 104 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 1 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804350Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:30:45.804362Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_3" took 12us result status StatusSuccess
2025-06-03T10:30:45.804386Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_3" PathDescription { Self { Name: "USER_3" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 106 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409549 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409550 } DomainKey { SchemeShard: 72057594046678944 PathId: 5 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 5 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::SchemeLimitsRejectsWithIndexedTables [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:45.703758Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:30:45.703789Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.703796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:30:45.703803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:30:45.703818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:30:45.703823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:30:45.703840Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:30:45.703854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:30:45.704082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:30:45.704172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:30:45.720227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:30:45.720255Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:30:45.724903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:30:45.725033Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:30:45.725079Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:30:45.727732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:30:45.727817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:30:45.727923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.727966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:30:45.728743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.728814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:30:45.729126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.729137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:30:45.729152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:30:45.729160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.729167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:30:45.729191Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.730719Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:30:45.751435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:30:45.751514Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.751584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:30:45.751641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:30:45.751651Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.752507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.752533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:30:45.752588Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.752598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:30:45.752604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:30:45.752610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:30:45.753038Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.753047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:30:45.753053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:30:45.753384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.753392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:30:45.753397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.753405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:30:45.754105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:30:45.754515Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:30:45.754552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:30:45.754738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:30:45.754765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:30:45.754781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.754849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:30:45.754857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:30:45.754894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:30:45.754906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:30:45.755383Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:30:45.755392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:30:45.755437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ...
CHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 600 RawX2: 4294969834 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-03T10:30:45.985874Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409549, partId: 2 2025-06-03T10:30:45.985881Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:2, at schemeshard: 72057594046678944, message: Source { RawX1: 600 RawX2: 4294969834 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-03T10:30:45.985884Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:30:45.985889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 107:2 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 600 RawX2: 4294969834 } Origin: 72075186233409549 State: 2 TxId: 107 Step: 0 Generation: 2 2025-06-03T10:30:45.985893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 107:2, shardIdx: 72057594046678944:4, datashard: 72075186233409549, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.985896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-03T10:30:45.985898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 107:2, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:30:45.985901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 107:2 129 -> 240 2025-06-03T10:30:45.986730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:45.986754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:45.987310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:45.987330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:45.987344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987377Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987399Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 107:0 ProgressState 2025-06-03T10:30:45.987423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 2/3 2025-06-03T10:30:45.987427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-03T10:30:45.987431Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 2/3 2025-06-03T10:30:45.987433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 2/3 2025-06-03T10:30:45.987440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 2/3, is published: true 2025-06-03T10:30:45.987545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:2, at schemeshard: 72057594046678944 2025-06-03T10:30:45.987562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 107:2 ProgressState 2025-06-03T10:30:45.987568Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:2 progress is 3/3 2025-06-03T10:30:45.987571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-03T10:30:45.987574Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:2 progress is 3/3 2025-06-03T10:30:45.987576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-03T10:30:45.987579Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 3/3, is published: true 2025-06-03T10:30:45.987599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:476:2424] message: TxId: 107 2025-06-03T10:30:45.987604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 3/3 2025-06-03T10:30:45.987609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-06-03T10:30:45.987613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 107:0 2025-06-03T10:30:45.987638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:30:45.987642Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:1 2025-06-03T10:30:45.987647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 107:1 2025-06-03T10:30:45.987651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:30:45.987654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:2 2025-06-03T10:30:45.987656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 107:2 2025-06-03T10:30:45.987660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 3 2025-06-03T10:30:45.988152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-03T10:30:45.988163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:530:2478] TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-03T10:30:45.988997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/USER_0" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "Table7" Columns { Name: "RowId" Type: "Uint64" } Columns { Name: "Value0" Type: "Utf8" } Columns { Name: "Value1" Type: "Utf8" } Columns { Name: "Value2" Type: "Utf8" } Columns { Name: "Value3" Type: "Utf8" } Columns { Name: "Value4" Type: "Utf8" } KeyColumnNames: "RowId" } IndexDescription { Name: "UserDefinedIndexByValue0" KeyColumnNames: "Value0" } IndexDescription { Name: "UserDefinedIndexByValue1" KeyColumnNames: "Value1" } IndexDescription { Name: "UserDefinedIndexByValue2" KeyColumnNames: "Value2" } IndexDescription { Name: "UserDefinedIndexByValue3" KeyColumnNames: "Value3" } IndexDescription { Name: "UserDefinedIndexByValue4" KeyColumnNames: "Value4" } } } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:45.989079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_indexed_table.cpp:101: TCreateTableIndex construct operation table path: /MyRoot/USER_0/Table7 domain path id: [OwnerId: 72057594046678944, LocalPathId: 2] domain path: /MyRoot/USER_0 shardsToCreate: 6 GetShardsInside: 4 MaxShards: 7 2025-06-03T10:30:45.989093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 108:0, explain: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-03T10:30:45.989099Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, at schemeshard: 72057594046678944 2025-06-03T10:30:45.989806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusResourceExhausted Reason: "indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5" TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.989861Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot/USER_0, subject: , status: 
StatusResourceExhausted, reason: indexes count has reached maximum value in the table, children limit for dir in domain: 4, intention to create new children: 5, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/USER_0/Table7 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-03T10:30:45.989968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-03T10:30:45.989977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-03T10:30:45.990080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-03T10:30:45.990108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:30:45.990115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [1:723:2640] TestWaitNotification: OK eventTxId 108
>> TSchemeShardSubDomainTest::LS [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateWithoutPlanResolution [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:45.967239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:45.967264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.967268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:45.967273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:45.967287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:45.967290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:45.967297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.967312Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:45.967397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# ,
AvailableExternalDataSources# 2025-06-03T10:30:45.967453Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:45.981806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:45.981835Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:45.986352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:45.986470Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:45.986513Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:45.988771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:45.988853Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:45.988997Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.989056Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:45.989939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.990004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:45.990406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.990420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.990434Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:45.990444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.990452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:45.990473Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.992051Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.016567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.016672Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.016756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:46.016815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:46.016827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.017873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.017914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:46.017997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.018014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:46.018021Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:46.018028Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:46.018684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.018702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:46.018709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:46.019166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.019182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.019190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.019200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.020017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:46.020464Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:46.020529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:46.020763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.020796Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.020818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.020894Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:46.020903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.020943Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:46.020957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:46.021485Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.021497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.021554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.021561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:30:46.021667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.021676Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:30:46.021691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:30:46.021696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: 
TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.021702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:30:46.021705Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.021711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:30:46.021717Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.021722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:30:46.021727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:30:46.021746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:46.021754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:30:46.021759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:30:46.022147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:30:46.022170Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:30:46.022176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:30:46.022182Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:30:46.022188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:46.022204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:30:46.022863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:30:46.022980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-03T10:30:46.023855Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { Coordinators: 1 Mediators: 1 Name: "USER_0" 
TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.023925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_0, opId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.023944Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.024055Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2259] Bootstrap 2025-06-03T10:30:46.026481Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2259] Become StateWork (SchemeCache [1:274:2264]) 2025-06-03T10:30:46.026895Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:30:46.027631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: plan resolution is 0" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.027684Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: plan resolution is 0, operation: CREATE DATABASE, path: /MyRoot/USER_0 2025-06-03T10:30:46.027832Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:30:46.027894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:46.027902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:30:46.027985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:46.028008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:46.028014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:284:2274] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:46.028117Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.028155Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 54us result status StatusPathDoesNotExist 2025-06-03T10:30:46.028216Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_0\', 
error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_0" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> TExternalTableTestReboots::SimpleDropExternalTableWithReboots
|67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
|67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
|67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::CreateSubDomainWithoutSomeTablets [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:46.247149Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:46.247173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.247178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:46.247183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:46.247194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:46.247198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:46.247205Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.247219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:46.247311Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:46.247367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG:
schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:46.257219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:46.257242Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:46.260423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:46.260534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:46.260561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:46.262877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:46.262960Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:46.263066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.263137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:46.263917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.263971Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:46.264264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.264274Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.264288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:46.264295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.264301Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:46.264321Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.265611Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.280908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.280985Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at 
schemeshard: 72057594046678944 2025-06-03T10:30:46.281044Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:46.281083Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:46.281093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.281880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.281907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:46.281961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.281971Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:46.281975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:46.281979Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:46.282402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.282412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:46.282417Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:46.282710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.282718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.282724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.282730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.283263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:46.283760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 
1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:46.283812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:46.284033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.284069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.284088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.284190Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:46.284199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.284241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:46.284255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:46.284834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.284846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.284899Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
46678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:30:46.285576Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:30:46.285582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:30:46.285587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:46.285605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:30:46.286552Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:30:46.286641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 100 2025-06-03T10:30:46.287287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Coordinators: 1 Name: "USER_1" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 100 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.287328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_1, opId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.287341Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 100:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, at schemeshard: 72057594046678944 2025-06-03T10:30:46.287430Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:269:2259] Bootstrap 2025-06-03T10:30:46.288803Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:269:2259] Become StateWork (SchemeCache [1:274:2264]) 2025-06-03T10:30:46.289056Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:269:2259] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:30:46.289674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 100, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with coordinators, but no mediators" TxId: 100 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.289723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 100, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with coordinators, but no mediators, operation: CREATE DATABASE, path: /MyRoot/USER_1 2025-06-03T10:30:46.289905Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 
72057594046447617 TestModificationResult got TxId: 100, wait until txId: 100 TestModificationResults wait txId: 101 2025-06-03T10:30:46.290709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateSubDomain SubDomain { PlanResolution: 50 Mediators: 1 Name: "USER_2" TimeCastBucketsPerMediator: 2 StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.290771Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_subdomain.cpp:92: TCreateSubDomain Propose, path: /MyRoot/USER_2, opId: 101:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.290788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusInvalidParameter Reason: "Malformed subdomain request: cant create subdomain with mediators, but no coordinators" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291359Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusInvalidParameter, reason: Malformed subdomain request: cant create subdomain with mediators, but no coordinators, operation: CREATE DATABASE, path: /MyRoot/USER_2 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 100 2025-06-03T10:30:46.291416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:30:46.291424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 TestWaitNotification wait txId: 101 2025-06-03T10:30:46.291439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:30:46.291443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:30:46.291517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:46.291566Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:288:2278] 2025-06-03T10:30:46.291584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291614Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:30:46.291618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:288:2278] TestWaitNotification: OK eventTxId 100 
TestWaitNotification: OK eventTxId 101 2025-06-03T10:30:46.291692Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291721Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 38us result status StatusPathDoesNotExist 2025-06-03T10:30:46.291772Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_1\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_1" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291848Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_2" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291864Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_2" took 17us result status StatusPathDoesNotExist 2025-06-03T10:30:46.291879Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/USER_2\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/USER_2" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291928Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.291952Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 26us result status StatusSuccess 2025-06-03T10:30:46.292026Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: 
schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::LS [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:46.394550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:46.394587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.394595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:46.394602Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:46.394621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:46.394625Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:46.394643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.394658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:46.394780Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:46.394864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:46.411985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:46.412017Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:46.416936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:46.417081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:46.417122Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:46.419884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:46.419972Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:46.420103Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.420173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:46.421031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.421093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:46.421400Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.421413Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.421424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:46.421433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.421438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:46.421459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.422636Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.438538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.438612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.438668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:46.438709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:46.438717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.439416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.439441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:46.439492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.439500Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:46.439505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:46.439509Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:46.439938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.439953Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:46.439959Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:46.440383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.440395Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.440400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.440406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.440911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:46.441363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:46.441408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:46.441563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.441584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.441598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.441651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:46.441656Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.441687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:46.441696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:46.442118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.442126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.442164Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
h for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:46.460711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:30:46.461130Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.461139Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.461177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:46.461193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.461201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:30:46.461207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 2025-06-03T10:30:46.461279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.461286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:30:46.461315Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:46.461321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:46.461326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:30:46.461330Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:46.461335Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:30:46.461341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:30:46.461346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:30:46.461350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:30:46.461387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 6 2025-06-03T10:30:46.461393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, 
subscribers: 0 2025-06-03T10:30:46.461398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:30:46.461402Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 3 2025-06-03T10:30:46.461517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:46.461529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:46.461534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:46.461540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:30:46.461544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:46.461651Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:46.461660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 3 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:30:46.461665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:30:46.461669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 3 2025-06-03T10:30:46.461673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:46.461681Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:30:46.462483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:30:46.462521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:30:46.462580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send 
EvNotifyTxCompletion 2025-06-03T10:30:46.462587Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:30:46.462680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:30:46.462730Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:30:46.462736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:452:2405] TestWaitNotification: OK eventTxId 100 2025-06-03T10:30:46.462825Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.462868Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 56us result status StatusSuccess 2025-06-03T10:30:46.462984Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 Mediators: 72075186233409548 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463095Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463117Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 26us result status StatusSuccess 2025-06-03T10:30:46.463180Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 
5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> THiveTest::TestFollowers_LocalNodeOnly [GOOD] >> THiveTest::TestFollowersCrossDC_Tight |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestLocalRegistrationInSharedHive [GOOD] Test command err: 2025-06-03T10:30:30.097894Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.098684Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.098744Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:30.098906Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:30.099157Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:30.099170Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.099364Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:27:2074] 
ControllerId# 72057594037932033 2025-06-03T10:30:30.099368Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.099403Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.099420Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.103790Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.103812Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.104126Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:35:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104161Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104184Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104208Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104231Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104257Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104282Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.104286Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.104299Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:27:2074] 2025-06-03T10:30:30.104304Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:27:2074] 2025-06-03T10:30:30.104312Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.104319Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.104447Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.104549Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.107800Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:30.107841Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.107849Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.108152Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.108173Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.108179Z node 1 :BS_NODE DEBUG: 
{NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:30.108863Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:30.108970Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:30.108977Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:30.109019Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:30.109025Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.109531Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:30.109553Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:30.109556Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:30.109591Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:52:2092] 2025-06-03T10:30:30.109607Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:30.109819Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-03T10:30:30.109829Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-03T10:30:30.109832Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-03T10:30:30.109838Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:30.110721Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:31:2063] 2025-06-03T10:30:30.110757Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:31:2063] 2025-06-03T10:30:30.110784Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.110814Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:30:30.110819Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.110846Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\371$\224\316I\335\243.)W\014\261m\013\346Osy\0160" } 2025-06-03T10:30:30.110880Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:52:2092] 2025-06-03T10:30:30.110883Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:52:2092] 2025-06-03T10:30:30.110888Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 
2025-06-03T10:30:30.110931Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.110961Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:30:30.110970Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:30.110979Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.002768s 2025-06-03T10:30:30.111473Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:52:2092] 2025-06-03T10:30:30.111586Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:30.111617Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.111649Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-03T10:30:30.111655Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-03T10:30:30.111759Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:27:2074] 2025-06-03T10:30:30.111764Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:27:2074] 2025-06-03T10:30:30.111773Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:30.112532Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:30.112568Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:30.112611Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-03T10:30:30.112617Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-03T10:30:30.112620Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 720575 ... 
37]::SendEvent [31:550:2091] 2025-06-03T10:30:46.035803Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-03T10:30:46.035831Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-03T10:30:46.035837Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-03T10:30:46.035857Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [31:550:2091] 2025-06-03T10:30:46.035928Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [30:460:2365] CurrentLeaderTablet: [30:475:2377] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:30:46.035963Z node 30 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([31:550:2091]) [30:559:2427] 2025-06-03T10:30:46.035972Z node 31 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [30:460:2365] CurrentLeaderTablet: [30:475:2377] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:30:46.035991Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [30:460:2365] CurrentLeaderTablet: [30:475:2377] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[30:1099535971443:0] : 6}, {[30:24343667:0] : 3}}}} 2025-06-03T10:30:46.035995Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-03T10:30:46.036000Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 31 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [30:460:2365] 2025-06-03T10:30:46.036009Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037888] forward result remote node 30 [31:551:2092] 2025-06-03T10:30:46.036063Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [31:550:2091] 2025-06-03T10:30:46.036068Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [31:550:2091] 2025-06-03T10:30:46.036071Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [31:550:2091] 2025-06-03T10:30:46.036081Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [31:550:2091] 2025-06-03T10:30:46.036086Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037888] remote node connected [31:551:2092] 2025-06-03T10:30:46.036089Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [31:551:2092] 2025-06-03T10:30:46.036102Z node 31 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594037927937 Status=OK ClientId=[31:550:2091]} 2025-06-03T10:30:46.036184Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# 
[31:551:2092] 2025-06-03T10:30:46.036198Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [31:547:2091] EventType# 268959744 2025-06-03T10:30:46.036237Z node 30 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037927937 Handle TEvLocal::TEvRegisterNode from [31:547:2091] HiveId: 72057594037927937 ServicedDomains { SchemeShard: 72057594046678944 PathId: 2 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 2025-06-03T10:30:46.036261Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-03T10:30:46.036269Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:30:46.036279Z node 30 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037927937 THive::TTxRegisterNode(31)::Execute 2025-06-03T10:30:46.036311Z node 30 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037927937 ProcessWaitQueue (0) 2025-06-03T10:30:46.036315Z node 30 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-03T10:30:46.036318Z node 30 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037927937 ProcessBootQueue - sending 2025-06-03T10:30:46.036321Z node 30 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037927937 ProcessWaitQueue (0) 2025-06-03T10:30:46.036326Z node 30 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-03T10:30:46.036339Z node 30 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:46.036359Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{14, redo 208b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-03T10:30:46.036370Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{25, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:30:46.036410Z node 30 :HIVE TRACE: hive_impl.cpp:114: HIVE#72075186224037888 Handle TEvTabletPipe::TEvServerConnected([31:551:2092]) [30:560:2428] 2025-06-03T10:30:46.036452Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [31:551:2092] 2025-06-03T10:30:46.036459Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [31:551:2092] 2025-06-03T10:30:46.036464Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72075186224037888] push event to server [31:551:2092] 2025-06-03T10:30:46.036493Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [31:551:2092] 2025-06-03T10:30:46.036504Z node 31 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72075186224037888 Status=OK ClientId=[31:551:2092]} 2025-06-03T10:30:46.036514Z node 30 :HIVE TRACE: hive_impl.cpp:328: HIVE#72057594037927937 ProcessBootQueue - executing 2025-06-03T10:30:46.036526Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-03T10:30:46.036531Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:30:46.036537Z node 30 :HIVE DEBUG: tx__process_boot_queue.cpp:18: 
HIVE#72057594037927937 THive::TTxProcessBootQueue()::Execute 2025-06-03T10:30:46.036545Z node 30 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037927937 Handle ProcessBootQueue (size: 0) 2025-06-03T10:30:46.036551Z node 30 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037927937 Handle ProcessWaitQueue (size: 0) 2025-06-03T10:30:46.036557Z node 30 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037927937 ProcessBootQueue - BootQueue empty (WaitQueue: 0) 2025-06-03T10:30:46.036566Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{15, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:30:46.036571Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} Tx{26, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:30:46.036615Z node 30 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72057594037927937 TEvInterconnect::TEvNodeInfo NodeId 31 Location DataCenter: "2" Module: "2" Rack: "2" Unit: "2" 2025-06-03T10:30:46.036642Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72075186224037888] Push Sender# [31:548:2092] EventType# 268959744 2025-06-03T10:30:46.036678Z node 30 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72075186224037888 Handle TEvLocal::TEvRegisterNode from [31:548:2092] HiveId: 72075186224037888 ServicedDomains { SchemeShard: 72057594046678944 PathId: 2 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 2025-06-03T10:30:46.036693Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-03T10:30:46.036699Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:30:46.036707Z node 30 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72075186224037888 THive::TTxRegisterNode(31)::Execute 2025-06-03T10:30:46.036740Z node 30 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(31, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:46.036747Z node 30 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72075186224037888 ProcessWaitQueue (0) 2025-06-03T10:30:46.036751Z node 30 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72075186224037888 ProcessBootQueue (0) 2025-06-03T10:30:46.036755Z node 30 :HIVE TRACE: hive_impl.cpp:344: HIVE#72075186224037888 ProcessBootQueue - sending 2025-06-03T10:30:46.036760Z node 30 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72075186224037888 ProcessWaitQueue (0) 2025-06-03T10:30:46.036764Z node 30 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72075186224037888 ProcessBootQueue (0) 2025-06-03T10:30:46.036774Z node 30 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(31, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:46.036785Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{6, redo 199b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-03T10:30:46.036791Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:5} Tx{6, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:30:46.036834Z node 30 :HIVE TRACE: hive_impl.cpp:328: HIVE#72075186224037888 ProcessBootQueue - executing 2025-06-03T10:30:46.036841Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-03T10:30:46.036846Z 
node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:30:46.036851Z node 30 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72075186224037888 THive::TTxProcessBootQueue()::Execute 2025-06-03T10:30:46.036858Z node 30 :HIVE DEBUG: hive_impl.cpp:199: HIVE#72075186224037888 ProcessBootQueue: 0 nodes connected out of 0 2025-06-03T10:30:46.036865Z node 30 :HIVE DEBUG: hive_impl.cpp:216: HIVE#72075186224037888 ProcessBootQueue - waiting until 586524-01-19T08:01:49.551615Z because of warmup, now: 1970-01-01T00:00:00.142448Z 2025-06-03T10:30:46.036870Z node 30 :HIVE DEBUG: hive_impl.cpp:353: HIVE#72075186224037888 PostponeProcessBootQueue (18446744073709.409167s) 2025-06-03T10:30:46.036880Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{7, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:30:46.036885Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72075186224037888:1:6} Tx{7, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:30:46.036907Z node 30 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72075186224037888 TEvInterconnect::TEvNodeInfo NodeId 31 Location DataCenter: "2" Module: "2" Rack: "2" Unit: "2" |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots >> BuildStatsHistogram::Many_Serial [GOOD] |67.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> IndexBuildTest::MergeIndexTableShardsOnlyWhenReady [GOOD] >> IndexBuildTest::RejectsCancel |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TChargeBTreeIndex::FewNodes_Groups [GOOD] >> TChargeBTreeIndex::FewNodes_History >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::DiskSpaceUsage [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:37.940473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:37.940506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.940532Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:37.940540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:37.940557Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:37.940563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:37.940575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:37.940596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:37.940728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:37.940811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:37.957513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:37.957540Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:37.961844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:37.961963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:37.962004Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:37.963999Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:37.964076Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear 
TempDirsState with owners number: 0 2025-06-03T10:30:37.964198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.964253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:37.965190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.965267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:37.965611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.965623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:37.965633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:37.965645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.965659Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:37.965682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.967077Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:37.982375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:37.982451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.982508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:37.982561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:37.982571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 
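The far-future warmup deadline in the Hive trace above (586524-01-19T08:01:49.551615Z) is not corruption: it is 2^64 - 1 microseconds after the Unix epoch, i.e. effectively TInstant::Max(), and the postpone duration is simply that maximum minus the test's mock clock (142448 us). A minimal sketch of the arithmetic, assuming the deadline is the 64-bit maximum; the variable names are illustrative, not YDB code:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t maxUs = UINT64_MAX; // 18446744073709551615 us since epoch
        const uint64_t nowUs = 142448;     // "now: 1970-01-01T00:00:00.142448Z"
        const uint64_t postponeUs = maxUs - nowUs;
        // Prints 18446744073709.409167 s, matching
        // "PostponeProcessBootQueue (18446744073709.409167s)" in the log.
        std::printf("%llu.%06llu s\n",
                    (unsigned long long)(postponeUs / 1000000),
                    (unsigned long long)(postponeUs % 1000000));
        return 0;
    }

At roughly 31,556,952 seconds per mean Gregorian year, 2^64 - 1 us is about 584,554 years, which added to 1970 lands in the year 586524 seen in the deadline.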
2025-06-03T10:30:37.983229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:37.983279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983288Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:37.983292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:37.983296Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:37.983639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983648Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:37.983973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983981Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:37.983986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.983993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:37.984545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:37.984995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:37.985043Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:37.985211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:37.985235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } 
Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:37.985253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.985346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:37.985354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:37.985389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:37.985400Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:37.985813Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:37.985822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:37.985865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 14Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:48.818618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:48.818623Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:48.818626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:48.818634Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:48.818644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:48.818719Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:48.818775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:48.819747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:48.820164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:48.820210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 
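The ALTER DATABASE transaction traced above walks the schemeshard suboperation state machine: TCreateParts reports "no shards to create" and advances 2 -> 3, TConfigureParts advances 3 -> 128, and TPropose blocks until the coordinator plans the tx at step 5000001, then advances 128 -> 240 on TEvOperationPlan and publishes the path. A hedged sketch of that progression, with the numeric values taken only from the "Change state for txid" lines above (the enum is illustrative, not the real NKikimr definition):

    // Illustrative only: values and names as observed in the log;
    // the actual YDB enum may differ.
    enum ESubOperationState {
        CreateParts    = 2,   // "TCreateParts ... no shards to create, do next state"
        ConfigureParts = 3,   // after "Change state for txid 1:0 2 -> 3"
        Propose        = 128, // after "3 -> 128"; waits for TEvOperationPlan
        Done           = 240, // after "128 -> 240"; path is then published
    };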
2025-06-03T10:30:48.820250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:48.820256Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:48.820308Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:48.820391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1383: TTxInit for Paths, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table1, child id: [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:30:48.820420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: Table2, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:30:48.820429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1457: TTxInit for UserAttributes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1483: TTxInit for UserAttributesAlterData, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1785: TTxInit for Tables, read records: 2, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 0 2025-06-03T10:30:48.820576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0 2025-06-03T10:30:48.820589Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__root_data_erasure_manager.cpp:452: [RootDataErasureManager] Restore: Generation# 0, Status# 0, WakeupInterval# 604800 s, NumberDataErasureTenantsInRunning# 0 2025-06-03T10:30:48.820630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2033: TTxInit for Columns, read records: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2093: TTxInit for ColumnsAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2151: TTxInit for Shards, read records: 3, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:30:48.820679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:30:48.820687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:48.820707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2237: TTxInit for TablePartitions, read records: 3, at schemeshard: 72057594046678944 
2025-06-03T10:30:48.820742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2303: TTxInit for TableShardPartitionConfigs, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820790Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2453: TTxInit for ChannelsBinding, read records: 9, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2832: TTxInit for TableIndexes, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820859Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:2911: TTxInit for TableIndexKeys, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820910Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3412: TTxInit for KesusInfos, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820919Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3448: TTxInit for KesusAlters, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820948Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3665: TTxInit for TxShards, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3810: TTxInit for ShardToDelete, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.820974Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3827: TTxInit for BackupSettings, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821001Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:3987: TTxInit for ShardBackupStatus, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821013Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.821117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.822268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:48.822849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:48.822863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:48.823072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:48.823082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:48.823092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:48.823373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:762:2678] sender: [1:822:2058] recipient: [1:15:2062] 2025-06-03T10:30:48.854350Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:48.854427Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 100us result status StatusSuccess 2025-06-03T10:30:48.854549Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Table2" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 1752 DataSize: 1752 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> THiveTest::TestFollowersCrossDC_Tight [GOOD] >> THiveTest::TestFollowersCrossDC_MovingLeader >> TExternalTableTestReboots::DropExternalTableWithReboots >> TExternalTableTestReboots::ParallelCreateDrop |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> THiveTest::TestHiveBalancerWithFollowers [GOOD] >> THiveTest::TestHiveBalancerWithLimit |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest |67.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-false >> IndexBuildTest::ShadowDataNotAllowedByDefault >> IndexBuildTest::BaseCase |67.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> BuildStatsHistogram::Many_Serial [GOOD] Test command err: adding part [0:0:1:0:0:0:0] data size (1.93MiB in total) adding group {0,0} PageId: 934 RowCount: 24000 DataSize: 1692763 GroupDataSize: 413676 ErasedRowCount: 0 LevelCount: 3 IndexSize: 49449 added slice [0, 24000) data size (1.61MiB - 0B) => 1.61MiB added small blobs data size => 1.73MiB added large blobs data size => 2.01MiB building histogram with row resolution 2400, data size resolution 206KiB slicing part [0:0:1:0:0:0:0]: { {rows: [0, 23999] keys: [{7, 10}, {80038, 26687}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 1 closedRowCount: 0 openedRowCount: 24000 nextHistogramRowCount: 2400 adding part [0:0:1:0:0:0:0] data size (1.93MiB in total) adding group {0,0} PageId: 934 RowCount: 24000 DataSize: 1692763 GroupDataSize: 413676 ErasedRowCount: 0 LevelCount: 3 IndexSize: 49449 added slice [0, 24000) data size (1.61MiB - 0B) => 1.61MiB added small blobs data size 
=> 1.73MiB added large blobs data size => 2.01MiB building histogram with row resolution 2400, data size resolution 206KiB slicing part [0:0:1:0:0:0:0]: { {rows: [0, 23999] keys: [{7, 10}, {80038, 26687}]} } slicing node Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 => take adding node future events -1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 0 openedSortedByDataSize: 0 FutureEvents: 2 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 1 closedRowCount: 0 openedRowCount: 24000 nextHistogramRowCount: 2400 adding event 0 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 552 Level: 2 BeginRowId: 8565 EndRowId: 11434 BeginDataSize: 754239 EndDataSize: 1008444 BeginKey: {28576, 9533} EndKey: {38122, 12715} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 552 Level: 2 BeginRowId: 8565 EndRowId: 11434 BeginDataSize: 754239 EndDataSize: 1008444 BeginKey: {28576, 9533} EndKey: {38122, 12715} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 663 Level: 2 BeginRowId: 11434 EndRowId: 14280 BeginDataSize: 1008444 EndDataSize: 1257358 BeginKey: {38122, 12715} EndKey: {47692, 15905} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 663 Level: 2 BeginRowId: 11434 EndRowId: 14280 BeginDataSize: 
1008444 EndDataSize: 1257358 BeginKey: {38122, 12715} EndKey: {47692, 15905} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 774 Level: 2 BeginRowId: 14280 EndRowId: 17140 BeginDataSize: 1257358 EndDataSize: 1508340 BeginKey: {47692, 15905} EndKey: {57265, 19096} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 774 Level: 2 BeginRowId: 14280 EndRowId: 17140 BeginDataSize: 1257358 EndDataSize: 1508340 BeginKey: {47692, 15905} EndKey: {57265, 19096} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 885 Level: 2 BeginRowId: 17140 EndRowId: 19992 BeginDataSize: 1508340 EndDataSize: 1755252 BeginKey: {57265, 19096} EndKey: {66697, 22240} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 885 Level: 2 BeginRowId: 17140 EndRowId: 19992 BeginDataSize: 1508340 EndDataSize: 1755252 BeginKey: {57265, 19096} EndKey: {66697, 22240} State: 0 adding event 1 IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 933 Level: 2 BeginRowId: 19992 EndRowId: 24000 BeginDataSize: 1755252 EndDataSize: 2106439 BeginKey: {66697, 22240} EndKey: {80038, 26687} State: 0 adding event 1 IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 933 Level: 2 BeginRowId: 19992 EndRowId: 24000 BeginDataSize: 1755252 EndDataSize: 2106439 BeginKey: {66697, 22240} EndKey: {80038, 26687} State: 0 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 2855 openedDataSize: 252082 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 16 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 934 Level: 3 BeginRowId: 0 EndRowId: 24000 BeginDataSize: 0 EndDataSize: 2106439 BeginKey: {7, 10} EndKey: {80038, 26687} State: 3 iterating stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 0 closedDataSize: 0 openedRowCount: 2855 openedDataSize: 252082 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 16 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 1 checking stats.RowCountHistogram: 0 stats.DataSizeHistogram: 0 nextHistogramRowCount: 2400 nextHistogramDataSize: 210643 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 15 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 2 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 1 openedSortedByDataSize: 2 FutureEvents: 15 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 0 checking 
stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 2857 openedDataSize: 251544 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 14 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 iterating stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 2855 closedDataSize: 252082 openedRowCount: 2857 openedDataSize: 251544 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 14 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 processing event IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 1 checking stats.RowCountHistogram: 1 stats.DataSizeHistogram: 1 nextHistogramRowCount: 4800 nextHistogramDataSize: 421286 closedRowCount: 5712 closedDataSize: 503626 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 13 currentKeyPointer: IsBegin: 0 Part: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 2 iterating stats.RowCountHistogram: 2 stats.DataSizeHistogram: 2 nextHistogramRowCount: 7200 nextHistogramDataSize: 631929 closedRowCount: 5712 closedDataSize: 503626 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 2 openedSortedByDataSize: 3 FutureEvents: 13 currentKeyPointer: IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 processing event IsBegin: 1 Part: [0:0:1:0:0:0:0] PageId: 441 Level: 2 BeginRowId: 5712 EndRowId: 8565 BeginDataSize: 503626 EndDataSize: 754239 BeginKey: {19015, 6346} EndKey: {28576, 9533} State: 0 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 330 Level: 2 BeginRowId: 2855 EndRowId: 5712 BeginDataSize: 252082 EndDataSize: 503626 BeginKey: {9445, 3156} EndKey: {19015, 6346} State: 2 closedRowCount: 5712 openedRowCount: 2853 nextHistogramRowCount: 7200 loading node by row count triggerPart: [0:0:1:0:0:0:0] PageId: 219 Level: 2 BeginRowId: 0 EndRowId: 2855 BeginDataSize: 0 EndDataSize: 252082 BeginKey: {7, 10} EndKey: {9445, 3156} State: 2 closedRowCount: 5712 openedRowCount: 2853 nextHistogramRowCount: 7200 loading node by row count tri ... 
65} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84700 closedDataSize: 8822453 openedRowCount: 100 openedDataSize: 10550 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 305 currentKeyPointer: IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 1 processing event IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 304 currentKeyPointer: IsBegin: 0 Part: [0:0:848:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10550 BeginKey: {282451, 94158} EndKey: {282772, 94265} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 848 openedSortedByDataSize: 848 FutureEvents: 304 currentKeyPointer: IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 0 processing event IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 100 openedDataSize: 10111 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 303 currentKeyPointer: IsBegin: 1 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84800 closedDataSize: 8833003 openedRowCount: 100 openedDataSize: 10111 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 303 currentKeyPointer: IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 processing event IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 302 currentKeyPointer: IsBegin: 0 Part: [0:0:849:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10111 BeginKey: {282775, 94266} EndKey: {283117, 94380} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 
nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 849 openedSortedByDataSize: 849 FutureEvents: 302 currentKeyPointer: IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 0 processing event IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 100 openedDataSize: 10583 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 301 currentKeyPointer: IsBegin: 1 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 84900 closedDataSize: 8843114 openedRowCount: 100 openedDataSize: 10583 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 301 currentKeyPointer: IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 processing event IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 8 nextHistogramRowCount: 90000 nextHistogramDataSize: 9370665 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 300 currentKeyPointer: IsBegin: 0 Part: [0:0:850:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10583 BeginKey: {283123, 94382} EndKey: {283444, 94489} State: 2 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 850 openedSortedByDataSize: 850 FutureEvents: 300 currentKeyPointer: IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 0 processing event IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 0 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 100 openedDataSize: 10456 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 299 currentKeyPointer: IsBegin: 1 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 iterating stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 
18446744073709551615 closedRowCount: 85000 closedDataSize: 8853697 openedRowCount: 100 openedDataSize: 10456 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 299 currentKeyPointer: IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 processing event IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 1 checking stats.RowCountHistogram: 8 stats.DataSizeHistogram: 9 nextHistogramRowCount: 90000 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85100 closedDataSize: 8864153 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 298 currentKeyPointer: IsBegin: 0 Part: [0:0:851:0:0:0:0] PageId: 148 Level: 4 BeginRowId: 0 EndRowId: 100 BeginDataSize: 0 EndDataSize: 10456 BeginKey: {283447, 94490} EndKey: {283771, 94598} State: 2 finished stats.RowCountHistogram: 9 stats.DataSizeHistogram: 9 nextHistogramRowCount: 18446744073709551615 nextHistogramDataSize: 18446744073709551615 closedRowCount: 85100 closedDataSize: 8864153 openedRowCount: 0 openedDataSize: 0 openedSortedByRowCount: 851 openedSortedByDataSize: 851 FutureEvents: 298 Touched 0% bytes, 0 pages RowCountHistogram: 5% (actual 6%) key = (16984, 5669) value = 5100 (actual 6998 - -1% error) 10% (actual 9%) key = (50416, 16813) value = 15100 (actual 16798 - -1% error) 10% (actual 9%) key = (83701, 27908) value = 25100 (actual 26598 - -1% error) 10% (actual 9%) key = (116986, 39003) value = 35100 (actual 36398 - -1% error) 10% (actual 9%) key = (150319, 50114) value = 45100 (actual 46198 - -1% error) 10% (actual 9%) key = (183700, 61241) value = 55100 (actual 55998 - 0% error) 10% (actual 9%) key = (217081, 72368) value = 65100 (actual 65798 - 0% error) 10% (actual 9%) key = (250486, 83503) value = 75100 (actual 75598 - 0% error) 10% (actual 9%) key = (283771, 94598) value = 85100 (actual 85398 - 0% error) 14% (actual 14%) DataSizeHistogram: 5% (actual 6%) key = (16648, 5557) value = 524891 (actual 723287 - -1% error) 10% (actual 9%) key = (50086, 16703) value = 1569936 (actual 1747238 - -1% error) 9% (actual 9%) key = (83356, 27793) value = 2610698 (actual 2767306 - -1% error) 10% (actual 9%) key = (116647, 38890) value = 3652143 (actual 3787394 - -1% error) 9% (actual 9%) key = (149656, 49893) value = 4685435 (actual 4800597 - -1% error) 10% (actual 9%) key = (183040, 61021) value = 5728420 (actual 5822785 - 0% error) 10% (actual 9%) key = (216727, 72250) value = 6776444 (actual 6848929 - 0% error) 9% (actual 9%) key = (250144, 83389) value = 7813547 (actual 7865227 - 0% error) 9% (actual 9%) key = (283444, 94489) value = 8853697 (actual 8884838 - 0% error) 14% (actual 14%) Checking Flat: Touched 100% bytes, 1000 pages RowCountHistogram: 10% (actual 11%) key = (33379, 11134) value = 10000 (actual 11800 - -1% error) 10% (actual 9%) key = (66721, 22248) value = 20000 (actual 21600 - -1% error) 10% (actual 9%) key = (100015, 33346) value = 30000 (actual 31400 - -1% error) 10% (actual 9%) key = (133258, 44427) value = 40000 (actual 41200 - -1% error) 10% (actual 9%) key = (166621, 55548) value = 50000 (actual 51000 - -1% error) 10% (actual 9%) key = (200041, 66688) value = 60000 (actual 60800 - 0% error) 10% (actual 9%) key = (233449, 77824) value = 70000 (actual 70600 - 0% error) 10% (actual 9%) key = (266824, 
88949) value = 80000 (actual 80400 - 0% error) 10% (actual 9%) key = (300073, 100032) value = 90000 (actual 90200 - 0% error) 10% (actual 9%) DataSizeHistogram: 10% (actual 11%) key = (33187, NULL) value = 1041247 (actual 1229534 - -1% error) 10% (actual 9%) key = (66517, NULL) value = 2082456 (actual 2249844 - -1% error) 10% (actual 9%) key = (99709, NULL) value = 3123684 (actual 3270138 - -1% error) 10% (actual 9%) key = (132925, NULL) value = 4164886 (actual 4290603 - -1% error) 10% (actual 9%) key = (166246, NULL) value = 5206111 (actual 5311117 - -1% error) 10% (actual 9%) key = (199678, NULL) value = 6247321 (actual 6331068 - 0% error) 10% (actual 9%) key = (233290, NULL) value = 7288529 (actual 7350869 - 0% error) 10% (actual 9%) key = (266701, NULL) value = 8329759 (actual 8371441 - 0% error) 10% (actual 9%) key = (300052, NULL) value = 9371030 (actual 9392083 - 0% error) 9% (actual 9%) Checking Mixed: Touched 0% bytes, 0 pages RowCountHistogram: 100% (actual 100%) DataSizeHistogram: 100% (actual 100%) >> IndexBuildTest::CancellationNotEnoughRetries >> IndexBuildTest::ShadowDataNotAllowedByDefault [GOOD] >> IndexBuildTest::ShadowDataEdgeCases >> IndexBuildTest::RejectsCreate >> THiveTest::TestDrainWithMaxTabletsScheduled [GOOD] >> THiveTest::TestDownAfterDrain >> IndexBuildTest::RejectsCancel [GOOD] >> THiveTest::TestHiveBalancerWithLimit [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet >> IndexBuildTest::ShadowDataEdgeCases [GOOD] >> TStorageBalanceTest::TestScenario1 [GOOD] >> TStorageBalanceTest::TestScenario2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsCancel [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:21.771021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:21.771057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:21.771066Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:21.771073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:21.771093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:21.771098Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:21.771111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:21.771129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 
0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:21.771265Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:21.771360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:21.788374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:21.788414Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:21.794085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:21.794257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:21.794304Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:21.801245Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:21.801368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:21.801553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.801668Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:21.802956Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:21.803041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:21.803550Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:21.803583Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:21.803619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:21.803636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:21.803651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:21.803683Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.805935Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:21.831394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: 
ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:21.831501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.831584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:21.831641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:21.831654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.835503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.835579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:21.835685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.835703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:21.835710Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:21.835718Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:21.837536Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.837587Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:21.837596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:21.842096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.842136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:21.842147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.842199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:21.843183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation 
DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:21.844482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:21.844555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:21.844847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:21.844899Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:21.844912Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.845024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:21.845037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:21.845116Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:21.845134Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:21.846457Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:21.846476Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:21.846549Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
lockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 1818, read rows: 101, read bytes: 1818 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:51.491624Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Unlocking to Done 2025-06-03T10:30:51.492024Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-03T10:30:51.492040Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 1, CreateSender: [2:1174:3026], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 5000004, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 101, upload bytes: 1818, read rows: 101, read bytes: 1818 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:51.492046Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-03T10:30:51.492072Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:51.492079Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:1266:3107] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:51.492530Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__cancel.cpp:18: TIndexBuilder::TXTYPE_CANCEL_INDEX_BUILD: DoExecute TxId: 105 DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-03T10:30:51.492562Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_CANCEL_INDEX_BUILD: Reply TxId: 105 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <102> has been finished already" severity: 1 } BUILDINDEX RESPONSE CANCEL: NKikimrIndexBuilder.TEvCancelResponse TxId: 105 Status: PRECONDITION_FAILED Issues { message: "Index build process with id <102> has been finished already" severity: 1 } 2025-06-03T10:30:51.492800Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-03T10:30:51.492877Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: SUCCESS IndexBuild { Id: 102 State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } 
EndTime { seconds: 30 } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_DONE Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 100 StartTime { } EndTime { seconds: 30 } } 2025-06-03T10:30:51.493163Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:51.493221Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 64us result status StatusSuccess 2025-06-03T10:30:51.493389Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 13280 RowCount: 101 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 10 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 10 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 2706 Memory: 823440 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13280 DataSize: 13280 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.493650Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:51.493690Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 43us result status StatusSuccess 2025-06-03T10:30:51.493860Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/index1" PathDescription { Self { Name: "index1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 13280 DataSize: 13280 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "index1" LocalPathId: 3 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 
ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-true >> IndexBuildTest::RejectsCreate [GOOD] >> IndexBuildTest::RejectsDropIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::ShadowDataEdgeCases [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:51.078915Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:51.078959Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.078964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:51.078968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:51.078983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:51.078987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:51.078995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.079012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:51.079104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:51.079174Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:51.091933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:51.091960Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:51.096621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:51.096760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:51.096792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:51.099241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:51.099310Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:51.099429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.099503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:51.100157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.100219Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:51.100562Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.100572Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.100582Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:51.100589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.100594Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:51.100612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.102194Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:51.121207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" 
StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:51.121331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.121419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:51.121487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:51.121504Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.122519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.122553Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:51.122618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.122629Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:51.122634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:51.122639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:51.123323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.123345Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.123353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:51.123969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.123990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.123996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.124017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:51.124682Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 
72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.125263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:51.125325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:51.125510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.125540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.125547Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.125615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:51.125623Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.125661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:51.125672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:51.126111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.126117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.126163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
18: TAlterTable TPropose operationId# 109:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.698065Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 109 ready parts: 1/1 2025-06-03T10:30:51.698112Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } AffectedSet { TabletId: 72075186233409548 Flags: 2 } ExecLevel: 0 TxId: 109 MinStep: 5000008 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.698595Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 109:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:109 msg type: 269090816 2025-06-03T10:30:51.698637Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 109, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 109 at step: 5000008 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000007 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 109 at step: 5000008 FAKE_COORDINATOR: Send Plan to tablet 72075186233409548 for txId: 109 at step: 5000008 2025-06-03T10:30:51.698823Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000008, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.698852Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 109 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936746 } } Step: 5000008 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.698863Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_alter_table.cpp:359: TAlterTable TPropose operationId# 109:0 HandleReply TEvOperationPlan, operationId: 109:0, stepId: 5000008, at schemeshard: 72057594046678944 2025-06-03T10:30:51.698946Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 109:0 128 -> 129 2025-06-03T10:30:51.698985Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 FAKE_COORDINATOR: advance: minStep5000008 State->FrontStep: 5000008 2025-06-03T10:30:51.700344Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.700359Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 109, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:30:51.700428Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.700432Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 109, path id: 4 2025-06-03T10:30:51.700550Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 109:0, at 
schemeshard: 72057594046678944 2025-06-03T10:30:51.700559Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 109:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:51.700691Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 109 2025-06-03T10:30:51.700701Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 4 PathOwnerId: 72057594046678944, cookie: 109 2025-06-03T10:30:51.700706Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 109 2025-06-03T10:30:51.700715Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 109, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 4 2025-06-03T10:30:51.700720Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:30:51.700738Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 109, ready parts: 0/1, is published: true FAKE_COORDINATOR: Erasing txId 109 2025-06-03T10:30:51.702230Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 109 2025-06-03T10:30:51.702708Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6290: Handle TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 357 } } CommitVersion { Step: 5000008 TxId: 109 } 2025-06-03T10:30:51.702723Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 109, tablet: 72075186233409548, partId: 0 2025-06-03T10:30:51.702749Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 109:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 357 } } CommitVersion { Step: 5000008 TxId: 109 } 2025-06-03T10:30:51.702767Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409548 Status: COMPLETE TxId: 109 Step: 5000008 OrderId: 109 ExecLatency: 3 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409548 CpuTimeUsec: 357 } } CommitVersion { Step: 5000008 TxId: 109 } 2025-06-03T10:30:51.703043Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 
72057594046678944, message: Source { RawX1: 681 RawX2: 8589937226 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-06-03T10:30:51.703053Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 109, tablet: 72075186233409548, partId: 0 2025-06-03T10:30:51.703073Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 109:0, at schemeshard: 72057594046678944, message: Source { RawX1: 681 RawX2: 8589937226 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-06-03T10:30:51.703081Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 109:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:30:51.703090Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 109:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 681 RawX2: 8589937226 } Origin: 72075186233409548 State: 2 TxId: 109 Step: 0 Generation: 2 2025-06-03T10:30:51.703104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 109:0, shardIdx: 72057594046678944:3, datashard: 72075186233409548, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703112Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703118Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 109:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703126Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 109:0 129 -> 240 2025-06-03T10:30:51.703571Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703802Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703873Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 109:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.703883Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 109:0 ProgressState 2025-06-03T10:30:51.703900Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-06-03T10:30:51.703906Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-06-03T10:30:51.703912Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#109:0 progress is 1/1 2025-06-03T10:30:51.703917Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-06-03T10:30:51.703923Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 109, ready parts: 1/1, is published: true 2025-06-03T10:30:51.703939Z node 2 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:338:2316] message: TxId: 109 2025-06-03T10:30:51.703947Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 109 ready parts: 1/1 2025-06-03T10:30:51.703954Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 109:0 2025-06-03T10:30:51.703961Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 109:0 2025-06-03T10:30:51.703997Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:30:51.704637Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 109: got EvNotifyTxCompletionResult 2025-06-03T10:30:51.704657Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 109: satisfy waiter [2:788:2731] TestWaitNotification: OK eventTxId 109 >> TChargeBTreeIndex::FewNodes_History [GOOD] >> TChargeBTreeIndex::FewNodes_Sticky >> IndexBuildTest::RejectsDropIndex [GOOD] >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-false [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::RejectsDropIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:51.391203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:51.391231Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.391238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:51.391244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:51.391259Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:51.391264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:51.391276Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.391296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 
2025-06-03T10:30:51.391421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:51.391496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:51.407575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:51.407602Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:51.411947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:51.412061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:51.412102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:51.414380Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:51.414436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:51.414548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.414608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:51.415261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.415309Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:51.415614Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.415622Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.415632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:51.415639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.415644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:51.415659Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.416832Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:51.432229Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:51.432317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.432388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:51.432431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:51.432457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.433327Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.433360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:51.433431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.433441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:51.433445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:51.433450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:51.433905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.433918Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.433922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:51.434242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.434252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.434257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.434274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:51.434837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.435259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:51.435296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:51.435460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.435482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.435488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.435555Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:51.435561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.435596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:51.435606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:51.435957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.435965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.436003Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
10:30:52.384846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409547, partId: 0 2025-06-03T10:30:52.384864Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 107 Step: 5000004 OrderId: 107 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 156 } } CommitVersion { Step: 5000004 TxId: 107 } 2025-06-03T10:30:52.384875Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409547 Status: COMPLETE TxId: 107 Step: 5000004 OrderId: 107 ExecLatency: 0 ProposeLatency: 3 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409547 CpuTimeUsec: 156 } } CommitVersion { Step: 5000004 TxId: 107 } 2025-06-03T10:30:52.385021Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 5 TxId: 107 Step: 0 Generation: 2 2025-06-03T10:30:52.385026Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 107, tablet: 72075186233409547, partId: 0 2025-06-03T10:30:52.385040Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 107:0, at schemeshard: 72057594046678944, message: Source { RawX1: 327 RawX2: 8589936901 } Origin: 72075186233409547 State: 5 TxId: 107 Step: 0 Generation: 2 2025-06-03T10:30:52.385045Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:332: TDropTable TDeleteTableBarrier operationId: 107:0 HandleReply TEvDataShard::TEvSchemaChanged, save it, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385113Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385119Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:368: TDropTable TDeleteTableBarrier operationId: 107:0 ProgressState, operation type: TxDropTable, at tablet# 72057594046678944 2025-06-03T10:30:52.385126Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 107:0, name: RenamePathBarrier, done: 0, blocked: 1, parts count: 1 2025-06-03T10:30:52.385130Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 107, done: 0, blocked: 1 2025-06-03T10:30:52.385138Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:344: TDropTable TDeleteTableBarrier operationId: 107:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 107 Name: RenamePathBarrier }, at tablet# 72057594046678944 2025-06-03T10:30:52.385162Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 107:0 137 -> 129 2025-06-03T10:30:52.385180Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 
72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:52.385189Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:52.385549Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385736Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385770Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385775Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:52.385810Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:52.385829Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385832Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 107, path id: 1 2025-06-03T10:30:52.385836Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-06-03T10:30:52.385919Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385925Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:52.385939Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385942Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 107:0, datashard: 72075186233409547, at schemeshard: 72057594046678944 2025-06-03T10:30:52.385946Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 107:0 129 -> 240 2025-06-03T10:30:52.386065Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.386076Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.386079Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-03T10:30:52.386083Z 
node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:30:52.386087Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:30:52.386367Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.386380Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.386383Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-03T10:30:52.386387Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:30:52.386390Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:52.386400Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-03T10:30:52.386716Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.386725Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:52.386787Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:52.386813Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-06-03T10:30:52.386817Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.386820Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-06-03T10:30:52.386822Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.386826Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-03T10:30:52.386835Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:380:2347] message: TxId: 107 2025-06-03T10:30:52.386839Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.386843Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 2025-06-03T10:30:52.386846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 107:0 2025-06-03T10:30:52.386861Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:30:52.387116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:52.387173Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:52.387388Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-03T10:30:52.387396Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [2:586:2545]
TestWaitNotification: OK eventTxId 107
>> THiveTest::TestDownAfterDrain [GOOD]
>> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659
>> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-false [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:51.068082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:51.068109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.068121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:51.068127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:51.068142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:51.068147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:51.068158Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.068171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
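Editor's note: the schemeshard traces in this output follow one fixed entry shape (timestamp, node, component, severity, source location, message), but consecutive entries are flowed together on very long physical lines. Below is a minimal Python sketch for splitting such a blob back into structured records; the header pattern is inferred from the entries above rather than taken from any YDB specification, and parse_entries is a hypothetical helper name introduced here for illustration.

import re

# Entry header shape inferred from the traces above (an assumption, not a spec):
#   2025-06-03T10:30:52.386846Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: <message>
HEADER = re.compile(
    r"(?P<ts>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+Z) "
    r"node (?P<node>\d+) "
    r":(?P<component>[A-Z][A-Z_0-9]*) (?P<severity>TRACE|DEBUG|INFO|NOTICE|WARN|ERROR): "
    r"(?P<source>[\w./-]+:\d+): "
)

def parse_entries(blob: str):
    """Yield one dict per trace entry found in a flowed log blob.

    Entries are not newline-separated here, so we anchor on the header
    pattern and treat the text up to the next header as the message body.
    """
    heads = list(HEADER.finditer(blob))
    for i, head in enumerate(heads):
        end = heads[i + 1].start() if i + 1 < len(heads) else len(blob)
        record = head.groupdict()
        record["message"] = blob[head.end():end].strip()
        yield record

if __name__ == "__main__":
    # Example: paste a fragment of this log on stdin and count severities.
    import collections, sys
    print(collections.Counter(r["severity"] for r in parse_entries(sys.stdin.read())))

Fed the txId 107 fragment above, this yields one record per trace entry, which makes it straightforward to filter on severity, component, or message substrings such as "cookie: 107".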
2025-06-03T10:30:51.068255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:51.068322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:51.078841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:51.078873Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:51.082809Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:51.082925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:51.082961Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:51.085159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:51.085229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:51.085379Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.085456Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:51.086178Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.086239Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:51.086636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.086650Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.086665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:51.086674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.086680Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:51.086702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.088073Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:51.106746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 
TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:51.106851Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.106948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:51.107006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:51.107021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.107982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.108012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:51.108080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.108090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:51.108094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:51.108098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:51.108639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.108652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.108657Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:51.109045Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.109055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.109060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.109077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:51.109711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 
MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.110248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:51.110294Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:51.110497Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.110526Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.110532Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.110598Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:51.110604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.110636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:51.110646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:51.111143Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.111151Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.111196Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
shard: 72057594046678944 2025-06-03T10:30:52.429550Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4003: TTxInit for CompletedBackup, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429589Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4288: TTxInit for Publications, read records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4593: IndexBuild , records: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4651: KMeansTreeSample records: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429679Z node 1 :BUILD_INDEX DEBUG: schemeshard_info_types.h:3723: AddShardStatus id# 102 shard 72057594046678944:11 2025-06-03T10:30:52.429691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4746: SnapshotTables: snapshots: 0 tables: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429698Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4773: SnapshotSteps: snapshots: 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.429706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:4800: LongLocks: records: 4, at schemeshard: 72057594046678944 2025-06-03T10:30:52.431358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:52.431407Z node 1 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done 2025-06-03T10:30:52.431428Z node 1 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Done TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobalVectorKmeansTree, IndexName: by_embedding, IndexColumn: embedding, DataColumns: covered, State: Done, IsCancellationRequested: 0, Issue: , SubscribersCount: 0, CreateSender: [0:0:0], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976720769, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976720770, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:52.431437Z node 1 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 0 2025-06-03T10:30:52.432118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.432133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.432153Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:52.432159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:52.432164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:52.432499Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:3199:4993] sender: [1:3257:2058] recipient: [1:15:2062] 2025-06-03T10:30:52.463951Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:52.464056Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 124us result status StatusSuccess 2025-06-03T10:30:52.464356Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 8 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 8 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "embedding" 
SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TableDiskSpaceQuotas [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:40.892086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:40.892117Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:40.892123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:40.892130Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:40.892150Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:40.892155Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:40.892165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:40.892182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s,
IsManualStartup# false 2025-06-03T10:30:40.892298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:40.892392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:40.906246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:40.906273Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:40.910002Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:40.910105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:40.910141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:40.912013Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:40.912089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:40.912191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.912241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:40.912884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:40.912938Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:40.913221Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:40.913229Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:40.913238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:40.913243Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:40.913248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:40.913264Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.914590Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:40.930479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: 
"pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:40.930572Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.930641Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:40.930692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:40.930701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.931558Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.931583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:40.931644Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.931652Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:40.931657Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:40.931662Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:40.932315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.932342Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:40.932350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:40.932872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.932887Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:40.932895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.932905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:40.933807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 
0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:40.934410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:40.934466Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:40.934709Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:40.934742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:40.934763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.934849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:40.934858Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:40.934905Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:40.934921Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:40.935428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:40.935440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:40.935496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
entPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:52.875304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:30:52.875437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.875461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876167Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:52.876265Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 107, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:30:52.876304Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 107, path id: 2 2025-06-03T10:30:52.876319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 107, path id: 4 2025-06-03T10:30:52.876467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 107:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:52.876524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 107:0, datashard: 72075186233409549, at schemeshard: 72057594046678944 2025-06-03T10:30:52.876534Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 107:0 129 -> 240 2025-06-03T10:30:52.876763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.876780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 14 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.876786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at 
schemeshard: 72057594046678944, txId: 107 2025-06-03T10:30:52.876794Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 14 2025-06-03T10:30:52.876802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:52.877074Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.877084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 107 2025-06-03T10:30:52.877087Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 107 2025-06-03T10:30:52.877091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 107, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:30:52.877094Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:30:52.877103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 0/1, is published: true 2025-06-03T10:30:52.878121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.878139Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 107:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:52.878243Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:30:52.878277Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-06-03T10:30:52.878282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.878286Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#107:0 progress is 1/1 2025-06-03T10:30:52.878288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.878292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 107, ready parts: 1/1, is published: true 2025-06-03T10:30:52.878296Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 107 ready parts: 1/1 2025-06-03T10:30:52.878301Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 107:0 
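Editor's note: in both drop sequences in this output (the txId 107 operations here and in the preceding test), the version published for the path being removed is 18446744073709551615, which is exactly the maximum unsigned 64-bit value. Reading it as a tombstone version that outranks any incrementally bumped path version is our inference from this log, not a documented contract, but the arithmetic itself is easy to confirm:

# Version published above for the dropped paths (LocalPathId 2 and 4).
DROPPED_PATH_VERSION = 18446744073709551615

# It is precisely 2**64 - 1, the all-ones unsigned 64-bit pattern, so any
# ordinary path version published earlier compares strictly below it.
assert DROPPED_PATH_VERSION == 2**64 - 1 == 0xFFFFFFFFFFFFFFFF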
2025-06-03T10:30:52.878307Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 107:0 2025-06-03T10:30:52.878325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:30:52.878426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.878431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:52.878634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:52.879061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 107 2025-06-03T10:30:52.879430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.879446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-03T10:30:52.879654Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 15 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification wait txId: 107 2025-06-03T10:30:52.879824Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-03T10:30:52.879831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-03T10:30:52.879917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-03T10:30:52.879934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-03T10:30:52.879938Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [1:986:2909] TestWaitNotification: OK eventTxId 107 2025-06-03T10:30:52.880017Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_0" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:52.880062Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_0" took 55us result status StatusSuccess 2025-06-03T10:30:52.880148Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_0" PathDescription { Self { Name: "USER_0" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 
PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 15 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 15 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 9 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> THiveTest::TestCreateTabletsWithRaceForStoragePoolsKIKIMR_9659 [GOOD]
>> THiveTest::TestDeleteTablet
>> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-true [GOOD]
>> THiveTest::TestDeleteTablet [GOOD]
>> THiveTest::TestDeleteOwnerTablets
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> VectorIndexBuildTest::VectorIndexDescriptionIsPersisted-prefixed-true [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:30:52.134782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:52.134813Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:52.134820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:52.134826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:52.134841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:52.134847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:52.134857Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:52.134872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager]
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:52.134987Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:52.135061Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:52.147839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:52.147865Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:52.151115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:52.151238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:52.151272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:52.153132Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:52.153185Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:52.153308Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:52.153384Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:52.154008Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.154057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:52.154356Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.154368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:52.154382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:52.154389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:52.154394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:52.154411Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.155484Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:52.169618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: 
"/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:52.169693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.169752Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:52.169791Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:52.169800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.170621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:52.170645Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:52.170704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.170712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:52.170716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:52.170720Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:52.171060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.171069Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:52.171073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:52.171373Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.171382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:52.171387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:52.171401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:52.171917Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:52.172381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:52.172418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:52.172593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:52.172613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:52.172620Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:52.172680Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:52.172686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:52.172718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:52.172728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:52.173107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:52.173113Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:52.173152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
d_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 0 2025-06-03T10:30:53.607712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:53.607726Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:53.607867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:53.607876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:53.607882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:53.608174Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594046678944 is [1:2633:4378] sender: [1:2691:2058] recipient: [1:15:2062] 2025-06-03T10:30:53.639689Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/vectors/by_embedding" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:53.639788Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/vectors/by_embedding" took 132us result status StatusSuccess 2025-06-03T10:30:53.640059Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/vectors/by_embedding" PathDescription { Self { Name: "by_embedding" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 6 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplLevelTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: "indexImplPostingTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Children { Name: 
"indexImplPrefixTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000004 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 10 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "by_embedding" LocalPathId: 3 Type: EIndexTypeGlobalVectorKmeansTree State: EIndexStateReady KeyColumnNames: "prefix" KeyColumnNames: "embedding" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "covered" DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { 
MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 
ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } VectorIndexKmeansTreeDescription { Settings { settings { metric: DISTANCE_COSINE vector_type: VECTOR_TYPE_FLOAT vector_dimension: 1024 } clusters: 4 levels: 5 } } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |67.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge >> THiveTest::TestDeleteOwnerTablets [GOOD] |67.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge >> TChargeBTreeIndex::FewNodes_Sticky [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History >> THiveTest::TestDeleteOwnerTabletsMany >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] >> THiveTest::TestHiveBalancerIgnoreTablet [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts >> ResultFormatter::EmptyDict [GOOD] >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] >> ResultFormatter::EmptyResultSet [GOOD] >> ResultFormatter::Tuple [GOOD] >> ResultFormatter::List [GOOD] >> ResultFormatter::StructWithNoFields >> ResultFormatter::Utf8WithQuotes [GOOD] >> ResultFormatter::Primitive [GOOD] >> ResultFormatter::Void [GOOD] >> ResultFormatter::FormatEmptySchema [GOOD] |67.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader >> ResultFormatter::Dict [GOOD] >> ResultFormatter::Decimal [GOOD] >> ResultFormatter::EmptyList [GOOD] >> ResultFormatter::Struct [GOOD] >> ResultFormatter::Tagged [GOOD] >> ResultFormatter::Null [GOOD] >> ResultFormatter::VariantStruct [GOOD] >> ResultFormatter::EmptyTuple [GOOD] >> ResultFormatter::VariantTuple [GOOD] >> ResultFormatter::FormatNonEmptySchema [GOOD] >> ResultFormatter::StructWithNoFields [GOOD] >> ResultFormatter::StructTypeNameAsString [GOOD] |67.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |67.2%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge/ydb-core-tx-schemeshard-ut_split_merge |67.2%| [LD] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/ydb-core-backup-impl-ut_local_partition_reader |67.2%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_column_build/test-results/unittest/{meta.json ... 
results_accumulator.log} |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantStruct [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Tagged [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Null [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::EmptyTuple [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Struct [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::StructTypeNameAsString [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Decimal [GOOD] |67.2%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::FormatNonEmptySchema [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::VariantTuple [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineCopyRangeWorksNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:84:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:85:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:85:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:85:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! 
Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:85:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:86:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 29:80:2110] !Reboot 72057594037927937 (actor [29:57:2097]) rebooted! !Reboot 72057594037927937 (actor [29:57:2097]) tablet resolver refreshed! 
new actor is[29:82:2111] Leader for TabletID 72057594037927937 is [29:82:2111] sender: [29:168:2057] recipient: [29:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [30:55:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:58:2057] recipient: [30:50:2095] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:75:2057] recipient: [30:14:2061] !Reboot 72057594037927937 (actor [30:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:78:2057] recipient: [30:36:2083] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:81:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [30:57:2097] sender: [30:82:2057] recipient: [30:80:2110] Leader for TabletID 72057594037927937 is [30:83:2111] sender: [30:84:2057] recipient: [30:80:2110] !Reboot 72057594037927937 (actor [30:57:2097]) rebooted! !Reboot 72057594037927937 (actor [30:57:2097]) tablet resolver refreshed! new actor is[30:83:2111] Leader for TabletID 72057594037927937 is [30:83:2111] sender: [30:169:2057] recipient: [30:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [31:55:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:58:2057] recipient: [31:51:2095] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:75:2057] recipient: [31:14:2061] !Reboot 72057594037927937 (actor [31:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:81:2057] recipient: [31:36:2083] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:84:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [31:57:2097] sender: [31:85:2057] recipient: [31:83:2113] Leader for TabletID 72057594037927937 is [31:86:2114] sender: [31:87:2057] recipient: [31:83:2113] !Reboot 72057594037927937 (actor [31:57:2097]) rebooted! !Reboot 72057594037927937 (actor [31:57:2097]) tablet resolver refreshed! new actor is[31:86:2114] Leader for TabletID 72057594037927937 is [31:86:2114] sender: [31:172:2057] recipient: [31:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [32:55:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:58:2057] recipient: [32:51:2095] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:75:2057] recipient: [32:14:2061] !Reboot 72057594037927937 (actor [32:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:81:2057] recipient: [32:36:2083] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:84:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [32:57:2097] sender: [32:85:2057] recipient: [32:83:2113] Leader for TabletID 72057594037927937 is [32:86:2114] sender: [32:87:2057] recipient: [32:83:2113] !Reboot 72057594037927937 (actor [32:57:2097]) rebooted! !Reboot 72057594037927937 (actor [32:57:2097]) tablet resolver refreshed! 
new actor is[32:86:2114] Leader for TabletID 72057594037927937 is [32:86:2114] sender: [32:172:2057] recipient: [32:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [33:55:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:58:2057] recipient: [33:51:2095] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:75:2057] recipient: [33:14:2061] !Reboot 72057594037927937 (actor [33:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:82:2057] recipient: [33:36:2083] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:85:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [33:57:2097] sender: [33:86:2057] recipient: [33:84:2113] Leader for TabletID 72057594037927937 is [33:87:2114] sender: [33:88:2057] recipient: [33:84:2113] !Reboot 72057594037927937 (actor [33:57:2097]) rebooted! !Reboot 72057594037927937 (actor [33:57:2097]) tablet resolver refreshed! new actor is[33:87:2114] Leader for TabletID 72057594037927937 is [33:87:2114] sender: [33:173:2057] recipient: [33:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [34:55:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:58:2057] recipient: [34:51:2095] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:75:2057] recipient: [34:14:2061] !Reboot 72057594037927937 (actor [34:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:85:2057] recipient: [34:36:2083] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:88:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [34:57:2097] sender: [34:89:2057] recipient: [34:87:2116] Leader for TabletID 72057594037927937 is [34:90:2117] sender: [34:91:2057] recipient: [34:87:2116] !Reboot 72057594037927937 (actor [34:57:2097]) rebooted! !Reboot 72057594037927937 (actor [34:57:2097]) tablet resolver refreshed! new actor is[34:90:2117] Leader for TabletID 72057594037927937 is [34:90:2117] sender: [34:176:2057] recipient: [34:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:55:2057] recipient: [35:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [35:55:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:58:2057] recipient: [35:51:2095] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:75:2057] recipient: [35:14:2061] !Reboot 72057594037927937 (actor [35:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:85:2057] recipient: [35:36:2083] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:88:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [35:57:2097] sender: [35:89:2057] recipient: [35:87:2116] Leader for TabletID 72057594037927937 is [35:90:2117] sender: [35:91:2057] recipient: [35:87:2116] !Reboot 72057594037927937 (actor [35:57:2097]) rebooted! !Reboot 72057594037927937 (actor [35:57:2097]) tablet resolver refreshed! 
new actor is[35:90:2117] Leader for TabletID 72057594037927937 is [35:90:2117] sender: [35:176:2057] recipient: [35:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:55:2057] recipient: [36:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:55:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:58:2057] recipient: [36:51:2095] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:75:2057] recipient: [36:14:2061] !Reboot 72057594037927937 (actor [36:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:86:2057] recipient: [36:36:2083] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:89:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [36:57:2097] sender: [36:90:2057] recipient: [36:88:2116] Leader for TabletID 72057594037927937 is [36:91:2117] sender: [36:92:2057] recipient: [36:88:2116] !Reboot 72057594037927937 (actor [36:57:2097]) rebooted! !Reboot 72057594037927937 (actor [36:57:2097]) tablet resolver refreshed! new actor is[36:91:2117] Leader for TabletID 72057594037927937 is [36:91:2117] sender: [36:177:2057] recipient: [36:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:55:2057] recipient: [37:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:55:2057] recipient: [37:51:2095] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:58:2057] recipient: [37:51:2095] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:75:2057] recipient: [37:14:2061] !Reboot 72057594037927937 (actor [37:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:89:2057] recipient: [37:36:2083] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:92:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [37:57:2097] sender: [37:93:2057] recipient: [37:91:2119] Leader for TabletID 72057594037927937 is [37:94:2120] sender: [37:95:2057] recipient: [37:91:2119] !Reboot 72057594037927937 (actor [37:57:2097]) rebooted! !Reboot 72057594037927937 (actor [37:57:2097]) tablet resolver refreshed! new actor is[37:94:2120] Leader for TabletID 72057594037927937 is [37:94:2120] sender: [37:180:2057] recipient: [37:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:55:2057] recipient: [38:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:55:2057] recipient: [38:50:2095] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:58:2057] recipient: [38:50:2095] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:75:2057] recipient: [38:14:2061] !Reboot 72057594037927937 (actor [38:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:89:2057] recipient: [38:36:2083] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:92:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [38:57:2097] sender: [38:93:2057] recipient: [38:91:2119] Leader for TabletID 72057594037927937 is [38:94:2120] sender: [38:95:2057] recipient: [38:91:2119] !Reboot 72057594037927937 (actor [38:57:2097]) rebooted! !Reboot 72057594037927937 (actor [38:57:2097]) tablet resolver refreshed! 
new actor is[38:94:2120] Leader for TabletID 72057594037927937 is [38:94:2120] sender: [38:180:2057] recipient: [38:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:55:2057] recipient: [39:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:55:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:58:2057] recipient: [39:51:2095] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:75:2057] recipient: [39:14:2061] !Reboot 72057594037927937 (actor [39:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:90:2057] recipient: [39:36:2083] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:93:2057] recipient: [39:14:2061] Leader for TabletID 72057594037927937 is [39:57:2097] sender: [39:94:2057] recipient: [39:92:2119] Leader for TabletID 72057594037927937 is [39:95:2120] sender: [39:96:2057] recipient: [39:92:2119] !Reboot 72057594037927937 (actor [39:57:2097]) rebooted! !Reboot 72057594037927937 (actor [39:57:2097]) tablet resolver refreshed! new actor is[39:95:2120] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:55:2057] recipient: [40:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:55:2057] recipient: [40:50:2095] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:58:2057] recipient: [40:50:2095] Leader for TabletID 72057594037927937 is [40:57:2097] sender: [40:75:2057] recipient: [40:14:2061] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TStoragePoolsQuotasTest::DifferentQuotasInteraction-EnableSeparateQuotas [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:45.608389Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:45.608421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.608428Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:45.608436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:45.608451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:45.608455Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:45.608485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:45.608501Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: 
[RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:45.608629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:45.608734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:45.622863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:45.622891Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:45.627080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:45.627208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:45.627246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:45.630161Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:45.630238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:45.630372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.630437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:45.631241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.631297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:45.631627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.631639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:45.631650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:45.631656Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.631660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:45.631678Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.632954Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:45.651041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: 
Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:45.651121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.651184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:45.651227Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:45.651237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.652020Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.652047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:45.652102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.652111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:45.652115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:45.652120Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:45.652601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.652612Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:45.652617Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:45.653057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.653066Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:45.653070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.653077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:45.653781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:45.654321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:45.654372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:45.654596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:45.654627Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:45.654641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.654701Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:45.654707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:45.654739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:45.654750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:45.655220Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:45.655228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:45.655265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
594046678944, LocalPathId: 2] was 4 2025-06-03T10:30:55.987304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:55.987709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988544Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:55.988611Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:55.988661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988668Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:30:55.988674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-03T10:30:55.988805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988814Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1045: NTableState::TProposedWaitParts operationId# 103:0 ProgressState at tablet: 72057594046678944 2025-06-03T10:30:55.988833Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 103:0, datashard: 72075186233409548, at schemeshard: 72057594046678944 2025-06-03T10:30:55.988849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 129 -> 240 2025-06-03T10:30:55.989042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:55.989071Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 10 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:55.989077Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:30:55.989083Z 
node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 10 2025-06-03T10:30:55.989090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:30:55.989235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:55.989248Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:30:55.989252Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:30:55.989257Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:30:55.989261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:30:55.989272Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 0/1, is published: true 2025-06-03T10:30:55.990218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:30:55.990235Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 103:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:55.990357Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:30:55.990401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:55.990408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:55.990414Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:30:55.990422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:55.990427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: true 2025-06-03T10:30:55.990446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:405:2370] message: TxId: 103 2025-06-03T10:30:55.990452Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:30:55.990458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:30:55.990464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:30:55.990488Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:55.990611Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:55.990618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 0, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:30:55.990802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:55.991190Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:30:55.991506Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:55.991517Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 0, path id: 2 2025-06-03T10:30:55.991596Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:30:55.991604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:1333:3256] 2025-06-03T10:30:55.991765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 11 PathOwnerId: 72057594046678944, cookie: 0 TestWaitNotification: OK eventTxId 103 2025-06-03T10:30:55.992449Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDatabase" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:55.992494Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDatabase" took 75us result status StatusSuccess 2025-06-03T10:30:55.992583Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDatabase" PathDescription { Self { Name: "SomeDatabase" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 11 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 11 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 4 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 
ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "fast" Kind: "fast_kind" } StoragePools { Name: "large" Kind: "large_kind" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } StoragePoolsUsage { PoolKind: "large_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } StoragePoolsUsage { PoolKind: "fast_kind" TotalSize: 0 DataSize: 0 IndexSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 2800 data_size_soft_quota: 2200 storage_quotas { unit_kind: "fast_kind" data_size_hard_quota: 600 data_size_soft_quota: 500 } storage_quotas { unit_kind: "large_kind" data_size_hard_quota: 2200 data_size_soft_quota: 1700 } } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> THiveTest::TestDeleteOwnerTabletsMany [GOOD] >> THiveTest::TestDeleteTabletWithFollowers >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] >> IndexBuildTest::BaseCase [GOOD] >> IndexBuildTest::CancelBuild |67.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |67.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut |67.3%| [LD] {RESULT} $(B)/ydb/core/kqp/provider/ut/ydb-core-kqp-provider-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Mirror3dc [GOOD] Test command err: RandomSeed# 9258539310278670142 Step = 0 SEND TEvPut with key [1:1:0:0:0:51943:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:51943:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:85877:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:85877:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:192081:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:192081:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:267203:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:267203:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 3 2025-06-03T10:30:38.406314Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 6 SEND TEvPut with key [1:1:6:0:0:377427:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:377427:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-03T10:30:38.445045Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 7 SEND TEvPut with key [1:1:7:0:0:48850:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:48850:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 8 SEND TEvPut 
with key [1:1:8:0:0:411812:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:411812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 9 SEND TEvPut with key [1:1:9:0:0:293766:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:293766:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Start node 3 Step = 10 SEND TEvPut with key [1:1:10:0:0:127358:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:127358:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 11 SEND TEvPut with key [1:1:11:0:0:282945:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:282945:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 12 SEND TEvPut with key [1:1:12:0:0:34864:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:34864:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 13 SEND TEvPut with key [1:1:13:0:0:363096:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:363096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 15 SEND TEvPut with key [1:1:15:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 16 SEND TEvPut with key [1:1:16:0:0:136892:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:136892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 17 SEND TEvPut with key [1:1:17:0:0:517733:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:517733:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 18 SEND TEvPut with key [1:1:18:0:0:250802:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:250802:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 19 SEND TEvPut with key [1:1:19:0:0:199490:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:199490:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 20 SEND TEvPut with key [1:1:20:0:0:244269:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:244269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 21 SEND TEvPut with key [1:1:21:0:0:329606:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:329606:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 23 SEND TEvPut with key [1:1:23:0:0:519258:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:519258:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 25 SEND TEvPut with key [1:1:25:0:0:514591:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:514591:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Stop node 7 2025-06-03T10:30:38.617204Z 1 00h01m30.100512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 26 SEND TEvPut with key [1:1:26:0:0:5927:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:5927:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 28 
SEND TEvPut with key [1:1:28:0:0:6043:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:6043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 30 SEND TEvPut with key [1:1:30:0:0:264716:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:264716:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Compact vdisk 3 Step = 31 SEND TEvPut with key [1:1:31:0:0:168116:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:168116:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 32 SEND TEvPut with key [1:1:32:0:0:444749:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:444749:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 33 SEND TEvPut with key [1:1:33:0:0:350254:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:350254:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 34 SEND TEvPut with key [1:1:34:0:0:145950:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:145950:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 35 SEND TEvPut with key [1:1:35:0:0:358611:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:358611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 36 SEND TEvPut with key [1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 38 SEND TEvPut with key [1:1:38:0:0:185170:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:185170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 39 SEND TEvPut with key [1:1:39:0:0:297271:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:297271:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 40 SEND TEvPut with key [1:1:40:0:0:419670:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:419670:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 41 SEND TEvPut with key [1:1:41:0:0:218956:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:218956:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 42 SEND TEvPut with key [1:1:42:0:0:154723:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:154723:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 43 SEND TEvPut with key [1:1:43:0:0:13332:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:13332:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 44 SEND TEvPut with key [1:1:44:0:0:448892:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:448892:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 45 SEND TEvPut with key [1:1:45:0:0:103231:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:103231:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 46 SEND TEvPut with key [1:1:46:0:0:295973:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:295973:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 47 SEND TEvPut with key [1:1:47:0:0:402799:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:402799:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 48 SEND TEvPut with key [1:1:48:0:0:165045:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:165045:0] Status# 
OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 49 SEND TEvPut with key [1:1:49:0:0:360099:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:360099:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 50 SEND TEvPut with key [1:1:50:0:0:97222:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:97222:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 51 SEND TEvPut with key [1:1:51:0:0:303396:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:303396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 52 SEND TEvPut with key [1:1:52:0:0:304876:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:304876:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Step = 53 SEND TEvPut with key [1:1:53:0:0:375063:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:375063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999927} Start node 4 Step = 54 SEND TEvPut with key [1:1:54:0:0:288044:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:288044:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 55 SEND TEvPut with key [1:1:55:0:0:181559:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:181559:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999866} Step = 57 SEND TEvPut with key [1:1:57:0:0:424399:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:424399:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Step = 58 SEND TEvPut with key [1:1:58:0:0:169341:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:169341:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 59 SEND TEvPut with key [1:1:59:0:0:405932:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:405932:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999902} Step = 60 SEND TEvPut with key [1:1:60:0:0:190148:0] TEvPutResult: TEvPutResult {Id# [1:1:60:0:0:190148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999915} Stop node 3 2025-06-03T10:30:38.855486Z 1 00h02m00.150512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Wipe node 0 2025-06-03T10:30:38.883679Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: DECISION: [Decision# LostData SubsequentFailure# 0] 2025-06-03T10:30:38.884118Z 1 00h02m10.161024s :BS_SYNCER ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TVDiskGuidRecoveryActor: FINISH: [Decision# LostData Guid# 16268344753019398412] Step = 61 SEND TEvPut with key [1:1:61:0:0:500240:0] 2025-06-03T10:30:39.090159Z 1 00h03m50.161024s :BS_PROXY ERROR: Group# 2181038080 StateEstablishingSessions Wakeup TIMEOUT Marker# DSP12 TEvPutResult: TEvPutResult {Id# [1:1:61:0:0:500240:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpaceShare# 0} Step = 62 SEND TEvPut with key [1:1:62:0:0:354994:0] TEvPutResult: TEvPutResult {Id# [1:1:62:0:0:354994:0] Status# ERROR StatusFlags# { } ErrorReason# "Timeout while establishing sessions (DSPE4)." ApproximateFreeSpac ... 
1:945:0:0:76599:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 2 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 948 SEND TEvPut with key [1:1:948:0:0:112126:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:112126:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 949 SEND TEvPut with key [1:1:949:0:0:525378:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:525378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 950 SEND TEvPut with key [1:1:950:0:0:410875:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:410875:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 951 SEND TEvPut with key [1:1:951:0:0:113503:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:113503:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 952 SEND TEvPut with key [1:1:952:0:0:431140:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:431140:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 953 SEND TEvPut with key [1:1:953:0:0:509293:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:509293:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 3 2025-06-03T10:30:50.932433Z 1 00h28m00.983072s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:286395:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:286395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 1 2025-06-03T10:30:50.976045Z 1 00h28m10.983584s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 955 SEND TEvPut with key [1:1:955:0:0:219270:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:219270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Start node 1 Step = 956 SEND TEvPut with key [1:1:956:0:0:274971:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:274971:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999805} Step = 957 SEND TEvPut with key [1:1:957:0:0:487884:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:487884:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Start node 3 Step = 958 SEND TEvPut with key [1:1:958:0:0:327302:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:327302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 961 SEND TEvPut with key [1:1:961:0:0:61147:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:61147:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 962 SEND TEvPut with key [1:1:962:0:0:237906:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:237906:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 963 SEND TEvPut with key [1:1:963:0:0:347273:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:347273:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 964 SEND TEvPut with 
key [1:1:964:0:0:181317:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:181317:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 965 SEND TEvPut with key [1:1:965:0:0:456096:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:456096:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 966 SEND TEvPut with key [1:1:966:0:0:93776:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:93776:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 967 SEND TEvPut with key [1:1:967:0:0:447659:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:447659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 969 SEND TEvPut with key [1:1:969:0:0:92781:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:92781:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Stop node 0 2025-06-03T10:30:51.161959Z 9 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [9:127537:347] ServerId# [1:128569:166] TabletId# 72057594037932033 PipeClientId# [9:127537:347] 2025-06-03T10:30:51.162009Z 8 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [8:158207:17] ServerId# [1:158216:4094] TabletId# 72057594037932033 PipeClientId# [8:158207:17] 2025-06-03T10:30:51.162027Z 7 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:157125:17] ServerId# [1:157135:3964] TabletId# 72057594037932033 PipeClientId# [7:157125:17] 2025-06-03T10:30:51.162052Z 6 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:134203:17] ServerId# [1:134210:1011] TabletId# 72057594037932033 PipeClientId# [6:134203:17] 2025-06-03T10:30:51.162070Z 5 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:154244:17] ServerId# [1:154252:3592] TabletId# 72057594037932033 PipeClientId# [5:154244:17] 2025-06-03T10:30:51.162088Z 4 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:163118:17] ServerId# [1:163128:4692] TabletId# 72057594037932033 PipeClientId# [4:163118:17] 2025-06-03T10:30:51.162106Z 3 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:153132:17] ServerId# [1:153142:3466] TabletId# 72057594037932033 PipeClientId# [3:153132:17] 2025-06-03T10:30:51.162125Z 2 00h28m41.001536s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:162152:17] ServerId# [1:162159:4583] TabletId# 72057594037932033 PipeClientId# [2:162152:17] Step = 971 SEND TEvPut with key [1:1:971:0:0:439384:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:439384:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999792} Step = 972 SEND TEvPut with key [1:1:972:0:0:252551:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:252551:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 973 SEND TEvPut with key [1:1:973:0:0:39982:0] 
TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:39982:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Stop node 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:526796:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:526796:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Start node 0 Step = 975 SEND TEvPut with key [1:1:975:0:0:337763:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:337763:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:475740:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:475740:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:169780:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:169780:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:159890:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:159890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 981 SEND TEvPut with key [1:1:981:0:0:111300:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:111300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:355914:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:355914:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 983 SEND TEvPut with key [1:1:983:0:0:399106:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:399106:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:261994:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:261994:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:138774:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:138774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 988 SEND TEvPut with key [1:1:988:0:0:441913:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:441913:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 989 SEND TEvPut with key [1:1:989:0:0:134469:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:134469:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 990 SEND TEvPut with key [1:1:990:0:0:123825:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:123825:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 991 SEND TEvPut with key [1:1:991:0:0:40387:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:40387:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Stop node 7 2025-06-03T10:30:51.415712Z 1 00h29m21.012560s :PIPE_SERVER 
ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 993 SEND TEvPut with key [1:1:993:0:0:455894:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:455894:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99967} Compact vdisk 0 Step = 994 SEND TEvPut with key [1:1:994:0:0:54378:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:54378:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Compact vdisk 6 Step = 995 SEND TEvPut with key [1:1:995:0:0:487669:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:487669:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 996 SEND TEvPut with key [1:1:996:0:0:194641:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:194641:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 997 SEND TEvPut with key [1:1:997:0:0:74188:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:74188:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Step = 998 SEND TEvPut with key [1:1:998:0:0:136082:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:136082:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999695} Step = 999 SEND TEvPut with key [1:1:999:0:0:145518:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:145518:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999731} Starting nodes Start compaction 1 Start checking |67.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> EscapingBasics::HideSecretsOverEncloseSecretShouldWork [GOOD] >> EscapingBasics::EscapeStringShouldWork [GOOD] >> IssuesTextFiltering::ShouldRemoveDatabasePath [GOOD] >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk >> Cache::Test4 [GOOD] >> Cache::Test5 >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk >> EntityId::Order >> THiveTest::TestDeleteTabletWithFollowers [GOOD] >> EscapingBasics::HideSecretsShouldWork [GOOD] >> THiveTest::TestCreateTabletReboots >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] >> IcebergClusterProcessor::ValidateDdlCreationForHadoopWithS3 [GOOD] >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] >> Cache::Test1 [GOOD] >> Cache::Test2 [GOOD] >> Cache::Test3 [GOOD] >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] >> EntityId::Distinct [GOOD] >> EntityId::MinId [GOOD] >> EntityId::MaxId [GOOD] >> SplitterBasic::EqualSplitByMaxRowsLimitPerChunk [GOOD] >> SplitterBasic::LimitExceed [GOOD] >> EntityId::Order [GOOD] >> EscapingBasics::EncloseSecretShouldWork [GOOD] >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] >> ResultFormatter::Optional [GOOD] >> ResultFormatter::Pg |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutCatalog [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EscapeStringShouldWork [GOOD] >> ResultFormatter::Pg [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> IcebergClusterProcessor::ValidateConfigurationWithoutWarehouse [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> Cache::Test3 [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EntityId::MaxId [GOOD] |67.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |67.3%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |67.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_view/ydb-core-tx-schemeshard-ut_view |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::EqualSplitByMaxBytesLimitPerChunk [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> SplitterBasic::LimitExceed [GOOD] |67.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EscapingBasics::EncloseAndEscapeStringShouldWork [GOOD] |67.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |67.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |67.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_data_source/ydb-core-tx-schemeshard-ut_external_data_source |67.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/result_formatter/ut/unittest >> ResultFormatter::Pg [GOOD] >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots [GOOD] >> Cache::Test5 [GOOD] >> EntityId::CheckId [GOOD] >> THiveTest::TestFollowersCrossDC_MovingLeader [GOOD] >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower >> LocalPartitionReader::FeedSlowly >> LocalPartitionReader::FeedSlowly [GOOD] |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest |67.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/common/ut/unittest >> EntityId::CheckId [GOOD] >> THiveTest::TestHiveBalancerNodeRestarts [GOOD] >> THiveTest::TestHiveBalancerUselessNeighbourMoves ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:46.750841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:46.750867Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.750872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: 
StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:46.750876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:46.750888Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:46.750890Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:46.750897Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.750909Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:46.750999Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:46.751064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:46.762566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:46.762595Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:46.762671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.764767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:46.764849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:46.764876Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:46.766794Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:46.766874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:46.766998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.767074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:46.767646Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.767711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:46.768011Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.768024Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.768040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:46.768050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.768057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:46.768105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:46.769943Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.792927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.793018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.793095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:46.793143Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:46.793155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.794039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.794077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:46.794143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.794156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:46.794162Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: 
TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:46.794169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:46.794683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.794700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:46.794708Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:46.795124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.795140Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.795148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.795156Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.795896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:46.796388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:46.796439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:46.796693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.796728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.796737Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.796817Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:59.267437Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:30:59.267452Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:30:59.267469Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:59.267473Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-03T10:30:59.267477Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-03T10:30:59.267480Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-03T10:30:59.267525Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:30:59.267531Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-03T10:30:59.267544Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:30:59.267547Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:30:59.267550Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:30:59.267553Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:30:59.267556Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-03T10:30:59.267559Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:30:59.267562Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1004:0 2025-06-03T10:30:59.267565Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1004:0 2025-06-03T10:30:59.267574Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:30:59.267577Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
2025-06-03T10:30:59.267580Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-03T10:30:59.267582Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:30:59.267586Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:30:59.267590Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:30:59.267658Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267669Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267674Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:30:59.267679Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:30:59.267684Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:30:59.267740Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:30:59.267747Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:30:59.267762Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:30:59.267836Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267844Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267849Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:30:59.267854Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:30:59.267858Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:30:59.267917Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267927Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.267931Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:30:59.267935Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:30:59.267939Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:30:59.267947Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-03T10:30:59.268689Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.269136Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:30:59.269163Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:30:59.269183Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-03T10:30:59.269275Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:30:59.269284Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:30:59.269440Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:30:59.269461Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:30:59.269466Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [50:389:2379] TestWaitNotification: OK eventTxId 1004 2025-06-03T10:30:59.269571Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:59.269609Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 50us result status StatusPathDoesNotExist 2025-06-03T10:30:59.269655Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |67.4%| [TA] $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> LocalPartitionReader::Booting >> LocalPartitionReader::Simple >> IndexBuildTest::CancelBuild [GOOD] >> LocalPartitionReader::Booting [GOOD] >> LocalPartitionReader::Simple [GOOD] |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::FeedSlowly [GOOD] |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> THiveTest::TestCreateTabletReboots [GOOD] >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk |67.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |67.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest |67.4%| [TA] {RESULT} $(B)/ydb/core/fq/libs/result_formatter/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |67.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/idx_test/ydb-core-kqp-ut-idx_test |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> TSchemeShardSplitByLoad::IndexTableSplitsUpToMainTableCurrentPartitionCount >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 [GOOD] |67.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |67.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |67.4%| [LD] {RESULT} $(B)/ydb/core/tx/scheme_board/ut_populator/ydb-core-tx-scheme_board-ut_populator |67.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest |67.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Simple [GOOD] |67.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest >> LocalPartitionReader::Booting [GOOD] |67.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime >> THiveTest::TestCreateTabletWithWrongSPoolsAndReassignGroupsFailButDeletionIsOk [GOOD] >> THiveTest::TestFollowerCompatability1 |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime |67.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/runtime/ydb-core-kqp-ut-runtime >> KikimrIcGateway::TestCreateSameExternalTable >> THiveTest::TestFollowersCrossDC_KillingHiveAndFollower [GOOD] >> THiveTest::TestFollowerCompatability3 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancelBuild [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:51.154918Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:51.154945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.154950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:51.154954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:51.154966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:51.154969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:51.154980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.154991Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:51.155078Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:51.155152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:51.166261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:51.166287Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:51.170378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:51.170518Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:51.170565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:51.172701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:51.172770Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:51.172923Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.173015Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:51.174129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.174211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:51.174684Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.174703Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.174718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:51.174729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.174736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:51.174761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.176621Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:51.198577Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:51.198657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.198719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:51.198763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:51.198773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.199589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.199616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:51.199681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.199691Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:51.199695Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:51.199700Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:51.200208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.200223Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.200228Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:51.200690Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.200705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.200711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.200731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready 
parts: 1/1 2025-06-03T10:30:51.201487Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.201961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:51.202000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:51.202165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.202188Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.202195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.202262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:51.202269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.202302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:51.202313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:51.202736Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.202743Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.202772Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
LAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:30:59.956095Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:30:59.956099Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710760:0 progress is 1/1 2025-06-03T10:30:59.956104Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:30:59.956107Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 281474976710760, ready parts: 1/1, is published: true 2025-06-03T10:30:59.956116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [2:127:2152] message: TxId: 281474976710760 2025-06-03T10:30:59.956121Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710760 ready parts: 1/1 2025-06-03T10:30:59.956125Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710760:0 2025-06-03T10:30:59.956128Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710760:0 2025-06-03T10:30:59.956138Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 13 FAKE_COORDINATOR: Erasing txId 281474976710760 2025-06-03T10:30:59.956943Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6751: Handle: TEvNotifyTxCompletionResult: txId# 281474976710760 2025-06-03T10:30:59.956959Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:6753: Message: TxId: 281474976710760 2025-06-03T10:30:59.956969Z node 2 :BUILD_INDEX INFO: schemeshard_build_index__progress.cpp:2331: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfoId: 102 2025-06-03T10:30:59.956980Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:2334: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxReply : TEvNotifyTxCompletionResult, txId# 281474976710760, buildInfo: TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1180:3031], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 0, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:59.957345Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 
Cancellation_Unlocking 2025-06-03T10:30:59.957364Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancellation_Unlocking TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancellation_Unlocking, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1180:3031], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:59.957375Z node 2 :BUILD_INDEX INFO: schemeshard_build_index_tx_base.cpp:25: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: Change state from Cancellation_Unlocking to Cancelled 2025-06-03T10:30:59.957703Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__progress.cpp:1117: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled 2025-06-03T10:30:59.957720Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__progress.cpp:1118: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TTxBuildProgress: Execute: 102 Cancelled TBuildInfo{ IndexBuildId: 102, Uid: , DomainPathId: [OwnerId: 72057594046678944, LocalPathId: 1], TablePathId: [OwnerId: 72057594046678944, LocalPathId: 2], IndexType: EIndexTypeGlobal, IndexName: index1, IndexColumn: index, State: Cancelled, IsCancellationRequested: 1, Issue: , SubscribersCount: 1, CreateSender: [2:1180:3031], AlterMainTableTxId: 0, AlterMainTableTxStatus: StatusSuccess, AlterMainTableTxDone: 0, LockTxId: 281474976710757, LockTxStatus: StatusAccepted, LockTxDone: 1, InitiateTxId: 281474976710758, InitiateTxStatus: StatusAccepted, InitiateTxDone: 1, SnapshotStepId: 0, ApplyTxId: 281474976710759, ApplyTxStatus: StatusAccepted, ApplyTxDone: 1, UnlockTxId: 281474976710760, UnlockTxStatus: StatusAccepted, UnlockTxDone: 1, ToUploadShards: 0, DoneShards: 0, Processed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }, Billed: { upload rows: 0, upload bytes: 0, read rows: 0, read bytes: 0 }} 2025-06-03T10:30:59.957725Z node 2 :BUILD_INDEX TRACE: schemeshard_build_index_tx_base.cpp:339: TIndexBuilder::TXTYPE_PROGRESS_INDEX_BUILD: TIndexBuildInfo SendNotifications: : id# 102, subscribers count# 1 2025-06-03T10:30:59.957753Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:30:59.957760Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:1274:3114] TestWaitNotification: OK eventTxId 102 2025-06-03T10:30:59.958251Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__get.cpp:19: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" IndexBuildId: 102 2025-06-03T10:30:59.958359Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_GET_INDEX_BUILD: Reply Status: 
SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } BUILDINDEX RESPONSE Get: NKikimrIndexBuilder.TEvGetResponse Status: SUCCESS IndexBuild { Id: 102 State: STATE_CANCELLED Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } Progress: 0 StartTime { } EndTime { } } 2025-06-03T10:30:59.958630Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:30:59.958708Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 84us result status StatusSuccess 2025-06-03T10:30:59.958840Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table" PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 10 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 11 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:59.959107Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/index1" Options { 
ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:30:59.959134Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/index1" took 31us result status StatusPathDoesNotExist 2025-06-03T10:30:59.959226Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/index1\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeTableIndex, state: EPathStateNotExist), drop stepId: 5000005, drop txId: 281474976710759, source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/index1" PathId: 3 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::SimpleDropExternalTableWithReboots2 [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:47.660818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:47.660846Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:47.660852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:47.660858Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:47.660871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:47.660875Z 
node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:47.660884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:47.660898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:47.661009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:47.661073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:47.677623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:47.677654Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:47.677754Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:47.680599Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:47.680707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:47.680744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:47.682840Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:47.682901Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:47.683025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:47.683085Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:47.683608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:47.683665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:47.683926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:47.683934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:47.683947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:47.683953Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:47.683959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:47.684000Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:47.685769Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:47.703340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:47.703410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.703467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:47.703511Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:47.703521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.704241Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:47.704267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:47.704310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.704318Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:47.704322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:47.704327Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:47.704774Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.704785Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:47.704790Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:47.705165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.705177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:47.705182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:47.705189Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:47.705979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:47.706522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:47.706563Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:47.706738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:47.706760Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:47.706767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:47.706833Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:00.289966Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:31:00.289988Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:00.290022Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:00.290029Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-03T10:31:00.290036Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-03T10:31:00.290041Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-03T10:31:00.290131Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.290151Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-03T10:31:00.290181Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:00.290189Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:00.290203Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:00.290208Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:00.290219Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-03T10:31:00.290229Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:00.290236Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1004:0 2025-06-03T10:31:00.290241Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1004:0 2025-06-03T10:31:00.290259Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:00.290265Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
2025-06-03T10:31:00.290273Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-03T10:31:00.290278Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:31:00.290282Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:00.290286Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:31:00.290399Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290417Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290423Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:00.290429Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:31:00.290435Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:00.290646Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:00.290657Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:31:00.290672Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:00.290793Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290809Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290815Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:00.290820Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:31:00.290825Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:00.290914Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290927Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.290932Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:00.290937Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:00.290942Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:00.290954Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-03T10:31:00.291735Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.292121Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:00.292148Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:00.292168Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-03T10:31:00.292235Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:31:00.292246Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:31:00.292384Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:31:00.292428Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:31:00.292435Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [50:389:2379] TestWaitNotification: OK eventTxId 1004 2025-06-03T10:31:00.292534Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:00.292577Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 56us result status StatusPathDoesNotExist 2025-06-03T10:31:00.292620Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |67.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage |67.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/backup/impl/ut_local_partition_reader/unittest |67.5%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/ydb-core-tx-tx_proxy-ut_encrypted_storage >> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots [GOOD] |67.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut |67.5%| [LD] {RESULT} $(B)/ydb/services/ydb/table_split_ut/ydb-services-ydb-table_split_ut >> KikimrIcGateway::TestListPath >> KikimrProvider::TestFillAuthPropertiesBasic [GOOD] >> KikimrProvider::TestFillAuthPropertiesAws [GOOD] >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> KikimrIcGateway::TestCreateSameExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalTable >> KikimrIcGateway::TestLoadTableMetadata >> KikimrIcGateway::TestLoadExternalTable |67.5%| [TA] $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... 
results_accumulator.log} >> ReadAttributesUtils::AttributesGatheringEmpry [GOOD] >> ReadAttributesUtils::AttributesGatheringFilter [GOOD] >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] >> TSchemeShardViewTest::ReadOnlyMode |67.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrProvider::AlterTableAddIndexWithTableSettings [GOOD] >> TSchemeShardViewTest::DropView >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists >> TSchemeShardViewTest::AsyncCreateDifferentViews |67.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> ReadAttributesUtils::AttributesGatheringRecursive [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableAndDropWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:48.214254Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:48.214286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:48.214292Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:48.214297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:48.214313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:48.214318Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:48.214328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:48.214351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 
604800.000000s, IsManualStartup# false 2025-06-03T10:30:48.214471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:48.214569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:48.229756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:48.229780Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:48.229878Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:48.231925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:48.231995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:48.232017Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:48.233326Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:48.233394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:48.233477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:48.233518Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:48.234112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:48.234171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:48.234423Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:48.234434Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:48.234449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:48.234457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:48.234464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:48.234496Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 
72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:48.236213Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:48.253973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:48.254032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.254085Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:48.254119Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:48.254128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.254703Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:48.254722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:48.254763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.254775Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:48.254781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:48.254787Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:48.255110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.255123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:48.255129Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:48.255521Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:48.255533Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 
72057594046678944 2025-06-03T10:30:48.255538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:48.255547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:48.256035Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:48.256404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:48.256436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:48.256633Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:48.256653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:48.256658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:48.256710Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:00.999070Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:31:00.999096Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:00.999128Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:00.999134Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1006, path id: 1 2025-06-03T10:31:00.999141Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1006, path id: 5 2025-06-03T10:31:00.999145Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1006, path id: 3 FAKE_COORDINATOR: Erasing txId 1006 2025-06-03T10:31:00.999255Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.999264Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-03T10:31:00.999282Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1006:0 progress is 1/1 2025-06-03T10:31:00.999288Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:00.999294Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1006:0 progress is 1/1 2025-06-03T10:31:00.999298Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:00.999303Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-03T10:31:00.999310Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:00.999315Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1006:0 2025-06-03T10:31:00.999319Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1006:0 2025-06-03T10:31:00.999336Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:31:00.999340Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
2025-06-03T10:31:00.999346Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-03T10:31:00.999350Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-03T10:31:00.999354Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:00.999358Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-03T10:31:00.999448Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:00.999468Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:00.999473Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:00.999479Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:31:00.999484Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:31:00.999595Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:00.999604Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:31:00.999619Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:00.999686Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:00.999695Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:00.999700Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:00.999704Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 15 2025-06-03T10:31:00.999708Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:00.999994Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:01.000014Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:01.000020Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:01.000025Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:01.000033Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:01.000050Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-03T10:31:01.000839Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-03T10:31:01.000915Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:01.001344Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-03T10:31:01.001374Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-03T10:31:01.001461Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-03T10:31:01.001471Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-03T10:31:01.001563Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-03T10:31:01.001586Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-03T10:31:01.001592Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [50:448:2438] TestWaitNotification: OK eventTxId 1006 2025-06-03T10:31:01.001681Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:01.001719Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 51us result status StatusPathDoesNotExist 2025-06-03T10:31:01.001770Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> KikimrIcGateway::TestDropExternalTable [GOOD] >> KikimrIcGateway::TestDropExternalDataSource >> THiveTest::TestHiveBalancerUselessNeighbourMoves [GOOD] >> THiveTest::TestHiveBalancerWithImmovableTablets >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet |67.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |67.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |67.5%| [TA] {RESULT} $(B)/ydb/core/backup/impl/ut_local_partition_reader/test-results/unittest/{meta.json ... results_accumulator.log} |67.5%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/ydb-core-tx-schemeshard-ut_rtmr |67.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace >> TSchemeShardViewTest::ReadOnlyMode [GOOD] |67.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace |67.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/ydb-core-tx-datashard-ut_trace >> TSchemeShardViewTest::DropView [GOOD] >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD] >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD] >> TExternalDataSourceTest::RemovingReferencesFromDataSources >> TExternalDataSourceTest::CreateExternalDataSourceWithProperties >> KikimrIcGateway::TestListPath [GOOD] >> KikimrIcGateway::TestDropTable >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD] |67.6%| [TA] $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::DropView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:31:02.101062Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:02.101095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.101100Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:02.101104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:02.101109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:02.101112Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:02.101120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.101136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:02.101229Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:02.101323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:02.114358Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:02.114384Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.118559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:02.118676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:02.118711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:02.121137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:02.121224Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:02.121353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 
72057594046678944 2025-06-03T10:31:02.121429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:02.122295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.122357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:02.122711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.122723Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.122731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:02.122741Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.122746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:02.122765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.124724Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:31:02.152597Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:02.152696Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.152770Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:02.152829Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:02.152842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.154249Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.154288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 
2025-06-03T10:31:02.154362Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.154375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:02.154382Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:02.154388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:02.155355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.155374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:02.155382Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:02.156004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.156018Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.156026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.156034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.156832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:02.157388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:02.157443Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:02.157647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.157678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.157687Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.157753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:02.157763Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.157799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.157813Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:02.158373Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.158385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.158435Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 2057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 102 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:02.170525Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 102:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:102 msg type: 269090816 2025-06-03T10:31:02.170580Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 102, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-03T10:31:02.170688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.170718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.170730Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_view.cpp:43: [72057594046678944] TDropView TPropose, opId: 102:0 HandleReply TEvOperationPlan, step: 5000003 2025-06-03T10:31:02.170777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 240 2025-06-03T10:31:02.170850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.170865Z node 1 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:31:02.171607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.171625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.171673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:02.171731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.171738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-03T10:31:02.171745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:31:02.171845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.171855Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:31:02.171872Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:31:02.171878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.171885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:31:02.171889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.171900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:31:02.171907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.171913Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:31:02.171918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:31:02.171935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:02.171942Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-03T10:31:02.171947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 
72057594046678944, LocalPathId: 1], 5 2025-06-03T10:31:02.171951Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:31:02.172084Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.172099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.172108Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:31:02.172114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:31:02.172119Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:02.172619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.172650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.172658Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:31:02.172665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:31:02.172673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:31:02.172711Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:31:02.172825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:02.172833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:31:02.172847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.173828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:31:02.173944Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:31:02.173966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:31:02.174029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:31:02.174039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:31:02.174120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:31:02.174143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.174149Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:323:2313] TestWaitNotification: OK eventTxId 102 2025-06-03T10:31:02.174233Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:02.174273Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyView" took 53us result status StatusPathDoesNotExist 2025-06-03T10:31:02.174323Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyView\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/MyView" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:31:01.859991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact 
single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:01.860021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:01.860026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:01.860032Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:01.860038Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:01.860043Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:01.860054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:01.860074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:01.860183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:01.860260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:01.876661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:01.876688Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:01.881255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:01.881495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:01.881537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:01.883799Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:01.883864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:01.883989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:01.884055Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:01.884786Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:01.884834Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:01.885176Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:01.885189Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:01.885203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:01.885212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:01.885218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:01.885244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.886857Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:31:01.911715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:01.911814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.911884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:01.911945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:01.911956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.912823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:01.912854Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:01.912926Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.912938Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:01.912945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:01.912951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change 
state for txid 1:0 2 -> 3 2025-06-03T10:31:01.913498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.913512Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:01.913519Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:01.913957Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.913973Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:01.913982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:01.913990Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:01.914910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:01.915438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:01.915483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:01.915692Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:01.915723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:01.915732Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:01.915797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:01.915805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:01.915843Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 
2025-06-03T10:31:01.915857Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:31:01.916374Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:01.916408Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:01.916459Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... ng: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.010673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:31:02.010749Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
TestModificationResults wait txId: 103
Leader for TabletID 72057594046678944 is [1:380:2348] sender: [1:436:2058] recipient: [1:15:2062]
2025-06-03T10:31:02.052960Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateView CreateView { Name: "ThirdView" QueryText: "Some query" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:31:02.053029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_view.cpp:118: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0
2025-06-03T10:31:02.053039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_create_view.cpp:124: [72057594046678944] TCreateView Propose, path: /MyRoot/ThirdView, opId: 103:0, viewDescription: Name: "ThirdView" QueryText: "Some query"
2025-06-03T10:31:02.053064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:319: AttachChild: child attached as only one child to the parent, parent id: [OwnerId: 72057594046678944, LocalPathId: 1], parent name: MyRoot, child name: ThirdView, child id: [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944
2025-06-03T10:31:02.053084Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 0
2025-06-03T10:31:02.053094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:31:02.057721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusAccepted TxId: 103 SchemeshardId: 72057594046678944 PathId: 3, at schemeshard: 72057594046678944
2025-06-03T10:31:02.057770Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusAccepted, operation: CREATE VIEW, path: /MyRoot/ThirdView
2025-06-03T10:31:02.057842Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.057851Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:30: [72057594046678944] TCreateView::TPropose, opId: 103:0 ProgressState
2025-06-03T10:31:02.057866Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 103 ready parts: 1/1
2025-06-03T10:31:02.057895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 103 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:31:02.062255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 103:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:103 msg type: 269090816
2025-06-03T10:31:02.062348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 103, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 103 at step: 5000003
FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 103 at step: 5000003
2025-06-03T10:31:02.062571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.062620Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 103 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:31:02.062639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_view.cpp:45: [72057594046678944] TCreateView::TPropose, opId: 103:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000003
2025-06-03T10:31:02.062714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 103:0 128 -> 240
2025-06-03T10:31:02.062762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2025-06-03T10:31:02.062780Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
FAKE_COORDINATOR: Erasing txId 103
2025-06-03T10:31:02.064265Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.064285Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.064349Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3]
2025-06-03T10:31:02.064377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.064397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:429:2386], at schemeshard: 72057594046678944, txId: 103, path id: 1
2025-06-03T10:31:02.064407Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:429:2386], at schemeshard: 72057594046678944, txId: 103, path id: 3
2025-06-03T10:31:02.064423Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.064433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState
2025-06-03T10:31:02.064452Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-06-03T10:31:02.064458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:31:02.064465Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1
2025-06-03T10:31:02.064468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:31:02.064474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false
2025-06-03T10:31:02.064481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1
2025-06-03T10:31:02.064487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0
2025-06-03T10:31:02.064492Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0
2025-06-03T10:31:02.064516Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2
2025-06-03T10:31:02.064526Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 2, subscribers: 0
2025-06-03T10:31:02.064531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 1], 5
2025-06-03T10:31:02.064534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 2
2025-06-03T10:31:02.064910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:31:02.064934Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:31:02.064941Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103
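The publication bookkeeping above ("Publication still in progress ... publications: 2", one TEvUpdateAck per path, then "Publication complete, notify & remove" further below) amounts to counting down a set of expected (pathId, version) acks. A small C++ sketch of that pattern follows, with hypothetical names and none of the real actor machinery; only the path ids, versions, and printed counts are taken from the log.

// Illustrative sketch only: models the ack countdown implied by the log.
// Not the YDB implementation.
#include <cstdio>
#include <set>
#include <utility>

struct TPublicationTracker {
    // Expected acks, keyed by (localPathId, version); owner id omitted for brevity.
    std::set<std::pair<unsigned long long, unsigned>> InFlight;

    void Expect(unsigned long long pathId, unsigned version) {
        InFlight.insert({pathId, version});
    }

    // Prints the count before applying the ack, matching the log's
    // "Publication in-flight, count: N" lines; returns true on the last ack.
    bool Ack(unsigned long long pathId, unsigned version) {
        std::printf("Publication in-flight, count: %zu\n", InFlight.size());
        InFlight.erase({pathId, version});
        return InFlight.empty();
    }
};

int main() {
    TPublicationTracker tx103;
    tx103.Expect(1, 5); // [LocalPathId: 1], version 5 (from the log above)
    tx103.Expect(3, 2); // [LocalPathId: 3], version 2
    tx103.Ack(1, 5);    // prints count: 2
    if (tx103.Ack(3, 2)) // prints count: 1
        std::printf("Publication complete, notify & remove\n");
}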
2025-06-03T10:31:02.064947Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5
2025-06-03T10:31:02.064953Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3
2025-06-03T10:31:02.065114Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:31:02.065127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 103
2025-06-03T10:31:02.065132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 103
2025-06-03T10:31:02.065137Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-06-03T10:31:02.065142Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-06-03T10:31:02.065154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0
2025-06-03T10:31:02.067298Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
2025-06-03T10:31:02.067527Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103
TestModificationResult got TxId: 103, wait until txId: 103
>> KikimrIcGateway::TestDropExternalDataSource [GOOD]
>> KikimrIcGateway::TestLoadTableMetadata [GOOD]
>> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata
>> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD]
>> TExternalDataSourceTest::CreateExternalDataSourceWithProperties [GOOD]
>> TExternalDataSourceTest::DropExternalDataSource
>> TExternalTableTestReboots::DropExternalTableWithReboots [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_view/unittest >> TSchemeShardViewTest::AsyncCreateDifferentViews [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:31:02.140121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:31:02.140153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.140160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:31:02.140165Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:31:02.140171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:31:02.140193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:31:02.140204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.140220Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:31:02.140344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:31:02.140457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:31:02.153282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:31:02.153326Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:31:02.156642Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:31:02.156735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:31:02.156769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:31:02.159798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:31:02.159861Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:31:02.159979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.160031Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:31:02.160860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.160908Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:31:02.161215Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.161231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.161246Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:31:02.161255Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.161260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:31:02.161281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.162786Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:31:02.189375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:31:02.189481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.189562Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:31:02.189628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:31:02.189643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.190904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.190946Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:31:02.191026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.191040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:31:02.191046Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:31:02.191053Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:31:02.191836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.191854Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:31:02.191862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
2025-06-03T10:31:02.193942Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.193968Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.193977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.193988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:31:02.194930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:31:02.195817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:31:02.195877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:31:02.196106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.196148Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:31:02.196159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.196242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:31:02.196255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.196297Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:31:02.196314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:31:02.197088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.197102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.197162Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... ateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102
2025-06-03T10:31:02.219458Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 102
2025-06-03T10:31:02.219464Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102
2025-06-03T10:31:02.219469Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5
2025-06-03T10:31:02.219475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3
2025-06-03T10:31:02.219607Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102
2025-06-03T10:31:02.219618Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102
2025-06-03T10:31:02.219623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102
2025-06-03T10:31:02.219627Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2
2025-06-03T10:31:02.219631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1
2025-06-03T10:31:02.219638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0
2025-06-03T10:31:02.220513Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
2025-06-03T10:31:02.220545Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102
TestModificationResult got TxId: 101, wait until txId: 101
TestModificationResults wait txId: 102
TestModificationResult got TxId: 102, wait until txId: 102
TestModificationResults wait txId: 103
TestModificationResult got TxId: 103, wait until txId: 103
TestWaitNotification wait txId: 101
2025-06-03T10:31:02.220621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-06-03T10:31:02.220632Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
TestWaitNotification wait txId: 102
2025-06-03T10:31:02.220650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-06-03T10:31:02.220655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
TestWaitNotification wait txId: 103
2025-06-03T10:31:02.220669Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion
2025-06-03T10:31:02.220673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103
2025-06-03T10:31:02.220772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-06-03T10:31:02.220804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-06-03T10:31:02.220810Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:332:2322]
2025-06-03T10:31:02.220850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2025-06-03T10:31:02.220864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944
2025-06-03T10:31:02.220872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-06-03T10:31:02.220876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:332:2322]
2025-06-03T10:31:02.220891Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult
2025-06-03T10:31:02.220895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:332:2322]
TestWaitNotification: OK eventTxId 101
TestWaitNotification: OK eventTxId 102
TestWaitNotification: OK eventTxId 103
2025-06-03T10:31:02.220986Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:31:02.221028Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir" took 63us result status StatusSuccess
2025-06-03T10:31:02.221165Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir" PathDescription { Self { Name: "SomeDir" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 } ChildrenExist: true } Children { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:31:02.221237Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/FirstView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:31:02.221258Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/FirstView" took 24us result status StatusSuccess
2025-06-03T10:31:02.221339Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/FirstView" PathDescription { Self { Name: "FirstView" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 102 CreateStep: 5000004 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "FirstView" PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 QueryText: "First query" CapturedContext { } } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:31:02.221399Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/SomeDir/SecondView" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:31:02.221433Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/SomeDir/SecondView" took 36us result status StatusSuccess
2025-06-03T10:31:02.221468Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/SomeDir/SecondView" PathDescription { Self { Name: "SecondView" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 103 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "SecondView" PathId { OwnerId: 72057594046678944 LocalId: 4 } Version: 1 QueryText: "Second query" CapturedContext { } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> KikimrIcGateway::TestLoadExternalTable [GOOD]
>> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata
>> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots [GOOD]
>> TExternalDataSourceTest::DropExternalDataSource [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataStoreShouldFailIfEntityOfAnotherTypeWithSameNameExists [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140]
2025-06-03T10:31:02.151991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:31:02.152020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.152027Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:31:02.152033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:31:02.152048Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:31:02.152053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:31:02.152068Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.152084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:31:02.152197Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:31:02.152274Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:31:02.162768Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs
2025-06-03T10:31:02.162789Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:31:02.166961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:31:02.167108Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:31:02.167153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:31:02.170352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:31:02.170461Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:31:02.170707Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.170826Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:31:02.171964Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.172047Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:31:02.172596Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.172625Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.172646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:31:02.172660Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.172671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:31:02.172707Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.179228Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0
Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062]
2025-06-03T10:31:02.211169Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:31:02.211297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.211391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0
2025-06-03T10:31:02.211477Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944
2025-06-03T10:31:02.211498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.213128Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.213184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot
2025-06-03T10:31:02.213271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.213287Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944
2025-06-03T10:31:02.213319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state
2025-06-03T10:31:02.213326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3
2025-06-03T10:31:02.214216Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.214239Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944
2025-06-03T10:31:02.214248Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128
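The transition to state 128 (the propose stage) above is followed, here as in every earlier operation in this log, by a plan-step handshake with coordinator tablet 72057594046316545: the schemeshard's DoPropose message, the test's "FAKE_COORDINATOR: Add transaction ... / advance ..." reply, then TTxOperationPlanStep driving 128 -> 240. A toy stand-in for that step allocation follows; its semantics are reconstructed only from the log lines (steps 5000001, 5000002, 5000003 handed out monotonically), not from the real FAKE_COORDINATOR.

// Illustrative sketch only: a fake coordinator that hands out monotonically
// increasing plan steps, echoing the FAKE_COORDINATOR lines in this log.
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct TFakeCoordinator {
    uint64_t FrontStep = 0; // last step handed out, as in "State->FrontStep: ..."

    // Assign the next plan step to txId and advance the front.
    uint64_t Plan(uint64_t txId, uint64_t minStep) {
        uint64_t step = std::max(FrontStep + 1, minStep);
        std::printf("FAKE_COORDINATOR: Add transaction: %llu at step: %llu\n",
                    (unsigned long long)txId, (unsigned long long)step);
        std::printf("FAKE_COORDINATOR: advance: minStep%llu State->FrontStep: %llu\n",
                    (unsigned long long)step, (unsigned long long)FrontStep);
        FrontStep = step;
        return step;
    }
};

int main() {
    TFakeCoordinator coordinator;
    coordinator.Plan(1, 5000001);   // matches "Add transaction: 1 at step: 5000001"
    coordinator.Plan(102, 5000002); // hypothetical intermediate tx, not shown in this excerpt
    coordinator.Plan(103, 5000003); // matches "advance: minStep5000003 State->FrontStep: 5000002"
}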
2025-06-03T10:31:02.214893Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.214917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.214927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.214937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1
2025-06-03T10:31:02.215973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545
2025-06-03T10:31:02.216749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816
2025-06-03T10:31:02.216819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545
FAKE_COORDINATOR: Add transaction: 1 at step: 5000001
FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0
FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001
2025-06-03T10:31:02.217104Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.217150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944
2025-06-03T10:31:02.217163Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.217269Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240
2025-06-03T10:31:02.217287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944
2025-06-03T10:31:02.217355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1
2025-06-03T10:31:02.217374Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
FAKE_COORDINATOR: Erasing txId 1
2025-06-03T10:31:02.218193Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.218210Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.218281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03 ... 72057594046678944, txId: 101, path id: 2
2025-06-03T10:31:02.229080Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 101:0, at schemeshard: 72057594046678944
2025-06-03T10:31:02.229091Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 101:0 ProgressState
2025-06-03T10:31:02.229112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-06-03T10:31:02.229120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:31:02.229128Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#101:0 progress is 1/1
2025-06-03T10:31:02.229135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:31:02.229143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 101, ready parts: 1/1, is published: false
2025-06-03T10:31:02.229152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 101 ready parts: 1/1
2025-06-03T10:31:02.229160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 101:0
2025-06-03T10:31:02.229166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 101:0
2025-06-03T10:31:02.229191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2
2025-06-03T10:31:02.229198Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 101, publications: 2, subscribers: 0
2025-06-03T10:31:02.229203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 1], 4
2025-06-03T10:31:02.229207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 101, [OwnerId: 72057594046678944, LocalPathId: 2], 2
2025-06-03T10:31:02.229561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:31:02.229595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 4 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:31:02.229608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101
2025-06-03T10:31:02.229616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 4
2025-06-03T10:31:02.229625Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2
2025-06-03T10:31:02.233093Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:31:02.233185Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101
2025-06-03T10:31:02.233197Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101
2025-06-03T10:31:02.233206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2
2025-06-03T10:31:02.233218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1
2025-06-03T10:31:02.233263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0
2025-06-03T10:31:02.234931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
2025-06-03T10:31:02.236933Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101
TestModificationResult got TxId: 101, wait until txId: 101
TestWaitNotification wait txId: 101
2025-06-03T10:31:02.237062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion
2025-06-03T10:31:02.237073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101
2025-06-03T10:31:02.237177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944
2025-06-03T10:31:02.237225Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult
2025-06-03T10:31:02.237232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:299:2289]
TestWaitNotification: OK eventTxId 101
2025-06-03T10:31:02.237389Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/UniqueName" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944
2025-06-03T10:31:02.237446Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/UniqueName" took 87us result status StatusSuccess
2025-06-03T10:31:02.237594Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/UniqueName" PathDescription { Self { Name: "UniqueName" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeView CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ViewDescription { Name: "UniqueName" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 QueryText: "Some query" CapturedContext { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
TestModificationResults wait txId: 102
2025-06-03T10:31:02.238604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 102 TabletId: 72057594046678944 , at schemeshard: 72057594046678944
2025-06-03T10:31:02.238672Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 102:0, feature flag EnableReplaceIfExistsForExternalEntities 1, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "UniqueName" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true }
2025-06-03T10:31:02.238689Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_external_data_source.cpp:212: [72057594046678944] TAlterExternalDataSource Propose: opId# 102:0, path# /MyRoot/UniqueName
2025-06-03T10:31:02.238721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 102:1, propose status:StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:96, at schemeshard: 72057594046678944
2025-06-03T10:31:02.239830Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 102, response: Status: StatusNameConflict Reason: "Check failed: path: \'/MyRoot/UniqueName\', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:96" TxId: 102 SchemeshardId: 72057594046678944 PathId: 2 PathCreateTxId: 101, at schemeshard: 72057594046678944
2025-06-03T10:31:02.239881Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 102, database: /MyRoot, subject: , status: StatusNameConflict, reason: Check failed: path: '/MyRoot/UniqueName', error: unexpected path type (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeView, state: EPathStateNoChanges), expected types: EPathTypeExternalDataSource, source_location: ydb/core/tx/schemeshard/schemeshard__operation_alter_external_data_source.cpp:96, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/UniqueName
TestModificationResult got TxId: 102, wait until txId: 102
TestWaitNotification wait txId: 102
2025-06-03T10:31:02.239983Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion
2025-06-03T10:31:02.239992Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102
2025-06-03T10:31:02.240088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944
2025-06-03T10:31:02.240115Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult
2025-06-03T10:31:02.240123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:307:2297]
TestWaitNotification: OK eventTxId 102
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReplaceExternalDataSourceIfNotExistsShouldFailIfFeatureFlagIsNotSet [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141]
2025-06-03T10:31:02.309354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:31:02.309391Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.309398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:31:02.309403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:31:02.309417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:31:02.309421Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:31:02.309432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:02.309450Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false
2025-06-03T10:31:02.309615Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:31:02.309721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute
2025-06-03T10:31:02.323041Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true }
2025-06-03T10:31:02.323072Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:31:02.323196Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources#
2025-06-03T10:31:02.327152Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete
2025-06-03T10:31:02.327306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute
2025-06-03T10:31:02.327357Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944
2025-06-03T10:31:02.331082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete
2025-06-03T10:31:02.331314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0
2025-06-03T10:31:02.331493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944
2025-06-03T10:31:02.331599Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944
2025-06-03T10:31:02.332337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.332425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop
2025-06-03T10:31:02.332781Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:31:02.332796Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944
2025-06-03T10:31:02.332819Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute
2025-06-03T10:31:02.332831Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1]
2025-06-03T10:31:02.332838Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete
2025-06-03T10:31:02.332890Z node 1 :FLAT_TX_SCHEMESHARD
INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.334616Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:31:02.355310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:02.355425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.355504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:02.355563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:02.355574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.356604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.356638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:02.356701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.356713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:02.356720Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:02.356727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:02.357589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.357610Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:02.357616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:02.358277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.358297Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.358305Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.358314Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.358969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:02.359554Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:02.359610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:02.359843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.359880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.359889Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.359975Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:02.359984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.360026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.360040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:02.360571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.360583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.360644Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 
2025-06-03T10:31:02.360650Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:31:02.360742Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.360752Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:31:02.360767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:31:02.360772Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.360778Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:31:02.360781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.360786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:31:02.360792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.360797Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:31:02.360801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:31:02.360818Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:02.360824Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:31:02.360829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:31:02.361231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:31:02.361256Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:31:02.361262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:31:02.361268Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:31:02.361275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.361317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:31:02.362019Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:31:02.362186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 TestModificationResults wait txId: 101 2025-06-03T10:31:02.363063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } } TxId: 101 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:02.363126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_external_data_source.cpp:336: [72057594046678944] CreateNewExternalDataSource, opId 101:0, feature flag EnableReplaceIfExistsForExternalEntities 0, tx WorkingDir: "/MyRoot" OperationType: ESchemeOpCreateExternalDataSource FailOnExist: false CreateExternalDataSource { Name: "MyExternalDataSource" SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Auth { None { } } ReplaceIfExists: true } 2025-06-03T10:31:02.363136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 101:0, explain: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-03T10:31:02.363141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 101:1, propose status:StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, at schemeshard: 72057594046678944 2025-06-03T10:31:02.363215Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [1:270:2260] Bootstrap 2025-06-03T10:31:02.365701Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [1:270:2260] Become StateWork (SchemeCache [1:275:2265]) 2025-06-03T10:31:02.366135Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:270:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:31:02.367016Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 101, response: Status: StatusPreconditionFailed Reason: "Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off" TxId: 101 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.367094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 101, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Invalid TCreateExternalDataSource request: Unsupported: feature flag EnableReplaceIfExistsForExternalEntities is off, operation: CREATE EXTERNAL DATA SOURCE, path: /MyRoot/MyExternalDataSource 2025-06-03T10:31:02.367699Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TestModificationResult got TxId: 101, wait until txId: 101 
TestWaitNotification wait txId: 101 2025-06-03T10:31:02.367775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:31:02.367786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:31:02.367888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:31:02.367922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.367929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:285:2275] TestWaitNotification: OK eventTxId 101 2025-06-03T10:31:02.368052Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:02.368103Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 68us result status StatusPathDoesNotExist 2025-06-03T10:31:02.368193Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> THiveTest::TestFollowerCompatability1 [GOOD] >> THiveTest::TestFollowerCompatability2 >> TExternalDataSourceTest::ParallelCreateSameExternalDataSource >> THiveTest::TestFollowerCompatability3 [GOOD] >> THiveTest::TestGetStorageInfo ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::RemovingReferencesFromDataSources [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:31:02.546279Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:02.546313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.546321Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:02.546327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:02.546344Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:02.546350Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:02.546362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.546380Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:02.546529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:02.546626Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:02.561019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:02.561044Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.561157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:02.564577Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:02.564698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:02.564743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:02.569065Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:02.569421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:02.569627Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.569751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:02.571281Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.571363Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:02.571694Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 
72057594046678944 2025-06-03T10:31:02.571705Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.571728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:02.571735Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.571740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:02.571786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.573744Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:31:02.602187Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:02.602329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.602432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:02.602504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:02.602520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.604017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.604074Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:02.604155Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.604169Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:02.604176Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:02.604183Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 
2025-06-03T10:31:02.605099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.605127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:02.605157Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:02.605947Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.605970Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.605979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.605989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.606977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:02.609683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:02.609769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:02.610111Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.610165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.610177Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.610298Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:02.610309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.610360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.610377Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:02.612013Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.612029Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 7594046316545 FAKE_COORDINATOR: Add transaction: 104 at step: 5000005 FAKE_COORDINATOR: advance: minStep5000005 State->FrontStep: 5000004 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 104 at step: 5000005 2025-06-03T10:31:02.646977Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000005, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.647009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 104 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000005 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.647023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 104:0 HandleReply TEvOperationPlan: step# 5000005 2025-06-03T10:31:02.647058Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:02.647082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 128 -> 240 2025-06-03T10:31:02.647120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.647130Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:31:02.647236Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:31:02.647313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:31:02.647734Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.647745Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.647789Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 104, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:02.647817Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 
72057594046678944 2025-06-03T10:31:02.647823Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 104, path id: 1 2025-06-03T10:31:02.647829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:207:2208], at schemeshard: 72057594046678944, txId: 104, path id: 2 FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:31:02.647914Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.647922Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:31:02.647934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:31:02.647939Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:31:02.647943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 1/1 2025-06-03T10:31:02.647946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:31:02.647949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 1/1, is published: false 2025-06-03T10:31:02.647955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 1/1 2025-06-03T10:31:02.647962Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:31:02.647967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:31:02.647984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:02.647995Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 104, publications: 2, subscribers: 0 2025-06-03T10:31:02.648000Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:31:02.648004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 104, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:31:02.648080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:31:02.648093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:31:02.648098Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at 
schemeshard: 72057594046678944, txId: 104 2025-06-03T10:31:02.648103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:31:02.648109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:31:02.648165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:02.648172Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:31:02.648184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:02.648233Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:31:02.648242Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 104 2025-06-03T10:31:02.648246Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 104 2025-06-03T10:31:02.648250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 104, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:31:02.648253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.648262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 104, subscribers: 0 2025-06-03T10:31:02.649175Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 2025-06-03T10:31:02.649212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:02.649223Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 104 TestModificationResult got TxId: 104, wait until txId: 104 TestWaitNotification wait txId: 104 2025-06-03T10:31:02.649289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 104: send EvNotifyTxCompletion 2025-06-03T10:31:02.649318Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 104 2025-06-03T10:31:02.649469Z node 1 
:FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 104, at schemeshard: 72057594046678944 2025-06-03T10:31:02.649490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.649496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:388:2378] TestWaitNotification: OK eventTxId 104 2025-06-03T10:31:02.649598Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:02.649642Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 61us result status StatusPathDoesNotExist 2025-06-03T10:31:02.649689Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropExternalDataSource [GOOD] Test command err: Trying to start YDB, gRPC: 7970, MsgBus: 11398 2025-06-03T10:31:00.851819Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668710908694209:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:00.851843Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000904/r3tmp/tmpnxDrXq/pdisk_1.dat 2025-06-03T10:31:00.955351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:00.955373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:00.956293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:00.957490Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:00.958092Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668710908694183:2079] 1748946660851566 != 1748946660851569 TServer::EnableGrpc on GrpcPort 
7970, node 1 2025-06-03T10:31:00.973229Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:00.973245Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:00.973248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:00.973311Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11398 TClient is connected to server localhost:11398 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:01.053654Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.056245Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:01.061804Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:31:01.073317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.080648Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668715203662176:2333] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/f1/f2/external_table\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132" severity: 1 } 2025-06-03T10:31:01.080834Z node 1 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715660, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132
: Error: Scheme operation failed, status: ExecComplete, reason: Check failed: path: '/Root/f1/f2/external_table', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeExternalTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_external_table.cpp:132 Trying to start YDB, gRPC: 64294, MsgBus: 2896 2025-06-03T10:31:01.469713Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668715505845576:2220];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000904/r3tmp/tmpirmOw2/pdisk_1.dat 2025-06-03T10:31:01.481105Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:31:01.489018Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:01.489331Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668715505845365:2079] 1748946661467585 != 1748946661467588 TServer::EnableGrpc on GrpcPort 64294, node 2 2025-06-03T10:31:01.508518Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:01.508537Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:01.508540Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:01.508607Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2896 TClient is connected to server localhost:2896 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:31:01.573628Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:01.573688Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:31:01.575327Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:01.577504Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.581889Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:01.588425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:31:01.592345Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 62186, MsgBus: 24695 2025-06-03T10:31:02.053497Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668719098395101:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:02.053811Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000904/r3tmp/tmpREcj2z/pdisk_1.dat 2025-06-03T10:31:02.080908Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62186, node 3 2025-06-03T10:31:02.094124Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:02.094139Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:02.094144Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:02.094198Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24695 TClient is connected to server localhost:24695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:02.157362Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:02.157420Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:02.158440Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:02.162993Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:02.175974Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715658:2, at schemeshard: 72057594046644480
>> THiveTest::TestCreateSubHiveCreateManyTablets [GOOD]
>> THiveTest::TestCreateSubHiveCreateManyTabletsWithReboots
>> TExternalDataSourceTest::ReadOnlyMode
>> KikimrIcGateway::TestLoadBasicSecretValueFromExternalDataSourceMetadata [GOOD]
>> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata
>> YdbIndexTable::OnlineBuild
>> TExternalDataSourceTest::ParallelCreateSameExternalDataSource [GOOD]
>> TExternalDataSourceTest::PreventDeletionOfDependentDataSources
>> KikimrIcGateway::TestDropTable [GOOD]
>> KikimrIcGateway::TestDropResourcePool
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::DropExternalDataSource [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:31:02.559108Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:02.559144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.559154Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:02.559160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:02.559177Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:02.559182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:02.559193Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:02.559210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager]
Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:02.559354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:02.559473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:02.575194Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:02.575227Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.575329Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:02.578785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:02.578937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:02.578981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:02.582352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:02.582566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:02.582751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.582860Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:02.583590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.583654Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:02.583944Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.583954Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.583968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:02.583975Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.583980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:02.584019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.585599Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is 
[1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:31:02.607979Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:02.608110Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.608195Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:02.608264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:02.608284Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.609197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.609238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:02.609321Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.609335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:02.609342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:02.609350Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:02.611041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.611056Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:02.611064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:02.611431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.611439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.611444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.611450Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:02.612109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:02.612531Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:02.612573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:02.612788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.612820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.612828Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.612919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:02.612929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:02.612969Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.612982Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:02.613566Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.613581Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 
046316545 FAKE_COORDINATOR: Add transaction: 102 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 102 at step: 5000003 2025-06-03T10:31:02.845663Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:02.845687Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 102 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 8589936749 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:02.845697Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_external_data_source.cpp:40: [72057594046678944] TDropExternalDataSource TPropose opId# 102:0 HandleReply TEvOperationPlan: step# 5000003 2025-06-03T10:31:02.845726Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:02.845744Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 102:0 128 -> 240 2025-06-03T10:31:02.845792Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.845805Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:31:02.845941Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:31:02.846439Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 FAKE_COORDINATOR: Erasing txId 102 2025-06-03T10:31:02.846998Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.847010Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.847056Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:02.847103Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.847109Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 102, path id: 1 2025-06-03T10:31:02.847117Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:208:2209], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:31:02.847171Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 
72057594046678944 2025-06-03T10:31:02.847180Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:31:02.847196Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:31:02.847201Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.847207Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:31:02.847212Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.847218Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:31:02.847224Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:31:02.847230Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:31:02.847235Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:31:02.847253Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:02.847260Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-03T10:31:02.847265Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:31:02.847270Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 18446744073709551615 2025-06-03T10:31:02.847383Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.847397Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.847403Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:31:02.847408Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 18446744073709551615 2025-06-03T10:31:02.847414Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:31:02.847468Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at 
schemeshard: 72057594046678944 2025-06-03T10:31:02.847473Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 2], at schemeshard: 72057594046678944 2025-06-03T10:31:02.847486Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:02.847560Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.847571Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:31:02.847575Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:31:02.847581Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:31:02.847586Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:02.847595Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:31:02.848542Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:31:02.848572Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:02.848585Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 102, wait until txId: 102 TestWaitNotification wait txId: 102 2025-06-03T10:31:02.848642Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:31:02.848650Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:31:02.848736Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:31:02.848756Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.848762Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [2:335:2325] TestWaitNotification: OK eventTxId 102 2025-06-03T10:31:02.848843Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/MyExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:02.848878Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/MyExternalDataSource" took 47us result status StatusPathDoesNotExist 2025-06-03T10:31:02.848930Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/MyExternalDataSource\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/MyExternalDataSource" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:49.740010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:49.740033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:49.740037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:49.740041Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:49.740054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-03T10:30:49.740057Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:49.740065Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:49.740077Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:49.740190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:49.740257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:49.752786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:49.752809Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:49.752896Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:49.755341Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:49.755414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:49.755437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:49.757282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:49.757338Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:49.757444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.757488Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:49.758081Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:49.758144Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:49.758396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:49.758406Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:49.758422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:49.758430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:49.758436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:49.758472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:49.760018Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:49.780381Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:49.780463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.780529Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:49.780575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:49.780586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.781333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.781370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:49.781410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.781421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:49.781427Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:49.781433Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:49.781963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.781978Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:49.781985Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:49.782414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.782425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.782430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:49.782435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:49.782963Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:49.783430Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:49.783471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:49.783640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.783661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:49.783667Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:49.783740Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.658062Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:31:02.658079Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:02.658098Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.658101Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-03T10:31:02.658105Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1005, path id: 5 2025-06-03T10:31:02.658112Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [50:205:2206], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-03T10:31:02.658169Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.658177Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-03T10:31:02.658190Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:31:02.658193Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:02.658197Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:31:02.658199Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:02.658202Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-03T10:31:02.658207Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:02.658211Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:31:02.658213Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:31:02.658225Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:31:02.658228Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 
2025-06-03T10:31:02.658232Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1005, publications: 3, subscribers: 0 2025-06-03T10:31:02.658234Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-03T10:31:02.658237Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:02.658239Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-03T10:31:02.658304Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658312Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658316Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:02.658319Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:31:02.658325Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:31:02.658390Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:02.658396Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:31:02.658404Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:02.658485Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658492Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658495Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:02.658498Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 
72057594046678944, LocalPathId: 1], version: 15 2025-06-03T10:31:02.658500Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:02.658789Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658816Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.658821Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:02.658825Z node 50 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:02.658830Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:02.658856Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:31:02.659378Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.659421Z node 50 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:02.659447Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:31:02.659678Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:31:02.659736Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:31:02.659743Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:31:02.659810Z node 50 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:31:02.659825Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.659828Z node 50 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [50:451:2441] TestWaitNotification: OK eventTxId 1005 2025-06-03T10:31:02.659889Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: 
"/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:02.659912Z node 50 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 32us result status StatusPathDoesNotExist 2025-06-03T10:31:02.659943Z node 50 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/ExternalTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/ExternalTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateDroppedExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:50.173587Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:50.173610Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:50.173614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:50.173618Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:50.173630Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:50.173633Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:50.173639Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:50.173651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:50.173746Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:50.173820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:50.187124Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:50.187145Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:50.187234Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:50.189204Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:50.189282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:50.189330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:50.190955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:50.190994Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:50.191088Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.191126Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:50.191544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:50.191596Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:50.191788Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:50.191795Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:50.191805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:50.191810Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:50.191814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:50.191842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:50.192982Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:50.213514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:50.213590Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.213651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:50.213690Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:50.213699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.214871Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.214903Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:50.214952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.214961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:50.214965Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:50.214969Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:50.215387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.215397Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:50.215402Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:50.215747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.215757Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.215762Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:50.215767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:50.216331Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:50.216833Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:50.216869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:50.217033Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.217056Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:50.217064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:50.217128Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:31:02.814625Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1006:0 128 -> 240 2025-06-03T10:31:02.814665Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:02.814678Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:31:02.814689Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:02.815139Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.815230Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 FAKE_COORDINATOR: Erasing txId 1006 2025-06-03T10:31:02.815678Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:02.815691Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:02.815753Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:31:02.815775Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1006, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:02.815803Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:02.815807Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:207:2208], at schemeshard: 72057594046678944, txId: 1006, path id: 1 2025-06-03T10:31:02.815812Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:207:2208], at schemeshard: 72057594046678944, txId: 1006, path id: 5 2025-06-03T10:31:02.815815Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [49:207:2208], at schemeshard: 72057594046678944, txId: 1006, path id: 3 2025-06-03T10:31:02.815897Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1006:0, at schemeshard: 72057594046678944 2025-06-03T10:31:02.815908Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1006:0 ProgressState 2025-06-03T10:31:02.815929Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1006:0 progress is 1/1 
2025-06-03T10:31:02.815935Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:02.815941Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1006:0 progress is 1/1 2025-06-03T10:31:02.815945Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:02.815952Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1006, ready parts: 1/1, is published: false 2025-06-03T10:31:02.815960Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1006 ready parts: 1/1 2025-06-03T10:31:02.815966Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1006:0 2025-06-03T10:31:02.815972Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1006:0 2025-06-03T10:31:02.815996Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:31:02.816001Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:31:02.816008Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1006, publications: 3, subscribers: 0 2025-06-03T10:31:02.816012Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 1], 15 2025-06-03T10:31:02.816016Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:02.816020Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1006, [OwnerId: 72057594046678944, LocalPathId: 5], 18446744073709551615 2025-06-03T10:31:02.816137Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816154Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816160Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:02.816167Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:31:02.816173Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:31:02.816299Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:02.816311Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:31:02.816327Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:02.816436Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816450Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 15 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816455Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:02.816460Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 15 2025-06-03T10:31:02.816469Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:02.816589Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816604Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.816610Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1006 2025-06-03T10:31:02.816615Z node 49 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1006, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:02.816619Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:02.816632Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1006, subscribers: 0 2025-06-03T10:31:02.817906Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.817961Z node 49 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 
2025-06-03T10:31:02.817988Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 2025-06-03T10:31:02.818004Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1006 TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-03T10:31:02.818067Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-03T10:31:02.818074Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-03T10:31:02.818151Z node 49 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-03T10:31:02.818171Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-03T10:31:02.818175Z node 49 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [49:446:2436] TestWaitNotification: OK eventTxId 1006
>> THiveTest::TestGetStorageInfo [GOOD]
>> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned
>> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD]
>> TExternalDataSourceTest::ReadOnlyMode [GOOD]
>> TExternalTableTestReboots::CreateExternalTableWithReboots [GOOD]
>> YdbIndexTable::MultiShardTableOneUniqIndex
>> KikimrIcGateway::TestDropResourcePool [GOOD]
>> THiveTest::TestFollowerCompatability2 [GOOD]
>> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD]
>> TPopulatorTest::Boot
>> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD]
>> THiveTest::TestCreateTabletChangeToExternal
>> KqpScanSpilling::HandleErrorsCorrectly
>> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD]
>> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling
>> TPopulatorTest::Boot [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::PreventDeletionOfDependentDataSources [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:31:03.206434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:03.206485Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:03.206496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:03.206504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:03.206528Z node 1
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:03.206535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:03.206547Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:03.206566Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:03.206723Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:03.206836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:03.229331Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:03.229368Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:03.229504Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:03.233673Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:03.233837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:03.233880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:03.237771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:03.238049Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:03.238260Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.238386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:03.239284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.239367Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:03.239768Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:03.239786Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.239808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:03.239818Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: 
TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:03.239826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:03.239883Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.242741Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:31:03.272304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:03.272463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.272559Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:03.272624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:03.272640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.273736Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.273778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:03.273852Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.273865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:03.273873Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:03.273881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:03.274496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.274511Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:03.274518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: 
Change state for txid 1:0 3 -> 128 2025-06-03T10:31:03.274907Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.274919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.274927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.274936Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:03.275811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:03.276363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:03.276432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:03.276676Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.276715Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:03.276727Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.276824Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:03.276834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.276884Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:03.276898Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:03.277442Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:03.277457Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... sg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.549837Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.549843Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:31:03.549849Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:31:03.549855Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:03.550159Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.550175Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.550184Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:31:03.550190Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:03.550195Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:03.550519Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.550537Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 101 2025-06-03T10:31:03.550543Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 101 2025-06-03T10:31:03.550548Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 101, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-03T10:31:03.550554Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 
2025-06-03T10:31:03.550568Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 101, subscribers: 0 2025-06-03T10:31:03.550963Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:31:03.551052Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 2025-06-03T10:31:03.551316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 101 TestModificationResult got TxId: 101, wait until txId: 101 TestWaitNotification wait txId: 101 2025-06-03T10:31:03.551377Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:31:03.551386Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:31:03.551479Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:31:03.551499Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:31:03.551506Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:339:2329] TestWaitNotification: OK eventTxId 101 2025-06-03T10:31:03.551599Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:03.551645Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalTable" took 61us result status StatusSuccess 2025-06-03T10:31:03.551747Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalTable CreateFinished: true CreateTxId: 101 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" 
PathId { OwnerId: 72057594046678944 LocalId: 3 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false } Content: "" } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TestModificationResults wait txId: 103 2025-06-03T10:31:03.552673Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpDropExternalDataSource Drop { Name: "ExternalDataSource" } } TxId: 103 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:03.552716Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_external_data_source.cpp:116: [72057594046678944] TDropExternalDataSource Propose: opId# 103:0, path# /MyRoot/ExternalDataSource 2025-06-03T10:31:03.552735Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 103:1, propose status:StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, at schemeshard: 72057594046678944 2025-06-03T10:31:03.553280Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 103, response: Status: StatusSchemeError Reason: "Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable" TxId: 103 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:03.553340Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 103, database: /MyRoot, subject: , status: StatusSchemeError, reason: Other entities depend on this data source, please remove them at the beginning: /MyRoot/ExternalTable, operation: DROP EXTERNAL DATA SOURCE, path: /MyRoot/ExternalDataSource TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:31:03.553407Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:31:03.553416Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:31:03.553497Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:31:03.553514Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:31:03.553520Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [2:347:2337] TestWaitNotification: OK eventTxId 103 2025-06-03T10:31:03.553596Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/ExternalDataSource" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:03.553633Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/ExternalDataSource" took 43us result status StatusSuccess 2025-06-03T10:31:03.553723Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: 
StatusSuccess Path: "/MyRoot/ExternalDataSource" PathDescription { Self { Name: "ExternalDataSource" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExternalDataSource CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalDataSourceVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalDataSourceDescription { Name: "ExternalDataSource" PathId { OwnerId: 72057594046678944 LocalId: 2 } Version: 1 SourceType: "ObjectStorage" Location: "https://s3.cloud.net/my_bucket" Installation: "" Auth { None { } } Properties { } References { References { Path: "/MyRoot/ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 3 } } } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
|67.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest
|67.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest
>> THiveTest::TestCreateTabletChangeToExternal [GOOD]
>> THiveTest::TestExternalBoot
>> YdbTableSplit::SplitByLoadWithUpdates
|67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestCleanUpDataWithMockDisk [GOOD]
Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed!
new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvAcquireLock ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! 
new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:81:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:83:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:87:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:86:2114] Leader for TabletID 72057594037927937 is [7:86:2114] sender: [7:172:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:86:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:89:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:90:2057] recipient: [8:88:2117] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:92:2057] recipient: [8:88:2117] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! 
new actor is[8:91:2118] Leader for TabletID 72057594037927937 is [8:91:2118] sender: [8:177:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:90:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:93:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:94:2057] recipient: [9:92:2121] Leader for TabletID 72057594037927937 is [9:95:2122] sender: [9:96:2057] recipient: [9:92:2121] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:95:2122] Leader for TabletID 72057594037927937 is [9:95:2122] sender: [9:181:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:90:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:93:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:94:2057] recipient: [10:92:2121] Leader for TabletID 72057594037927937 is [10:95:2122] sender: [10:96:2057] recipient: [10:92:2121] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:95:2122] Leader for TabletID 72057594037927937 is [10:95:2122] sender: [10:181:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:92:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:95:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:96:2057] recipient: [11:94:2123] Leader for TabletID 72057594037927937 is [11:97:2124] sender: [11:98:2057] recipient: [11:94:2123] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! 
new actor is[11:97:2124] Leader for TabletID 72057594037927937 is [11:97:2124] sender: [11:183:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:1 ... 94037927937 is [35:100:2125] sender: [35:186:2057] recipient: [35:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [36:57:2057] recipient: [36:54:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:60:2057] recipient: [36:54:2097] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:77:2057] recipient: [36:17:2064] !Reboot 72057594037927937 (actor [36:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:96:2057] recipient: [36:39:2086] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:99:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [36:59:2099] sender: [36:100:2057] recipient: [36:98:2124] Leader for TabletID 72057594037927937 is [36:101:2125] sender: [36:102:2057] recipient: [36:98:2124] !Reboot 72057594037927937 (actor [36:59:2099]) rebooted! !Reboot 72057594037927937 (actor [36:59:2099]) tablet resolver refreshed! new actor is[36:101:2125] Leader for TabletID 72057594037927937 is [36:101:2125] sender: [36:187:2057] recipient: [36:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [37:57:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:60:2057] recipient: [37:54:2097] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:77:2057] recipient: [37:17:2064] !Reboot 72057594037927937 (actor [37:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:99:2057] recipient: [37:39:2086] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:102:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [37:59:2099] sender: [37:103:2057] recipient: [37:101:2127] Leader for TabletID 72057594037927937 is [37:104:2128] sender: [37:105:2057] recipient: [37:101:2127] !Reboot 72057594037927937 (actor [37:59:2099]) rebooted! !Reboot 72057594037927937 (actor [37:59:2099]) tablet resolver refreshed! new actor is[37:104:2128] Leader for TabletID 72057594037927937 is [37:104:2128] sender: [37:190:2057] recipient: [37:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:54:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [38:57:2057] recipient: [38:54:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:60:2057] recipient: [38:54:2097] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:77:2057] recipient: [38:17:2064] !Reboot 72057594037927937 (actor [38:59:2099]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! 
Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:99:2057] recipient: [38:39:2086] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:102:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [38:59:2099] sender: [38:103:2057] recipient: [38:101:2127] Leader for TabletID 72057594037927937 is [38:104:2128] sender: [38:105:2057] recipient: [38:101:2127] !Reboot 72057594037927937 (actor [38:59:2099]) rebooted! !Reboot 72057594037927937 (actor [38:59:2099]) tablet resolver refreshed! new actor is[38:104:2128] Leader for TabletID 72057594037927937 is [38:104:2128] sender: [38:190:2057] recipient: [38:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [39:57:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:60:2057] recipient: [39:53:2097] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:77:2057] recipient: [39:17:2064] !Reboot 72057594037927937 (actor [39:59:2099]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:100:2057] recipient: [39:39:2086] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:103:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [39:59:2099] sender: [39:104:2057] recipient: [39:102:2127] Leader for TabletID 72057594037927937 is [39:105:2128] sender: [39:106:2057] recipient: [39:102:2127] !Reboot 72057594037927937 (actor [39:59:2099]) rebooted! !Reboot 72057594037927937 (actor [39:59:2099]) tablet resolver refreshed! new actor is[39:105:2128] Leader for TabletID 72057594037927937 is [39:105:2128] sender: [39:191:2057] recipient: [39:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [40:57:2057] recipient: [40:52:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:60:2057] recipient: [40:52:2097] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:77:2057] recipient: [40:17:2064] !Reboot 72057594037927937 (actor [40:59:2099]) on event NKikimr::TEvKeyValue::TEvCollect ! Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:101:2057] recipient: [40:39:2086] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:104:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [40:59:2099] sender: [40:105:2057] recipient: [40:103:2128] Leader for TabletID 72057594037927937 is [40:106:2129] sender: [40:107:2057] recipient: [40:103:2128] !Reboot 72057594037927937 (actor [40:59:2099]) rebooted! !Reboot 72057594037927937 (actor [40:59:2099]) tablet resolver refreshed! new actor is[40:106:2129] Leader for TabletID 72057594037927937 is [40:106:2129] sender: [40:126:2057] recipient: [40:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [41:57:2057] recipient: [41:53:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:60:2057] recipient: [41:53:2097] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:77:2057] recipient: [41:17:2064] !Reboot 72057594037927937 (actor [41:59:2099]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! 
Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:102:2057] recipient: [41:39:2086] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:105:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [41:59:2099] sender: [41:106:2057] recipient: [41:104:2129] Leader for TabletID 72057594037927937 is [41:107:2130] sender: [41:108:2057] recipient: [41:104:2129] !Reboot 72057594037927937 (actor [41:59:2099]) rebooted! !Reboot 72057594037927937 (actor [41:59:2099]) tablet resolver refreshed! new actor is[41:107:2130] Leader for TabletID 72057594037927937 is [41:107:2130] sender: [41:127:2057] recipient: [41:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:57:2057] recipient: [42:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [42:57:2057] recipient: [42:53:2097] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:60:2057] recipient: [42:53:2097] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:77:2057] recipient: [42:17:2064] !Reboot 72057594037927937 (actor [42:59:2099]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:105:2057] recipient: [42:39:2086] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:108:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [42:59:2099] sender: [42:109:2057] recipient: [42:107:2132] Leader for TabletID 72057594037927937 is [42:110:2133] sender: [42:111:2057] recipient: [42:107:2132] !Reboot 72057594037927937 (actor [42:59:2099]) rebooted! !Reboot 72057594037927937 (actor [42:59:2099]) tablet resolver refreshed! new actor is[42:110:2133] Leader for TabletID 72057594037927937 is [42:110:2133] sender: [42:196:2057] recipient: [42:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:57:2057] recipient: [43:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [43:57:2057] recipient: [43:52:2097] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:60:2057] recipient: [43:52:2097] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:77:2057] recipient: [43:17:2064] !Reboot 72057594037927937 (actor [43:59:2099]) on event NKikimr::TEvKeyValue::TEvCleanUpDataRequest ! Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:105:2057] recipient: [43:39:2086] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:108:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [43:59:2099] sender: [43:109:2057] recipient: [43:107:2132] Leader for TabletID 72057594037927937 is [43:110:2133] sender: [43:111:2057] recipient: [43:107:2132] !Reboot 72057594037927937 (actor [43:59:2099]) rebooted! !Reboot 72057594037927937 (actor [43:59:2099]) tablet resolver refreshed! new actor is[43:110:2133] Leader for TabletID 72057594037927937 is [43:110:2133] sender: [43:196:2057] recipient: [43:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:57:2057] recipient: [44:52:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [44:57:2057] recipient: [44:52:2097] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:60:2057] recipient: [44:52:2097] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:77:2057] recipient: [44:17:2064] !Reboot 72057594037927937 (actor [44:59:2099]) on event NKikimr::TEvKeyValue::TEvForceTabletDataCleanup ! 
Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:105:2057] recipient: [44:39:2086] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:108:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [44:59:2099] sender: [44:109:2057] recipient: [44:107:2132] Leader for TabletID 72057594037927937 is [44:110:2133] sender: [44:111:2057] recipient: [44:107:2132] !Reboot 72057594037927937 (actor [44:59:2099]) rebooted! !Reboot 72057594037927937 (actor [44:59:2099]) tablet resolver refreshed! new actor is[44:110:2133] Leader for TabletID 72057594037927937 is [44:110:2133] sender: [44:196:2057] recipient: [44:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:57:2057] recipient: [45:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:57:2057] recipient: [45:53:2097] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:60:2057] recipient: [45:53:2097] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:77:2057] recipient: [45:17:2064] !Reboot 72057594037927937 (actor [45:59:2099]) on event NKikimr::TEvTablet::TEvFollowerGcApplied ! Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:110:2057] recipient: [45:39:2086] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:113:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [45:59:2099] sender: [45:114:2057] recipient: [45:112:2136] Leader for TabletID 72057594037927937 is [45:115:2137] sender: [45:116:2057] recipient: [45:112:2136] !Reboot 72057594037927937 (actor [45:59:2099]) rebooted! !Reboot 72057594037927937 (actor [45:59:2099]) tablet resolver refreshed! new actor is[45:115:2137] Leader for TabletID 72057594037927937 is [45:115:2137] sender: [45:201:2057] recipient: [45:17:2064] Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:57:2057] recipient: [46:53:2097] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:57:2057] recipient: [46:53:2097] Leader for TabletID 72057594037927937 is [46:59:2099] sender: [46:60:2057] recipient: [46:53:2097] Leader for TabletID 72057594037927937 is [46:59:2099] sender: [46:77:2057] recipient: [46:17:2064] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_data_source/unittest >> TExternalDataSourceTest::ReadOnlyMode [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:128:2058] recipient: [1:110:2141] 2025-06-03T10:31:03.477373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:03.477404Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:03.477411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:03.477418Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 
2025-06-03T10:31:03.477436Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:03.477441Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:03.477453Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:03.477471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:03.477643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:03.477732Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:03.494231Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:03.494252Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:03.494361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:03.497835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:03.497973Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:03.498012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:03.501886Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:03.502081Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:03.502264Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.502370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:03.503071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.503132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:03.503388Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:03.503396Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.503410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:03.503416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:03.503421Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:03.503460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.504854Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:240:2058] recipient: [1:15:2062] 2025-06-03T10:31:03.526457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:03.526563Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.526636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:03.526697Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:03.526713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.527320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.527349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:03.527391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.527401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:03.527405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:03.527410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:03.527746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.527755Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:03.527759Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:03.528041Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.528055Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.528064Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.528073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:03.529047Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:03.529610Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:03.529665Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:03.529892Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.529925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:03.529934Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.530021Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:03.530030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:03.530083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:03.530097Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:03.530604Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 
2025-06-03T10:31:03.530615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594 ... 28:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:03.678535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 128 ready parts: 1/1 2025-06-03T10:31:03.678574Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 128 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:03.678840Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.678856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.678863Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-03T10:31:03.678870Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:31:03.678878Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:03.679082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.679095Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 2 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.679099Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 128 2025-06-03T10:31:03.679105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 2 2025-06-03T10:31:03.679110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:03.679122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 128, ready parts: 0/1, is published: true 2025-06-03T10:31:03.679778Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 128:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:128 msg type: 269090816 
2025-06-03T10:31:03.679831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 128, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 128 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 128 at step: 5000004 2025-06-03T10:31:03.680425Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:03.680462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 128 Coordinator: 72057594046316545 AckTo { RawX1: 137 RawX2: 4294969454 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:03.680475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_mkdir.cpp:33: MkDir::TPropose operationId# 128:0 HandleReply TEvPrivate::TEvOperationPlan, step: 5000004, at schemeshard: 72057594046678944 2025-06-03T10:31:03.680524Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 128:0 128 -> 240 2025-06-03T10:31:03.680569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:03.680583Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:03.680662Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-03T10:31:03.680696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-03T10:31:03.681171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:03.681186Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:03.681240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 128, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:31:03.681258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.681263Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:451:2408], at schemeshard: 72057594046678944, txId: 128, path id: 1 2025-06-03T10:31:03.681269Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:451:2408], at schemeshard: 72057594046678944, txId: 128, path id: 4 FAKE_COORDINATOR: Erasing txId 128 2025-06-03T10:31:03.681396Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 128:0, at schemeshard: 
72057594046678944 2025-06-03T10:31:03.681407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 128:0 ProgressState 2025-06-03T10:31:03.681425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#128:0 progress is 1/1 2025-06-03T10:31:03.681431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-03T10:31:03.681437Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#128:0 progress is 1/1 2025-06-03T10:31:03.681440Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-03T10:31:03.681445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 128, ready parts: 1/1, is published: false 2025-06-03T10:31:03.681451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 128 ready parts: 1/1 2025-06-03T10:31:03.681457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 128:0 2025-06-03T10:31:03.681463Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 128:0 2025-06-03T10:31:03.681482Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:03.681493Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 128, publications: 2, subscribers: 0 2025-06-03T10:31:03.681499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 1], 9 2025-06-03T10:31:03.681503Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 128, [OwnerId: 72057594046678944, LocalPathId: 4], 3 2025-06-03T10:31:03.681641Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.681655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.681661Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 128 2025-06-03T10:31:03.681668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:31:03.681675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:03.681803Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 
72057594046678944, cookie: 128 2025-06-03T10:31:03.681816Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 4 LocalPathId: 4 Version: 3 PathOwnerId: 72057594046678944, cookie: 128 2025-06-03T10:31:03.681821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 128 2025-06-03T10:31:03.681826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 128, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 3 2025-06-03T10:31:03.681831Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:03.681842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 128, subscribers: 0 2025-06-03T10:31:03.682721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 2025-06-03T10:31:03.682937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 128 TestModificationResult got TxId: 128, wait until txId: 128 >> YdbTableSplit::RenameTablesAndSplit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestDropResourcePool [GOOD] Test command err: Trying to start YDB, gRPC: 18649, MsgBus: 26570 2025-06-03T10:31:01.353500Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668712549801854:2265];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:01.353533Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008c8/r3tmp/tmpp7AR4J/pdisk_1.dat 2025-06-03T10:31:01.443564Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:01.443801Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668712549801628:2079] 1748946661352558 != 1748946661352561 TServer::EnableGrpc on GrpcPort 18649, node 1 2025-06-03T10:31:01.476754Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:01.476772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:01.476775Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:01.476835Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26570 2025-06-03T10:31:01.501763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:01.501798Z node 1 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:01.502857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26570 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:01.576457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.582509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:01.813867Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668712549802330:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.813903Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.862643Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.926432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.936468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.950020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.960179Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668712549802642:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.960211Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.960316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668712549802647:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.961177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480 2025-06-03T10:31:01.967755Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668712549802649:2366], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-03T10:31:02.064431Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668716844769996:2553] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 21651, MsgBus: 29405 2025-06-03T10:31:02.477989Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668717826999947:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:02.478226Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008c8/r3tmp/tmpKh6AWj/pdisk_1.dat 2025-06-03T10:31:02.497153Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.497452Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668717826999924:2079] 1748946662477758 != 1748946662477761 TServer::EnableGrpc on GrpcPort 21651, node 2 2025-06-03T10:31:02.506854Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:02.506884Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:02.506887Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:02.506950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29405 TClient is connected to server localhost:29405 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:02.584873Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:02.584908Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:02.585199Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.586038Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:31:02.588818Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:02.919306Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668717827000626:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.919352Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.925870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.935009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.948000Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.956696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.973128Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668717827000936:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.973160Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.973206Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668717827000941:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.974438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480 2025-06-03T10:31:02.981840Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668717827000943:2366], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-03T10:31:03.046276Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668722121968290:2551] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
: Info: Success, code: 4 2025-06-03T10:31:03.084959Z node 2 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 2, TabletId: 72075186224037888 not found Trying to start YDB, gRPC: 23431, MsgBus: 24621 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008c8/r3tmp/tmpyq7xjk/pdisk_1.dat 2025-06-03T10:31:03.485325Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:03.503081Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:03.505010Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668720231970249:2079] 1748946663478189 != 1748946663478192 TServer::EnableGrpc on GrpcPort 23431, node 3 2025-06-03T10:31:03.515198Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:03.515215Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:03.515218Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:03.515293Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24621 TClient is connected to server localhost:24621 2025-06-03T10:31:03.584985Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected WaitRootIsUp '2025-06-03T10:31:03.585018Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting Root'... TClient::Ls request: Root 2025-06-03T10:31:03.586080Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:31:03.589213Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:03.597387Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::Boot [GOOD] Test command err: 2025-06-03T10:31:04.292401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:04.292428Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded >> YdbTableSplit::SplitByLoadWithReads ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::CreateExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:46.414429Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:46.414457Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.414463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:46.414469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:46.414486Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:46.414490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:46.414499Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:46.414516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:46.414614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:46.414679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:46.427713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:46.427748Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:46.427856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.431405Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:46.431578Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:46.431621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:46.433980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:46.434057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:46.434210Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.434295Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:46.434966Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.435037Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:46.435359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:46.435375Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:46.435393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:46.435403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:46.435410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:46.435460Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:46.437350Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:46.461495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:46.461579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.461651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:46.461702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:46.461715Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.462721Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.462756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:46.462836Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.462849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:46.462856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:46.462862Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:46.463477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463494Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:46.463968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463982Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:46.463989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.463996Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:46.464758Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:46.465312Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:46.465365Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:46.465598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:46.465631Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:46.465639Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:46.465719Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:31:03.806463Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:31:03.806480Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 5] 2025-06-03T10:31:03.806492Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:03.806517Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.806523Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-03T10:31:03.806530Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-03T10:31:03.806534Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 5 2025-06-03T10:31:03.806541Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [68:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-03T10:31:03.806585Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.806592Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-03T10:31:03.806606Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:03.806611Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:03.806617Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:03.806620Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:03.806625Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-03T10:31:03.806631Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:03.806637Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1004:0 2025-06-03T10:31:03.806641Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 
1004:0 2025-06-03T10:31:03.806656Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:31:03.806660Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:31:03.806667Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1004, publications: 3, subscribers: 1 2025-06-03T10:31:03.806672Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:03.806676Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 5 2025-06-03T10:31:03.806680Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 5], 2 2025-06-03T10:31:03.806894Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.806908Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 5 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.806912Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:03.806917Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 5 2025-06-03T10:31:03.806925Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:03.807374Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.807394Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.807399Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:03.807404Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 2 2025-06-03T10:31:03.807409Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 
2025-06-03T10:31:03.807616Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.807632Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.807637Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:03.807642Z node 68 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:03.807646Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:03.807658Z node 68 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 1 2025-06-03T10:31:03.807664Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [68:305:2295] 2025-06-03T10:31:03.808006Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.808370Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.808711Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:03.808737Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:31:03.808745Z node 68 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [68:334:2324] TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 TestWaitNotification: OK eventTxId 1004 2025-06-03T10:31:03.808894Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirExternalTable/ExternalTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:03.808949Z node 68 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirExternalTable/ExternalTable" took 66us result status StatusSuccess 2025-06-03T10:31:03.809040Z node 68 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirExternalTable/ExternalTable" PathDescription { Self { Name: "ExternalTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: 
EPathTypeExternalTable CreateFinished: true CreateTxId: 1004 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ExternalTableVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } ExternalTableDescription { Name: "ExternalTable" PathId { OwnerId: 72057594046678944 LocalId: 5 } Version: 1 SourceType: "ObjectStorage" DataSourcePath: "/MyRoot/ExternalDataSource" Location: "/" Columns { Name: "a" Type: "Int32" TypeId: 1 Id: 1 NotNull: true } Columns { Name: "b" Type: "Int32" TypeId: 1 Id: 2 NotNull: true } Content: "" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944
>> THiveTest::TestExternalBoot [GOOD]
>> THiveTest::TestHiveBalancerWithImmovableTablets [GOOD]
>> KikimrIcGateway::TestLoadTokenSecretValueFromExternalDataSourceMetadata [GOOD]
>> KikimrIcGateway::TestSecretsExistingValidation
>> THiveTest::TestHiveBalancerHighUsage
>> KikimrIcGateway::TestLoadServiceAccountSecretValueFromExternalDataSourceMetadata [GOOD]
>> THiveTest::TestExternalBootWhenLocked
>> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestGetStorageInfoDeleteTabletBeforeAssigned [GOOD]
Test command err: 2025-06-03T10:30:31.816651Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:31.817504Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:31.817556Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:31.817723Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:68:2073] ControllerId# 72057594037932033 2025-06-03T10:30:31.817729Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:31.817769Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:31.817785Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:31.819117Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:31.819136Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring
Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:31.819487Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:74:2077] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819517Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:75:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819540Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:76:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819562Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:77:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819585Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:78:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819611Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:79:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819635Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:80:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.819641Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:31.819657Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:68:2073] 2025-06-03T10:30:31.819663Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:68:2073] 2025-06-03T10:30:31.819670Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:31.819676Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:31.819808Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:31.819827Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:31.820423Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:31.820453Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:31.820606Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:88:2074] ControllerId# 72057594037932033 2025-06-03T10:30:31.820610Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:31.820624Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:31.820642Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:31.821854Z node 3 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:31.821929Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:31.821939Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 
IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:31.822339Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:94:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822382Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:95:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822412Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:96:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822449Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:97:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822481Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:98:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822514Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:99:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822554Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:100:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.822561Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:31.822574Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:88:2074] 2025-06-03T10:30:31.822578Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:88:2074] 2025-06-03T10:30:31.822585Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:31.822591Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:31.822656Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:31.822714Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:31.823285Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:31.823326Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:31.823487Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:31.823792Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:31.823803Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:31.823988Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:111:2077] ControllerId# 72057594037932033 2025-06-03T10:30:31.823992Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} 
SendRegisterNode 2025-06-03T10:30:31.824013Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:31.824028Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:31.824972Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:31.827046Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:31.827058Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:31.827320Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:119:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827350Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:120:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827374Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:121:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827393Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:122:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827417Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:123:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827441Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:124:2087] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827460Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:125:2088] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.827463Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:31.827474Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:111:2077] 2025-06-03T10:30:31.827477Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:111:2077] 2025-06-03T10:30:31.827484Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:31.827493Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:31.827680Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:68:2073] 2025-06-03T10:30:31.827690Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:31.827696Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:31.827727Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:31.827752Z node 3 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:31.827785Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [3:88:2074] 2025-06-03T10:30:31.827789Z node 3 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForIni ... 
are# 0.998955} 2025-06-03T10:31:03.889423Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-03T10:31:03.889447Z node 64 :HIVE DEBUG: tx__create_tablet.cpp:503: HIVE#72057594037927937 THive::TTxCreateTablet::Complete (72057594037927937,0) TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040201 [64:263:2259] {EvCreateTabletReply Status: OK Owner: 72057594037927937 OwnerIdx: 0 TabletID: 72075186224037888 Origin: 72057594037927937}} 2025-06-03T10:31:03.889540Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [64:311:2293] 2025-06-03T10:31:03.889547Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [64:311:2293] 2025-06-03T10:31:03.889558Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [64:311:2293] 2025-06-03T10:31:03.889571Z node 64 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:03.889582Z node 64 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 64 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [64:267:2261] 2025-06-03T10:31:03.889593Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [64:311:2293] 2025-06-03T10:31:03.889600Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [64:311:2293] 2025-06-03T10:31:03.889606Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [64:311:2293] 2025-06-03T10:31:03.889623Z node 64 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [64:311:2293] 2025-06-03T10:31:03.889655Z node 64 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([64:311:2293]) [64:312:2294] 2025-06-03T10:31:03.889667Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [64:311:2293] 2025-06-03T10:31:03.889673Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [64:311:2293] 2025-06-03T10:31:03.889678Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [64:311:2293] 2025-06-03T10:31:03.889685Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [64:311:2293] 2025-06-03T10:31:03.889691Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [64:311:2293] 2025-06-03T10:31:03.889701Z node 64 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [64:310:2292] EventType# 268697621 2025-06-03T10:31:03.889716Z node 64 :HIVE DEBUG: hive_impl.cpp:894: HIVE#72057594037927937 THive::Handle::TEvGetTabletStorageInfo TabletId=72075186224037888 2025-06-03T10:31:03.889779Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [64:314:2296] 2025-06-03T10:31:03.889784Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [64:314:2296] 2025-06-03T10:31:03.889792Z node 64 :TABLET_RESOLVER DEBUG: 
tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:03.889803Z node 64 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 64 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [64:267:2261] 2025-06-03T10:31:03.889810Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [64:314:2296] 2025-06-03T10:31:03.889817Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [64:314:2296] 2025-06-03T10:31:03.889826Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [64:314:2296] 2025-06-03T10:31:03.889831Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [64:314:2296] 2025-06-03T10:31:03.889840Z node 64 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [64:314:2296] 2025-06-03T10:31:03.889858Z node 64 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([64:314:2296]) [64:315:2297] 2025-06-03T10:31:03.889865Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [64:314:2296] 2025-06-03T10:31:03.889870Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [64:314:2296] 2025-06-03T10:31:03.889875Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [64:314:2296] 2025-06-03T10:31:03.889880Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [64:314:2296] 2025-06-03T10:31:03.889885Z node 64 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [64:314:2296] 2025-06-03T10:31:03.889891Z node 64 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [64:313:2295] EventType# 268697615 2025-06-03T10:31:03.889914Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} queued, type NKikimr::NHive::TTxDeleteTablet 2025-06-03T10:31:03.889923Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:31:03.889945Z node 64 :HIVE DEBUG: tx__delete_tablet.cpp:74: HIVE#72057594037927937 THive::TTxDeleteTablet::Execute() ShardOwnerId: 72057594037927937 ShardLocalIdx: 0 TxId_Deprecated: 0 2025-06-03T10:31:03.889956Z node 64 :HIVE DEBUG: tx__delete_tablet.cpp:19: HIVE#72057594037927937 THive::TTxDeleteTablet::Execute Tablet 72075186224037888 2025-06-03T10:31:03.889994Z node 64 :HIVE DEBUG: tablet_info.cpp:125: HIVE#72057594037927937 Tablet(Dummy.72075186224037888.Leader.0) VolatileState: Unknown -> Stopped 2025-06-03T10:31:03.890012Z node 64 :HIVE DEBUG: tx__delete_tablet.cpp:67: HIVE#72057594037927937 THive::TTxDeleteTablet::Execute() result Status: OK Origin: 72057594037927937 TxId_Deprecated: 0 ShardOwnerId: 72057594037927937 ShardLocalIdx: 0 2025-06-03T10:31:03.890037Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} hope 1 -> done Change{5, redo 102b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 
2025-06-03T10:31:03.890048Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxDeleteTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:31:03.890085Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} queued, type NKikimr::NHive::TTxDeleteTabletResult 2025-06-03T10:31:03.890091Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:31:03.890098Z node 64 :HIVE DEBUG: tx__delete_tablet_result.cpp:26: HIVE#72057594037927937 THive::TTxDeleteTabletResult::Execute(72075186224037888 OK) 2025-06-03T10:31:03.890137Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} hope 1 -> done Change{6, redo 106b alter 0b annex 0, ~{ 16, 1 } -{ }, 0 gb} 2025-06-03T10:31:03.890143Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} Tx{6, NKikimr::NHive::TTxDeleteTabletResult} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:31:03.900546Z node 64 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [185eac4b9c06d110] bootstrap ActorId# [64:317:2299] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:5:0:0:157:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-03T10:31:03.900625Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [185eac4b9c06d110] Id# [72057594037927937:2:5:0:0:157:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:31:03.900638Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [185eac4b9c06d110] restore Id# [72057594037927937:2:5:0:0:157:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:31:03.900654Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [185eac4b9c06d110] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG33 2025-06-03T10:31:03.900661Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [185eac4b9c06d110] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:5:0:0:157:1] Marker# BPG32 2025-06-03T10:31:03.900704Z node 64 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [64:35:2079] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:5:0:0:157:1] FDS# 157 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:31:03.901389Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [185eac4b9c06d110] received {EvVPutResult Status# OK ID# [72057594037927937:2:5:0:0:157:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 20 } Cost# 81236 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 21 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:31:03.901434Z node 64 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [185eac4b9c06d110] Result# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-03T10:31:03.901446Z node 64 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [185eac4b9c06d110] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:31:03.901479Z node 64 :BS_PROXY_PUT DEBUG: 
{BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.213 sample PartId# [72057594037927937:2:5:0:0:157:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 64 } TEvVPutResult{ TimestampMs# 0.917 VDiskId# [0:1:0:0:0] NodeId# 64 Status# OK } ] } 2025-06-03T10:31:03.901519Z node 64 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:5:0:0:157:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-03T10:31:03.901558Z node 64 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:6} commited cookie 1 for step 5 2025-06-03T10:31:03.901592Z node 64 :HIVE DEBUG: tx__delete_tablet.cpp:136: HIVE#72057594037927937 THive::TTxDeleteTablet::Complete() SideEffects: {Notifications: 0x10040206 [64:313:2295] NKikimrHive.TEvDeleteTabletReply Status: OK Origin: 72057594037927937 TxId_Deprecated: 0 ShardOwnerId: 72057594037927937 ShardLocalIdx: 0} 2025-06-03T10:31:03.901628Z node 64 :HIVE DEBUG: tx__delete_tablet_result.cpp:72: HIVE#72057594037927937 THive::TTxDeleteTabletResult(72075186224037888)::Complete SideEffects {Notifications: 0x1004020B [64:310:2292] NKikimrHive.TEvGetTabletStorageInfoResult TabletID: 72075186224037888 Status: ERROR StatusMessage: "Tablet deleted"}
|67.7%| [TA] {RESULT} $(B)/ydb/core/persqueue/ut/ut_with_sdk/test-results/unittest/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_subdomain/unittest >> TSchemeShardSubDomainTest::TopicDiskSpaceQuotas [GOOD]
Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:38.036288Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:38.036322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:38.036328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:38.036336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:38.036355Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:38.036361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:38.036378Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:38.036394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s,
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:38.036543Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:38.036621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:38.053385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:38.053412Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:38.057514Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:38.057630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:38.057671Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:38.059711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:38.059792Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:38.059916Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.059982Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:38.060644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:38.060706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:38.061043Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061059Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:38.061072Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:38.061082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:38.061089Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:38.061111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.062594Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:38.084648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { 
Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:38.084738Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.084803Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:38.084862Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:38.084873Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.085576Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.085614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:38.085674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.085686Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:38.085693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:38.085699Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:38.086184Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.086200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:38.086207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:38.086733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.086746Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:38.086753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.086761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:38.087565Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 
message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:38.088070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:38.088112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:38.088319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:38.088348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:38.088371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.088450Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:38.088460Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:38.088505Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:38.088537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:38.089048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:38.089059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:38.089118Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T1 ... 
T10:31:03.658159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:03.658215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:03.658226Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 103, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:03.658253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:03.658258Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:31:03.658264Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 2 2025-06-03T10:31:03.658268Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 103, path id: 3 2025-06-03T10:31:03.658388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 103:0, at schemeshard: 72057594046678944 2025-06-03T10:31:03.658398Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 103:0 ProgressState 2025-06-03T10:31:03.658415Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:31:03.658420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:31:03.658426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:0 progress is 1/1 2025-06-03T10:31:03.658429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:31:03.658435Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 1/1, is published: false 2025-06-03T10:31:03.658441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 1/1 2025-06-03T10:31:03.658447Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:31:03.658451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:31:03.658480Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:03.658487Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 103, publications: 2, subscribers: 0 2025-06-03T10:31:03.658491Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 2], 9 2025-06-03T10:31:03.658495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 103, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:31:03.658682Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:31:03.658702Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:31:03.658709Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 103 2025-06-03T10:31:03.658714Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:31:03.658719Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:31:03.658810Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-03T10:31:03.658862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:03.658868Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:31:03.658880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 5 2025-06-03T10:31:03.659002Z node 1 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 2025-06-03T10:31:03.659030Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:31:03.659258Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:31:03.659270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 9 PathOwnerId: 72057594046678944, cookie: 103 2025-06-03T10:31:03.659275Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 
103 2025-06-03T10:31:03.659281Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 103, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 9 2025-06-03T10:31:03.659286Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:31:03.659297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 103, subscribers: 0 2025-06-03T10:31:03.659390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:31:03.659946Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:31:03.660508Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:03.660535Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:31:03.660551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 103 2025-06-03T10:31:03.660564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 TestModificationResult got TxId: 103, wait until txId: 103 TestWaitNotification wait txId: 103 2025-06-03T10:31:03.660675Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 103: send EvNotifyTxCompletion 2025-06-03T10:31:03.660684Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 103 2025-06-03T10:31:03.660767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 103, at schemeshard: 72057594046678944 2025-06-03T10:31:03.660787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:31:03.660793Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:771:2682] TestWaitNotification: OK eventTxId 103 2025-06-03T10:31:04.138964Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/USER_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:04.139079Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/USER_1" took 145us result status StatusSuccess 2025-06-03T10:31:04.139209Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/USER_1" PathDescription { Self { Name: "USER_1" PathId: 2 SchemeshardId: 72057594046678944 PathType: 
EPathTypeSubDomain CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SubDomainStateVersion: 2 SecurityStateVersion: 0 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72075186233409546 TimeCastBucketsPerMediator: 2 Mediators: 72075186233409547 } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } StoragePools { Name: "name_USER_0_kind_hdd-1" Kind: "hdd-1" } StoragePools { Name: "name_USER_0_kind_hdd-2" Kind: "hdd-2" } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 0 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 2 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 DatabaseQuotas { data_size_hard_quota: 1 } SecurityState { } } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TDataShardTrace::TestTraceDistributedUpsert+UseSink >> TDataShardTrace::TestTraceDistributedSelect >> TDataShardTrace::TestTraceDistributedUpsert-UseSink >> TDataShardTrace::TestTraceDistributedSelectViaReadActors >> THiveTest::TestExternalBootWhenLocked [GOOD] >> KikimrIcGateway::TestLoadAwsSecretValueFromExternalDataSourceMetadata [GOOD] >> KikimrIcGateway::TestLoadDataSourceProperties >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] >> TDataShardTrace::TestTraceWriteImmediateOnShard |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> THiveTest::TestHiveBalancerHighUsage [GOOD] >> THiveTest::TestHiveBalancerHighUsageAndColumnShards |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestSecretsExistingValidation [GOOD] Test command err: Trying to start YDB, gRPC: 25017, MsgBus: 27733 2025-06-03T10:31:01.509570Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668713674781056:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:01.509925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008be/r3tmp/tmpZ2yF3j/pdisk_1.dat 2025-06-03T10:31:01.583069Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668713674781037:2079] 1748946661509282 != 1748946661509285 2025-06-03T10:31:01.588025Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25017, node 1 2025-06-03T10:31:01.601599Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:01.601617Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:01.601620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:01.601679Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27733 2025-06-03T10:31:01.650674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:01.650707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:01.651762Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27733 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:01.674412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.691329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:31:01.978795Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668713674781742:2332], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.978831Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.031956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.056418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.070775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.126600Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.137392Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668717969749351:2362], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.137419Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.137515Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668717969749356:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.138459Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715664:3, at schemeshard: 72057594046644480 2025-06-03T10:31:02.141863Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668717969749358:2366], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715664 completed, doublechecking } 2025-06-03T10:31:02.236036Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668717969749409:2555] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 11], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 15270, MsgBus: 28058 2025-06-03T10:31:02.691164Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668718090248439:2164];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:02.691263Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008be/r3tmp/tmptLU4sn/pdisk_1.dat 2025-06-03T10:31:02.707415Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.707671Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668718090248314:2079] 1748946662690370 != 1748946662690373 TServer::EnableGrpc on GrpcPort 15270, node 2 2025-06-03T10:31:02.720966Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:02.720981Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:02.720991Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:02.721049Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28058 TClient is connected to server localhost:28058 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:02.794968Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:02.795003Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting waiting... 
2025-06-03T10:31:02.795490Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.795964Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:02.803709Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:02.817078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:02.841009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:02.854168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.176182Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668722385217239:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resour ... 81474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:03.693639Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:31:03.788179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:31:03.854746Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:31:03.932133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.023942Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.124943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.211337Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:31:04.223551Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.682126Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 3555, MsgBus: 21916 2025-06-03T10:31:05.282272Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668729234263338:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:05.284690Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008be/r3tmp/tmpzEqYxP/pdisk_1.dat 2025-06-03T10:31:05.337630Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668729234263173:2079] 1748946665271447 != 1748946665271450
2025-06-03T10:31:05.338000Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3555, node 3 2025-06-03T10:31:05.357743Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:05.357757Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:05.357760Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:05.357815Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21916 2025-06-03T10:31:05.404773Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:05.404812Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:05.405610Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:21916 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:05.470781Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.473063Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:05.484213Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.511397Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.549902Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:05.581073Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.807482Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668729234264802:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.807513Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.822324Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.833523Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.847231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.860786Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.875663Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.891973Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.910669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.938578Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668729234265457:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.938606Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.938772Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668729234265462:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.939821Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:05.944275Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:05.944403Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668729234265464:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:06.009670Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668733529232811:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TRtmrTest::CreateWithoutTimeCastBuckets |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] >> TExternalTableTestReboots::ParallelCreateDrop [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestExternalBootWhenLocked [GOOD] Test command err: 2025-06-03T10:30:30.516203Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.516974Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.517013Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.517175Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:68:2073] ControllerId# 72057594037932033 2025-06-03T10:30:30.517180Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.517208Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.517224Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.518510Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.518522Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.518878Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:74:2077] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.518910Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:75:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.518934Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:76:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.518956Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:77:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.518977Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:78:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.519003Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:79:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.519023Z node 2 :BS_PROXY DEBUG: 
group_sessions.cpp:83: Group# 0 Actor# [2:67:2072] Create Queue# [2:80:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.519027Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.519037Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:68:2073] 2025-06-03T10:30:30.519041Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:68:2073] 2025-06-03T10:30:30.519048Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.519054Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.519185Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.519203Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.519744Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.519770Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.519900Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:88:2074] ControllerId# 72057594037932033 2025-06-03T10:30:30.519904Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.519915Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.519931Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.520891Z node 3 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.520939Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.520944Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.521363Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:94:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521406Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:95:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521442Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:96:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521483Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:97:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521522Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:98:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521560Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:99:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521601Z node 3 :BS_PROXY DEBUG: 
group_sessions.cpp:83: Group# 0 Actor# [3:87:2073] Create Queue# [3:100:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.521604Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.521613Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:88:2074] 2025-06-03T10:30:30.521617Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [3:88:2074] 2025-06-03T10:30:30.521623Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.521628Z node 3 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.521694Z node 3 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.521751Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.522437Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.522472Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:30.522603Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:30.522839Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:30.522849Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.522986Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:111:2077] ControllerId# 72057594037932033 2025-06-03T10:30:30.522989Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.523004Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.523020Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.523939Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.526932Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.526948Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.527206Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:119:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527230Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:120:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527250Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: 
Group# 0 Actor# [1:110:2076] Create Queue# [1:121:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527277Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:122:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527307Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:123:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527331Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:124:2087] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527350Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:110:2076] Create Queue# [1:125:2088] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.527353Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.527363Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:111:2077] 2025-06-03T10:30:30.527367Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:111:2077] 2025-06-03T10:30:30.527374Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.527384Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.527558Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:68:2073] 2025-06-03T10:30:30.527569Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.527574Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.527603Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.527621Z node 3 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.527652Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [3:88:2074] 2025-06-03T10:30:30.527657Z node 3 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForIni ... 
4304b of static, Memory{0 dyn 0} 2025-06-03T10:31:05.740049Z node 30 :HIVE DEBUG: tx__process_boot_queue.cpp:26: HIVE#72057594037927937 THive::TTxProcessBootQueue()::Complete 2025-06-03T10:31:05.740136Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [30:451:2358] 2025-06-03T10:31:05.740143Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [30:451:2358] 2025-06-03T10:31:05.740161Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StNormal ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:05.740171Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 30 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [30:371:2299] 2025-06-03T10:31:05.740184Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [30:451:2358] 2025-06-03T10:31:05.740190Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [30:451:2358] 2025-06-03T10:31:05.740205Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037888] connect request undelivered [30:451:2358] 2025-06-03T10:31:05.740211Z node 30 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [30:451:2358] 2025-06-03T10:31:05.740224Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037888 entry.State: StNormal 2025-06-03T10:31:05.740282Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:31:05.740306Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-03T10:31:05.740317Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-03T10:31:05.740324Z node 30 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-03T10:31:05.740336Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [30:371:2299] CurrentLeaderTablet: [30:386:2311] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:31:05.740351Z node 30 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037888 CurrentLeader: [30:371:2299] CurrentLeaderTablet: [30:386:2311] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:31:05.740391Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [30:371:2299] CurrentLeaderTablet: [30:386:2311] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[30:1099535971443:0] : 6}, {[30:24343667:0] : 3}}}} 2025-06-03T10:31:05.740413Z node 30 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-03T10:31:05.740454Z node 31 
:PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [31:453:2093] 2025-06-03T10:31:05.740461Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [31:453:2093] 2025-06-03T10:31:05.740470Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [31:453:2093] 2025-06-03T10:31:05.740478Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:05.740486Z node 31 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 31 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [30:321:2263] 2025-06-03T10:31:05.740495Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [31:453:2093] 2025-06-03T10:31:05.740502Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 30 [31:453:2093] 2025-06-03T10:31:05.740516Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [31:453:2093] 2025-06-03T10:31:05.740519Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [31:453:2093] 2025-06-03T10:31:05.740553Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [31:453:2093] 2025-06-03T10:31:05.740581Z node 30 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([31:453:2093]) [30:454:2359] 2025-06-03T10:31:05.740597Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [31:453:2093] 2025-06-03T10:31:05.740601Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [31:453:2093] 2025-06-03T10:31:05.740603Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [31:453:2093] 2025-06-03T10:31:05.740611Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [31:453:2093] 2025-06-03T10:31:05.740614Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [31:453:2093] 2025-06-03T10:31:05.740617Z node 31 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [31:453:2093] 2025-06-03T10:31:05.740646Z node 30 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [31:441:2088] EventType# 268697624 2025-06-03T10:31:05.740668Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} queued, type NKikimr::NHive::TTxStartTablet 2025-06-03T10:31:05.740674Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:31:05.740682Z node 30 :HIVE DEBUG: tx__start_tablet.cpp:31: HIVE#72057594037927937 THive::TTxStartTablet::Execute Tablet (72075186224037888,0) 2025-06-03T10:31:05.740738Z node 30 :HIVE DEBUG: tx__start_tablet.cpp:73: HIVE#72057594037927937 THive::TTxStartTablet::Execute, Sending TEvBootTablet(Dummy.72075186224037888.Leader.2) to node 31 storage {Version# 1 TabletID# 72075186224037888 
TabletType# Dummy Channels# {0:{Channel# 0 Type# none StoragePool# def1 History# {0:{FromGeneration# 0 GroupID# 2147483648 Timestamp# 1970-01-01T00:00:00.059536Z}}, 1:{Channel# 1 Type# none StoragePool# def2 History# {0:{FromGeneration# 0 GroupID# 2147483649 Timestamp# 1970-01-01T00:00:00.059536Z}}, 2:{Channel# 2 Type# none StoragePool# def3 History# {0:{FromGeneration# 0 GroupID# 2147483650 Timestamp# 1970-01-01T00:00:00.059536Z}}} Tenant: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:05.740763Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} hope 1 -> done Change{13, redo 144b alter 0b annex 0, ~{ 1, 16 } -{ }, 0 gb} 2025-06-03T10:31:05.740769Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:9} Tx{24, NKikimr::NHive::TTxStartTablet} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:31:05.751191Z node 30 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [db158bc7997c188e] bootstrap ActorId# [30:456:2361] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:9:0:0:127:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-03T10:31:05.751259Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [db158bc7997c188e] Id# [72057594037927937:2:9:0:0:127:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:31:05.751269Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [db158bc7997c188e] restore Id# [72057594037927937:2:9:0:0:127:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:31:05.751283Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [db158bc7997c188e] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] Marker# BPG33 2025-06-03T10:31:05.751291Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [db158bc7997c188e] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:9:0:0:127:1] Marker# BPG32 2025-06-03T10:31:05.751335Z node 30 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [30:56:2080] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:9:0:0:127:1] FDS# 127 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:31:05.752029Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [db158bc7997c188e] received {EvVPutResult Status# OK ID# [72057594037927937:2:9:0:0:127:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 24 } Cost# 81000 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 25 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:31:05.752070Z node 30 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [db158bc7997c188e] Result# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-03T10:31:05.752082Z node 30 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [db158bc7997c188e] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:31:05.752115Z node 30 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.214 sample PartId# [72057594037927937:2:9:0:0:127:1] QueryCount# 1 VDiskId# [0:1:0:0:0] 
NodeId# 30 } TEvVPutResult{ TimestampMs# 0.925 VDiskId# [0:1:0:0:0] NodeId# 30 Status# OK } ] } 2025-06-03T10:31:05.752148Z node 30 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:9:0:0:127:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-03T10:31:05.752190Z node 30 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:10} commited cookie 1 for step 9 2025-06-03T10:31:05.752269Z node 30 :HIVE DEBUG: tx__start_tablet.cpp:122: HIVE#72057594037927937 THive::TTxStartTablet::Complete Tablet (72075186224037888,0) SideEffects: {Notifications: 0x10080002 [31:441:2088] NKikimrLocal.TEvBootTablet Info { TabletID: 72075186224037888 Channels { Channel: 0 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483648 } StoragePool: "def1" } Channels { Channel: 1 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483649 } StoragePool: "def2" } Channels { Channel: 2 ChannelType: 0 History { FromGeneration: 0 GroupID: 2147483650 } StoragePool: "def3" } TabletType: Dummy Version: 1 TenantIdOwner: 72057594046678944 TenantIdLocalId: 1 } SuggestedGeneration: 2 BootMode: BOOT_MODE_LEADER FollowerId: 0} 2025-06-03T10:31:05.752341Z node 30 :HIVE TRACE: hive_impl.cpp:775: HIVE#72057594037927937 Handle TEvInterconnect::TEvNodeConnected (duplicate), NodeId 31 Cookie 0 |67.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> TRtmrTest::CreateWithoutTimeCastBuckets [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:31:07.744050Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:07.744084Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:07.744091Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:07.744097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:07.744116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:07.744121Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:07.744134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:07.744156Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, 
DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:07.744307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:07.744417Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:07.775388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:07.775425Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.787370Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:07.787615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:07.787675Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:07.798484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:07.798638Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:07.798835Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:07.798939Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:07.800431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:07.800537Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:07.801017Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:07.801042Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:07.801054Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:07.801069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:07.801076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:07.801109Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.803159Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:31:07.854507Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { 
Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:07.854628Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.854716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:07.854788Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:07.854805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.861820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:07.861883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:07.861995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.862014Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:07.862022Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:07.862030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:07.863057Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.863083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:07.863094Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:07.863727Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.863745Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.863754Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:07.863764Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:07.864658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 
message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:07.865355Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:07.865439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:07.865716Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:07.865765Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:07.865777Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:07.865879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:07.865890Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:07.865940Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:07.865955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:07.874451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:07.874483Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:07.874571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
1:07.905401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_rtmr.cpp:162: TCreateRTMR TPropose, operationId: 100:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:07.905414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 100 ready parts: 1/1 2025-06-03T10:31:07.905469Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 100 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:07.905930Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 100:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:100 msg type: 269090816 2025-06-03T10:31:07.905978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 100, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 100 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000002 2025-06-03T10:31:07.906082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000002, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:07.906112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 100 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000002 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:07.906122Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_create_rtmr.cpp:130: TCreateRTMR TPropose, operationId: 100:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-03T10:31:07.906154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 100:0 128 -> 240 2025-06-03T10:31:07.906203Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:07.906218Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:07.917103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:07.917144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:07.917228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 100, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:31:07.917262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:07.917270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 1 2025-06-03T10:31:07.917277Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 100, path id: 2 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:31:07.917433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 100:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.917446Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 100:0 ProgressState 2025-06-03T10:31:07.917467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:31:07.917473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:31:07.917486Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#100:0 progress is 1/1 2025-06-03T10:31:07.917490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:31:07.917497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 100, ready parts: 1/1, is published: false 2025-06-03T10:31:07.917522Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 100 ready parts: 1/1 2025-06-03T10:31:07.917529Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 100:0 2025-06-03T10:31:07.917534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 100:0 2025-06-03T10:31:07.917575Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:31:07.917583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 100, publications: 2, subscribers: 0 2025-06-03T10:31:07.917589Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 1], 5 2025-06-03T10:31:07.917592Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 100, [OwnerId: 72057594046678944, LocalPathId: 2], 2 2025-06-03T10:31:07.917865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:31:07.917889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 5 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:31:07.917896Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:31:07.917902Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 5 2025-06-03T10:31:07.917909Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:07.918210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:31:07.918232Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 2 PathOwnerId: 72057594046678944, cookie: 100 2025-06-03T10:31:07.918238Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 100 2025-06-03T10:31:07.918244Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 100, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 2 2025-06-03T10:31:07.918250Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:31:07.918267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 100, subscribers: 0 2025-06-03T10:31:07.919593Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 2025-06-03T10:31:07.919903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 100 TestModificationResult got TxId: 100, wait until txId: 100 TestWaitNotification wait txId: 100 2025-06-03T10:31:07.919976Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 100: send EvNotifyTxCompletion 2025-06-03T10:31:07.919986Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 100 2025-06-03T10:31:07.920089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 100, at schemeshard: 72057594046678944 2025-06-03T10:31:07.920123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 100: got EvNotifyTxCompletionResult 2025-06-03T10:31:07.920129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 100: satisfy waiter [1:309:2299] TestWaitNotification: OK eventTxId 100 2025-06-03T10:31:07.920268Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/rtmr1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:07.925442Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/rtmr1" 
took 5.13ms result status StatusSuccess 2025-06-03T10:31:07.925648Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/rtmr1" PathDescription { Self { Name: "rtmr1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeRtmrVolume CreateFinished: true CreateTxId: 100 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 RTMRVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } RtmrVolumeDescription { Name: "rtmr1" PathId: 2 PartitionsCount: 0 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::ParallelCreateDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:49.729353Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:49.729387Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:49.729394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, 
StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:49.729399Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:49.729417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:49.729422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:49.729434Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:49.729451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:49.729584Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:49.729670Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:49.745085Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:49.745105Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:49.745185Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:49.747388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:49.747459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:49.747483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:49.749052Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:49.749092Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:49.749195Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.749236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:49.749762Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:49.749816Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:49.750068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:49.750078Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at 
schemeshard: 72057594046678944 2025-06-03T10:30:49.750093Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:49.750101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:49.750107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:49.750144Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:49.751600Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:49.768547Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:49.768612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.768664Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:49.768702Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:49.768712Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.769344Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.769369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:49.769404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.769412Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:49.769416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 
2025-06-03T10:30:49.769421Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:49.770039Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.770073Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:49.770080Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:49.770628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.770665Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:49.770672Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:49.770679Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:49.771444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:49.771962Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:49.771995Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:49.772148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:49.772173Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:49.772179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:49.772241Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
2025-06-03T10:31:07.920706Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:07.920764Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:31:07.920787Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1004, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:07.920816Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:07.920822Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 1 2025-06-03T10:31:07.920829Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 4 2025-06-03T10:31:07.920838Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [67:208:2209], at schemeshard: 72057594046678944, txId: 1004, path id: 3 2025-06-03T10:31:07.920940Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1004:0, at schemeshard: 72057594046678944 2025-06-03T10:31:07.920952Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1004:0 ProgressState 2025-06-03T10:31:07.920970Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:07.920976Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:07.920983Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1004:0 progress is 1/1 2025-06-03T10:31:07.920987Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:07.920992Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1004, ready parts: 1/1, is published: false 2025-06-03T10:31:07.920999Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1004 ready parts: 1/1 2025-06-03T10:31:07.921005Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1004:0 2025-06-03T10:31:07.921013Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1004:0 2025-06-03T10:31:07.921032Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:07.921037Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 
72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:31:07.921045Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1004, publications: 3, subscribers: 0 2025-06-03T10:31:07.921050Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 1], 11 2025-06-03T10:31:07.921054Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:07.921059Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1004, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:31:07.921162Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921176Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921181Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:07.921188Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:31:07.921196Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:07.921336Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:07.921346Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:31:07.921363Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:07.921502Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921515Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 11 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921520Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:07.921525Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 
72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 11 2025-06-03T10:31:07.921529Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:07.921867Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921894Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.921900Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1004 2025-06-03T10:31:07.921905Z node 67 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1004, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:07.921910Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:07.921927Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1004, subscribers: 0 2025-06-03T10:31:07.922504Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.922538Z node 67 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:07.922953Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 2025-06-03T10:31:07.922991Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1004 TestModificationResult got TxId: 1004, wait until txId: 1004 TestWaitNotification wait txId: 1004 2025-06-03T10:31:07.923072Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:31:07.923083Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 2025-06-03T10:31:07.923182Z node 67 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:31:07.923209Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:31:07.923214Z node 67 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [67:398:2388] TestWaitNotification: OK eventTxId 1004 2025-06-03T10:31:07.923312Z node 67 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DropMe" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:31:07.923357Z node 67 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DropMe" took 59us result status StatusPathDoesNotExist 2025-06-03T10:31:07.923406Z node 67 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DropMe\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/DropMe" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |67.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest |67.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |67.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_rtmr/unittest >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadMdbBasicSecretValueFromExternalDataSourceMetadata [GOOD] Test command err: Trying to start YDB, gRPC: 64284, MsgBus: 20038 2025-06-03T10:31:01.544900Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668713564344089:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:01.544925Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008a8/r3tmp/tmpvUyExx/pdisk_1.dat 2025-06-03T10:31:01.619923Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668713564344069:2079] 1748946661544730 != 1748946661544733 2025-06-03T10:31:01.623642Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64284, node 1 2025-06-03T10:31:01.641601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:01.641615Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:01.641619Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:01.641668Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable 
configuration TClient is connected to server localhost:20038 2025-06-03T10:31:01.685018Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:01.685060Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:01.689577Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20038 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:01.722645Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.726754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:31:01.738590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:01.781441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.819806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.838646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:02.013094Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668717859313017:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.013129Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.071982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.084924Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.096754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.108417Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.122795Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.143969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.154105Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.166534Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668717859313669:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.166562Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668717859313674:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.166569Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:02.167472Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:02.177752Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668717859313676:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:31:02.262039Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668717859313727:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:02.510264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.512895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 22514, MsgBus: 12374 2025-06-03T10:31:02.897716Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668719064724156:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:02.897951Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008a8/r3tmp/tmpgWSnmi/pdisk_1.dat 2025-06-03T10:31:02.918161Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:02.918476Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668719064724136:2079] 1748946662897271 != 1748946662897274 TServer::EnableGrpc on GrpcPort 22514, node 2 2025-06-03T10:31:02.928484Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:02.928498Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:02.928501Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:02.928561Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12374 TClient is connected to server localhost:12374 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 7205759 ... 
57594046644480 2025-06-03T10:31:05.134588Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715704:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.139180Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalTable, opId: 281474976715705:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 9629, MsgBus: 64448 2025-06-03T10:31:05.614326Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668728704529762:2202];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:05.616721Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008a8/r3tmp/tmpMMZmCu/pdisk_1.dat TServer::EnableGrpc on GrpcPort 9629, node 3 2025-06-03T10:31:05.654549Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:05.655030Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668728704529597:2079] 1748946665605054 != 1748946665605057 2025-06-03T10:31:05.655296Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:05.655297Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:05.655299Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:05.655344Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64448 TClient is connected to server localhost:64448 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:05.723492Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:05.723529Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:05.724347Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.724925Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:31:05.726591Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:05.738433Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.758829Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.798345Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.818538Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:06.121484Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668732999498525:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:06.121618Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:06.130562Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.145990Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.161784Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.176696Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.192008Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.204178Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.217552Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.239316Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668732999499180:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:06.239346Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:06.239504Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668732999499185:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:06.240528Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:06.244157Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668732999499187:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:06.350385Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668732999499238:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:06.628233Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:31:06.766126Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:31:06.862890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:31:07.014736Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.190139Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.284116Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.405631Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:31:07.426571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:31:08.243405Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715710:0, at schemeshard: 72057594046644480 >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] |67.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_subdomain/test-results/unittest/{meta.json ... 
results_accumulator.log} |67.8%| [LD] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/ydb-core-persqueue-dread_cache_service-ut >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/provider/ut/unittest >> KikimrIcGateway::TestLoadDataSourceProperties [GOOD] Test command err: Trying to start YDB, gRPC: 17729, MsgBus: 27505 2025-06-03T10:31:01.122692Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668714936464634:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:01.122738Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008f5/r3tmp/tmpc6hizU/pdisk_1.dat 2025-06-03T10:31:01.211626Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668714936464614:2079] 1748946661122519 != 1748946661122522 2025-06-03T10:31:01.215485Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17729, node 1 2025-06-03T10:31:01.233552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:01.233568Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:01.233571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:01.233622Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27505 2025-06-03T10:31:01.274423Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:01.274460Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:01.275639Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27505 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:01.310590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.317685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:01.348094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.378423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:01.403401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.416214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:01.612995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668714936466245:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.613041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.675089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.701802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.715320Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.731730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.742932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.755342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.767178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:01.786682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668714936466897:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.786702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.786859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668714936466902:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:01.787676Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:01.791926Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668714936466904:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:01.872602Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668714936466955:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:02.124505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:31:02.225898Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.319985Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:31:02.431486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.523687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.613249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-03T10:31:02.693786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:31:02.703429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:31:03.029960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 12437, MsgBus: 3798 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008f5/r3tmp/tmpnYwCOv/pdisk_1.dat 2025-06-03T10:31:03.318399Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:03.318963Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 12437, node 2 2025-06-03T1 ... 
4480 2025-06-03T10:31:05.193943Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.956696Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715711:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 15581, MsgBus: 16128 2025-06-03T10:31:06.515790Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511668733487367710:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:06.516597Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0008f5/r3tmp/tmpHz4W6B/pdisk_1.dat 2025-06-03T10:31:06.552589Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:06.557432Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511668733487367519:2079] 1748946666500146 != 1748946666500149 TServer::EnableGrpc on GrpcPort 15581, node 3 2025-06-03T10:31:06.574047Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:06.574065Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:06.574067Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:06.574123Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16128 2025-06-03T10:31:06.629785Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:06.629823Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:06.633003Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16128 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:06.760495Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:06.772768Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:06.782719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:06.809008Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:06.895564Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:06.945247Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.125368Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668737782336466:2410], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:07.125439Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:07.130325Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.148238Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.219385Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.235125Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.254401Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.267077Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.287157Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.329604Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668737782337116:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:07.329650Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:07.329911Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668737782337121:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:07.334731Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:07.338913Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668737782337123:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:07.418493Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668737782337174:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:07.706379Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:31:07.829785Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.931028Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:31:08.052451Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:31:08.168825Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:31:08.267661Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-03T10:31:08.380802Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:31:08.398117Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.026970Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715711:0, at schemeshard: 72057594046644480 >> TDataShardTrace::TestTraceDistributedSelect [GOOD] |67.8%| [TA] $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert+UseSink [GOOD] Test command err: 2025-06-03T10:31:06.893268Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:06.893406Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:06.893443Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fb6/r3tmp/tmpF0yMCj/pdisk_1.dat 2025-06-03T10:31:07.026125Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.051376Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.052639Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946666275124 != 1748946666275128 2025-06-03T10:31:07.095648Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:07.095694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:07.106472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:07.184916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.160036Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:936:2772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.160081Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:925:2767], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.160183Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.161414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:09.176390Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:31:09.335929Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:939:2775], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:09.393032Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1001:2817] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.470433Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngkk76ec5z5xy158w2k6f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk2YzIzMGYtZWRjOWM4MWMtOTRkZGFmMGQtNGZhMWI1Njk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (DataExecuter -> [(WaitForTableResolve) , (ComputeActor -> [(ForwardWriteActor)]) , (RunTasks) , (Commit -> [(Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)]) , (Datashard.WriteTransaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWriteResult)])])])]) >> YdbTableSplit::SplitByLoadWithDeletes ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceWriteImmediateOnShard [GOOD] Test command err: 2025-06-03T10:31:07.091801Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:07.091903Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:07.091942Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fbf/r3tmp/tmpYjMeZ5/pdisk_1.dat 2025-06-03T10:31:07.230482Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.247801Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.249014Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946666487391 != 1748946666487395 2025-06-03T10:31:07.295730Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:07.295773Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:07.309425Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:07.387337Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelect [GOOD] Test command err: 2025-06-03T10:31:06.885677Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:06.885780Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:06.885815Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fc7/r3tmp/tmpArQrzS/pdisk_1.dat 2025-06-03T10:31:07.068466Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.090107Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.091399Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946666318291 != 1748946666318295 2025-06-03T10:31:07.133936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:07.133979Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:07.145339Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:07.229932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.168341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:936:2772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.168402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:925:2767], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.168493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.169631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:09.183649Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:31:09.355542Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:939:2775], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:09.413996Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1001:2817] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.514952Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngkkff063fgsv7cm6tkwv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2ViYjgxYTktOWUzYzljMjktOTEyMzY2NTQtMWNlYmM5MWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.551957Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtngkyz3hd3d1w94styx9xs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGViZWM3ODgtZWFlMjhmZTgtYjk5MjlhYzctNTIzZjNkNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.866160Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtngm57264scxvjm15emgf5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQ3MWE3MDEtMTdiZTNhMDYtNzU1ODZkNzYtYTUzNDVkZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedSelectViaReadActors [GOOD] Test command err: 2025-06-03T10:31:06.880923Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:06.881032Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:06.881073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fd0/r3tmp/tmpZD1zqU/pdisk_1.dat 2025-06-03T10:31:07.030608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.050087Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.051327Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946666304107 != 1748946666304111 2025-06-03T10:31:07.104299Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:07.104347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:07.117402Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:07.198054Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.192316Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:936:2772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.192375Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:925:2767], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.192469Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.207426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:09.222906Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:31:09.399961Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:939:2775], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:09.453119Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1001:2817] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.521540Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngkm78pd1332p0qrtppcz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjJiZmNlNTUtMTQzYTcwMWYtOTU5N2JhYTUtM2RmOTI1YzM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.557484Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtngkz39k3g8czp2yxy91bk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWY3YTI1ZjUtMmQ2NTYxNGItODdkNmRkMDQtMWFhM2VhZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.632938Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtngm0316s0rtr34gqmznne, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTgwMWU5NjMtZmE3YWI4YTYtNjM0NjgzMTYtMTA0MWZiNQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_trace/unittest >> TDataShardTrace::TestTraceDistributedUpsert-UseSink [GOOD] Test command err: 2025-06-03T10:31:06.904334Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:06.904495Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:06.904543Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fd9/r3tmp/tmpfZPUTO/pdisk_1.dat 2025-06-03T10:31:07.042374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:07.066059Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:07.067327Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946666314087 != 1748946666314091 2025-06-03T10:31:07.119864Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:07.119918Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:07.130810Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:07.210039Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.180169Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:936:2772], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.180236Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:925:2767], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.180362Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.181944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:09.200832Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found 2025-06-03T10:31:09.372602Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:939:2775], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:09.414726Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:1001:2817] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.482622Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngkkvfwzyyhd5s4j4yfaa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjJhYTcwZTctYWQzZTMxM2YtMzhlZjM1OGEtNWQzZDQ2NGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Trace: (Session.query.QUERY_ACTION_EXECUTE -> [(CompileService -> [(CompileActor)]) , (LiteralExecuter) , (DataExecuter -> [(WaitForTableResolve) , (RunTasks) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)]) , (Datashard.Transaction -> [(Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendWithConfirmedReadOnlyLease) , (Tablet.Transaction -> [(Tablet.Transaction.Execute -> [(Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit) , (Datashard.Unit)]) , (Tablet.WriteLog -> [(Tablet.WriteLog.LogEntry)]) , (Tablet.Transaction.Complete)]) , (Datashard.SendResult)])])]) |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::CreateTableOutsideDatabaseFailToStartTabletsButDropIsOk [GOOD] >> TStorageTenantTest::Empty [GOOD] |67.8%| [TA] $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... results_accumulator.log} |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |67.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> TStorageTenantTest::Empty [GOOD] |67.9%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_rtmr/test-results/unittest/{meta.json ... results_accumulator.log} |67.9%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_trace/test-results/unittest/{meta.json ... 
results_accumulator.log} |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpScanSpilling::SpillingPragmaParseError |67.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |67.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace |67.9%| [LD] {RESULT} $(B)/ydb/core/keyvalue/ut_trace/ydb-core-keyvalue-ut_trace >> THiveTest::TestHiveBalancerHighUsageAndColumnShards [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest >> KqpRe2::IncorrectRegexNoError >> TSchemeShardSplitByLoad::IndexTableSplitsUpToMainTableCurrentPartitionCount [GOOD] >> TSchemeShardSplitByLoad::IndexTableDoesNotSplitsIfDisabledByMainTable >> KqpScanSpilling::SelfJoinQueryService >> KqpScanLogs::WideCombine-EnabledLogs >> KqpScanLogs::GraceJoin+EnabledLogs |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_encrypted_storage/unittest |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestWrongSessionOrGeneration >> KqpService::PatternCache [GOOD] >> KqpService::RangeCache+UseCache |67.9%| [TA] $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... results_accumulator.log} >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] >> TPQCachingProxyTest::TestPublishAndForget |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> KqpScanSpilling::SpillingPragmaParseError [GOOD] >> TPQCachingProxyTest::TestPublishAndForget [GOOD] |67.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |67.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator |67.9%| [TA] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_encrypted_storage/test-results/unittest/{meta.json ... 
results_accumulator.log} |67.9%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_stream_creator/tx-replication-controller-ut_stream_creator ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestWrongSessionOrGeneration [GOOD] Test command err: 2025-06-03T10:31:13.396765Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:13.396806Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:31:13.401538Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:13.401576Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 2 2025-06-03T10:31:13.401598Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-03T10:31:13.401607Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 2 2025-06-03T10:31:13.401625Z node 1 :PQ_READ_PROXY INFO: caching_service.cpp:297: Direct read cache: attempted to register server session: session1:1 with stale generation 1, ignored 2025-06-03T10:31:13.401634Z node 1 :PQ_READ_PROXY ALERT: caching_service.cpp:159: Direct read cache: tried to stage direct read for session session1 with generation 1, previously had this session with generation 2. Data ignored 2025-06-03T10:31:13.401665Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-03T10:31:13.401687Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestPublishAndForget [GOOD] Test command err: 2025-06-03T10:31:13.713568Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:13.713607Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:31:13.726782Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:13.726832Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-03T10:31:13.726857Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-03T10:31:13.726866Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-03T10:31:13.726900Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:218: Direct read cache: forget read: 1 for session session1 |67.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |67.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |67.9%| [LD] {RESULT} $(B)/ydb/core/tx/tx_proxy/ut_schemereq/ydb-core-tx-tx_proxy-ut_schemereq |67.9%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::OutdatedSession >> KqpRe2::IncorrectRegexNoError [GOOD] >> KqpRe2::IncorrectRegexWithoutExecutionNoError ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingPragmaParseError [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/00291e/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk9 Trying to start YDB, gRPC: 6986, MsgBus: 1960 2025-06-03T10:31:12.457369Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668761449316255:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:12.457387Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00291e/r3tmp/tmpwemS30/pdisk_1.dat 2025-06-03T10:31:12.556845Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:12.556966Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668761449316227:2079] 1748946672457055 != 1748946672457058 TServer::EnableGrpc on GrpcPort 6986, node 1 2025-06-03T10:31:12.577560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:12.577577Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:12.577580Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:12.577636Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1960 2025-06-03T10:31:12.614559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:12.614596Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:12.615840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1960 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:12.659538Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:12.670681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:12.682259Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:12.778581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:12.812863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:12.873371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.032625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668765744285160:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.032681Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.108470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.121084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.135015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.148573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.162228Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.176176Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.190266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.218373Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668765744285815:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.218404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.218507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668765744285820:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.219611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:13.224398Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668765744285822:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:13.293408Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668765744285873:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:13.612122Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668765744286152:2512], status: GENERIC_ERROR, issues:
: Error: Pre type annotation, code: 1020
:3:40: Error: Bad "EnableSpillingNodes" setting for "$all" cluster: (yexception) tools/enum_parser/enum_serialization_runtime/enum_runtime.cpp:70: Key 'GraceJoin1' not found in enum NYql::NDq::EEnabledSpillingNodes. Valid options are: 'None', 'GraceJoin', 'Aggregation', 'All'. 2025-06-03T10:31:13.613128Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NDU1NGZmZjYtNTdmMmQ4NDUtMWE4NWZjZTMtOGYzMDQ1YTU=, ActorId: [1:7511668765744286145:2508], ActorState: ExecuteState, TraceId: 01jwtngqx5avhbsfv92pnyn4hs, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: >> KqpScanSpilling::SelfJoinQueryService [GOOD] >> TPQCachingProxyTest::MultipleSessions >> TPQCachingProxyTest::OutdatedSession [GOOD] >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots [GOOD] >> TPQCachingProxyTest::MultipleSessions [GOOD] |68.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest |68.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut |68.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoinQueryService [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/002914/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk6 Trying to start YDB, gRPC: 63107, MsgBus: 19464 2025-06-03T10:31:12.803778Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668758943981700:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:12.804718Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002914/r3tmp/tmpudytFl/pdisk_1.dat 2025-06-03T10:31:12.910034Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:12.910075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:12.916485Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:12.919819Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668758943981521:2079] 1748946672801748 != 1748946672801751 TServer::EnableGrpc on GrpcPort 63107, node 1 2025-06-03T10:31:12.937401Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:12.950677Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:12.950690Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:12.950693Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:12.950748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19464 TClient is connected to server localhost:19464 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:31:13.155094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.158413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:13.162960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.243106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.285771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.312820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.398397Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668763238950469:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.398427Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.495821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.508888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.565773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.593070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.607674Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.629103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.667443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.695487Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668763238951127:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.695523Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.698324Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668763238951132:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.699374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:13.704281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:13.704390Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668763238951134:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:13.770754Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668763238951185:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (StructType '('"Key" $3) '('"Value" $5))) (let $7 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($18) (block '( (let $19 (lambda '($20) (block '( (let $21 (VariantType (TupleType $6 $6))) (let $22 (Variant $20 '0 $21)) (let $23 (Variant $20 '1 $21)) (return $22 $23) )))) (return (FromFlow (MultiMap (ToFlow $18) $19))) ))) '('('"_logical_id" '689) '('"_id" '"ac6879a1-5814458-96cc4a96-fc032b42")))) (let $8 (DqCnMap (TDqOutput $7 '0))) (let $9 (DqCnBroadcast (TDqOutput $7 '1))) (let $10 (StructType '('"t1.Key" $3) '('"t1.Value" $5) '('"t2.Key" $3) '('"t2.Value" $5))) (let $11 '('('"_logical_id" '608) '('"_id" '"b16e1b94-77ffbc39-d56a6c2e-a061c382") '('"_wide_channels" $10))) (let $12 (DqPhyStage '($8 $9) (lambda '($24 $25) (block '( (let $26 '('Many 'Hashed 'Compact)) (let $27 (SqueezeToDict (FlatMap (ToFlow $25) (lambda '($30) (block '( (let $31 (Member $30 '"Value")) (let $32 (Nothing (OptionalType (TupleType $4 $6)))) (let $33 (IfPresent $31 (lambda '($34) (Just '($34 $30))) $32)) (return (If (Exists $31) $33 $32)) )))) (lambda '($35) (Nth $35 '0)) (lambda '($36) (Nth $36 '1)) $26)) (let $28 (Sort (FlatMap $27 (lambda '($37) (block '( (let $38 '('"Value")) (let $39 '('"Key" '"t1.Key" '"Value" '"t1.Value")) (let $40 '('"Key" '"t2.Key" '"Value" '"t2.Value")) (return (MapJoinCore (OrderedFilter (ToFlow $24) (lambda '($41) (Exists (Member $41 '"Value")))) $37 'Inner $38 $38 $39 $40 '('"t1.Value") '('"t2.Value"))) )))) (Bool 'true) (lambda '($42) (Member $42 '"t1.Key")))) (let $29 (lambda '($43) (Member $43 '"t1.Key") (Member $43 '"t1.Value") (Member $43 '"t2.Key") (Member $43 '"t2.Value"))) (return (FromFlow (ExpandMap $28 $29))) ))) $11)) (let $13 (DqCnMerge (TDqOutput $12 '0) '('('0 '"Asc")))) (let $14 (DqPhyStage '($13) (lambda '($44) (FromFlow (NarrowMap (ToFlow $44) (lambda '($45 $46 $47 $48) (AsStruct '('"t1.Key" $45) '('"t1.Value" $46) '('"t2.Key" $47) '('"t2.Value" $48)))))) '('('"_logical_id" '620) '('"_id" '"a8a0ad19-46db86ac-fd763253-50daa8b2")))) (let $15 '($7 $12 $14)) (let $16 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $17 (DqCnResult (TDqOutput $14 '0) $16)) (return (KqpPhysicalQuery '((KqpPhysicalTx $15 '($17) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $10) '0 '0)) '('('"type" '"query")))) ) |68.0%| [LD] {RESULT} $(B)/ydb/services/dynamic_config/ut/ydb-services-dynamic_config-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::OutdatedSession [GOOD] Test command err: 2025-06-03T10:31:15.068821Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:15.068859Z node 1 
:PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:31:15.073505Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:15.073541Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-03T10:31:15.073561Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-03T10:31:15.073570Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-03T10:31:15.073589Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:293: Direct read cache: registered server session: session1:1 with generation 2, killed existing session with older generation >> KqpScanSpilling::SelfJoin >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::MultipleSessions [GOOD] Test command err: 2025-06-03T10:31:15.323404Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:15.323441Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:31:15.328056Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:15.328091Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-03T10:31:15.328111Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 1 for session: session1 2025-06-03T10:31:15.328119Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 2 for session: session1 2025-06-03T10:31:15.328127Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 1 for session session1, Generation: 1 2025-06-03T10:31:15.328140Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 2 for session session1, Generation: 1 2025-06-03T10:31:15.328149Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 2 2025-06-03T10:31:15.328157Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:171: Direct read cache: staged direct read id 3 for session: session2 2025-06-03T10:31:15.328163Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:179: Direct read cache: publish read: 3 for session session2, Generation: 2 >> TPQCachingProxyTest::TestDeregister |68.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |68.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat |68.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut_fat/ydb-core-blobstorage-dsproxy-ut_fat ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_external_table_reboots/unittest >> TExternalTableTestReboots::DropReplacedExternalTableWithReboots [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] 
recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:30:50.333823Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:50.333848Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:50.333852Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:50.333856Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:50.333866Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:50.333869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:50.333877Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:50.333887Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:50.333983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:50.334037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:50.345573Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:30:50.345602Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:50.345695Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:30:50.348171Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:50.348273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:50.348302Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:50.349997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:50.350039Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:50.350134Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.350182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:50.350598Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:50.350641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:50.350849Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:50.350856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:50.350867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:50.350871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:50.350876Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:50.350910Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:30:50.351967Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:30:50.365853Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:50.365947Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.366007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:50.366042Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:50.366052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.366894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.366920Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:50.366968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.366978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:50.366984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:50.366990Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:50.367451Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.367466Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:50.367472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:50.367906Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.367917Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:50.367922Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:50.367927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:50.368591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:50.369074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:50.369121Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:50.369413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:50.369445Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:50.369454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:50.369531Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:15.235260Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1005:0 128 -> 240 2025-06-03T10:31:15.235307Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:15.235322Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:15.235330Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:15.235611Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.235654Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 FAKE_COORDINATOR: Erasing txId 1005 2025-06-03T10:31:15.236118Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:15.236129Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:15.236198Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 4] 2025-06-03T10:31:15.236229Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1005, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:31:15.236266Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:15.236274Z node 83 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:206:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 1 2025-06-03T10:31:15.236282Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:206:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 4 2025-06-03T10:31:15.236288Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [83:206:2207], at schemeshard: 72057594046678944, txId: 1005, path id: 3 2025-06-03T10:31:15.236416Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:31:15.236429Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1005:0 ProgressState 2025-06-03T10:31:15.236452Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:31:15.236463Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:15.236470Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:31:15.236474Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:15.236479Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: false 2025-06-03T10:31:15.236486Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:31:15.236491Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:31:15.236497Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:31:15.236522Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:31:15.236527Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:31:15.236534Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1005, publications: 3, subscribers: 0 2025-06-03T10:31:15.236538Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 1], 13 2025-06-03T10:31:15.236542Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:31:15.236546Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1005, [OwnerId: 72057594046678944, LocalPathId: 4], 18446744073709551615 2025-06-03T10:31:15.236670Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 
2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.236685Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.236691Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:15.236697Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:31:15.236704Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:31:15.236800Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:31:15.236809Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:31:15.236824Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:31:15.236941Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.236954Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 13 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.236959Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:15.236964Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 13 2025-06-03T10:31:15.236989Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:31:15.237377Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.237403Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.237412Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at 
schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:31:15.237418Z node 83 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:31:15.237425Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:31:15.237448Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:31:15.238722Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.238954Z node 83 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:31:15.238990Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:31:15.239054Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:31:15.239138Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:31:15.239150Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:31:15.239257Z node 83 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:31:15.239292Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:31:15.239299Z node 83 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [83:418:2408] TestWaitNotification: OK eventTxId 1005 >> TPopulatorTest::RemoveDir >> KqpScanLogs::WideCombine+EnabledLogs >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] >> TPQCachingProxyTest::TestDeregister [GOOD] >> TPopulatorTest::RemoveDir [GOOD] >> KqpService::RangeCache+UseCache [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpRe2::IncorrectRegexWithoutExecutionNoError [GOOD] Test command err: Trying to start YDB, gRPC: 5728, MsgBus: 1538 2025-06-03T10:31:12.699567Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668762407521980:2087];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:12.699897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00291d/r3tmp/tmpiAGBrj/pdisk_1.dat 2025-06-03T10:31:12.801592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:12.801643Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:12.803821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:12.812764Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5728, node 1 2025-06-03T10:31:12.841786Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:12.841798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:12.841801Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:12.841851Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1538 TClient is connected to server localhost:1538 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:12.992861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:12.997143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:13.001621Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.060131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.145397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:31:13.208470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.463168Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668766702490847:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.463210Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.565434Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.577872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.600497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.613446Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.625850Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.638518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.661154Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.689878Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668766702491499:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.689930Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.690155Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668766702491504:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.691612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:13.696695Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:13.696915Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668766702491506:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:13.751587Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668766702491566:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ( (let $1 (DataType 'Bool)) (let $2 '('('"_logical_id" '235) '('"_id" '"a34f869-ceb72efb-b333c3ff-432f0d44") '('"_partition_mode" '"single"))) (let $3 (DqPhyStage '() (lambda '() (block '( (let $5 (String '"a[x")) (let $6 (OptionalType (StructType '('"CaseSensitive" $1) '('"DotNl" $1) '('"Literal" $1) '('"LogErrors" $1) '('"LongestMatch" $1) '('"MaxMem" (DataType 'Uint64)) '('"NeverCapture" $1) '('"NeverNl" $1) '('"OneLine" $1) '('"PerlClasses" $1) '('"PosixSyntax" $1) '('"Utf8" $1) '('"WordBoundary" $1)))) (let $7 (DataType 'String)) (let $8 (CallableType '() '($1) '((OptionalType $7)))) (let $9 (Udf '"Re2.Grep" '($5 (Nothing $6)) (VoidType) '"" $8 (TupleType $7 $6) '"" '())) (return (Iterator (AsList (AsStruct '('"column0" (Apply $9 (Just $5))))))) ))) $2)) (let $4 (DqCnResult (TDqOutput $3 '"0") '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx '($3) '($4) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $1))) '"0" '"0")) '('('"type" '"query")))) ) Trying to start YDB, gRPC: 24803, MsgBus: 61752 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00291d/r3tmp/tmpVDjwis/pdisk_1.dat 2025-06-03T10:31:14.785481Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:14.797499Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:14.801441Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511668769098130398:2079] 1748946674721941 != 1748946674721944 TServer::EnableGrpc on GrpcPort 24803, node 2 2025-06-03T10:31:14.845836Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:14.845871Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:14.850794Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:14.850803Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:14.850806Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:14.850870Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:14.852191Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61752 TClient is connected to server localhost:61752 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:14.983168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:14.992403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:14.999990Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:15.040852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:15.121335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.138263Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:15.494152Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668773393099326:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:15.494204Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:15.498898Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.510489Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.521460Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.535077Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.550085Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.563114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.579035Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:15.598870Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668773393099977:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:15.598920Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:15.598970Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668773393099982:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:15.600168Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:15.603372Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668773393099984:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:15.659863Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668773393100035:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 (DataType 'String)) (let $5 (OptionalType $4)) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($10) (FromFlow (Filter (ToFlow $10) (lambda '($11) (block '( (let $12 (DataType 'Bool)) (let $13 (OptionalType (StructType '('"CaseSensitive" $12) '('"DotNl" $12) '('"Literal" $12) '('"LogErrors" $12) '('"LongestMatch" $12) '('"MaxMem" $3) '('"NeverCapture" $12) '('"NeverNl" $12) '('"OneLine" $12) '('"PerlClasses" $12) '('"PosixSyntax" $12) '('"Utf8" $12) '('"WordBoundary" $12)))) (let $14 (CallableType '() '($12) '($5))) (let $15 (Udf '"Re2.Grep" '((String '"[") (Nothing $13)) (VoidType) '"" $14 (TupleType $4 $13) '"" '())) (return (Or (Coalesce (== (Member $11 '"Key") (Int32 '1)) (Bool 'false)) (Apply $15 (Member $11 '"Value")))) )))))) '('('"_logical_id" '493) '('"_id" '"9cfd7431-befe3cd2-741b0a30-a71863bd")))) (let $7 (DqCnUnionAll (TDqOutput $6 '"0"))) (let $8 (DqPhyStage '($7) (lambda '($16) $16) '('('"_logical_id" '573) '('"_id" '"ecb2da9a-c6a764c8-afcb0747-6e539eb0")))) (let $9 (DqCnResult (TDqOutput $8 '"0") '())) (return (KqpPhysicalQuery '((KqpPhysicalTx '($6 $8) '($9) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"Key" (OptionalType $3)) '('"Value" $5))) '"0" '"0")) '('('"type" '"query")))) ) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/persqueue/dread_cache_service/ut/unittest >> TPQCachingProxyTest::TestDeregister [GOOD] Test command err: 2025-06-03T10:31:16.456163Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72057594037927937] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:16.456195Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72057594037927937] doesn't have tx writes info 2025-06-03T10:31:16.463422Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:16.463454Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session1:1 with generation 1 2025-06-03T10:31:16.463463Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:283: Direct read cache: registered server session: session2:1 with generation 1 2025-06-03T10:31:16.463485Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: session1 |68.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/scheme_board/ut_populator/unittest >> TPopulatorTest::RemoveDir [GOOD] Test command err: 2025-06-03T10:31:16.467046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:16.467078Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TestModificationResults wait txId: 100 
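Note: the two physical plans printed above both instantiate the Re2.Grep UDF with a deliberately malformed pattern ("a[x" in the first plan, "[" in the second). A minimal YQL sketch of queries that compile to plans of this shape — reconstructed from the plans themselves, not the test's literal source; the table name is taken from the second plan:

    -- The pattern is invalid, but compilation must succeed; an error may only
    -- surface if the UDF is actually executed (hence "WithoutExecutionNoError").
    $grep1 = Re2::Grep("a[x");
    SELECT $grep1("a[x") AS column0;

    -- Second plan: the invalid pattern is combined with the key predicate via Or,
    -- matching the (Or (Coalesce (== Key 1) false) (Apply $15 Value)) node above.
    $grep2 = Re2::Grep("[");
    SELECT Key, Value FROM `/Root/KeyValue`
    WHERE Key = 1 OR $grep2(Value);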
2025-06-03T10:31:16.499781Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:71:2109], cookie# 100, event size# 330, preserialized size# 51 2025-06-03T10:31:16.499828Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:677: [1:96:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 3 2025-06-03T10:31:16.500109Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500124Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500132Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500415Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: false CreateTxId: 100 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 }: sender# [1:71:2109], cookie# 100, event size# 220, preserialized size# 2 
2025-06-03T10:31:16.500431Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:677: [1:96:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 100, is deletion# false, version: 2 2025-06-03T10:31:16.500457Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:12:2059], cookie# 100 2025-06-03T10:31:16.500468Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:15:2062], cookie# 100 2025-06-03T10:31:16.500474Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:18:2065], cookie# 100 2025-06-03T10:31:16.500518Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:97:2122], cookie# 100 2025-06-03T10:31:16.500525Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500532Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500538Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.500566Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# [1:98:2123], cookie# 100 2025-06-03T10:31:16.500572Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 3 2025-06-03T10:31:16.500578Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:12:2059], cookie# 100 2025-06-03T10:31:16.500584Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:15:2062], cookie# 100 2025-06-03T10:31:16.500590Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:18:2065], cookie# 100 2025-06-03T10:31:16.500721Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 3 }: sender# 
[1:99:2124], cookie# 100 2025-06-03T10:31:16.500744Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:97:2122], cookie# 100 2025-06-03T10:31:16.500767Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:98:2123], cookie# 100 2025-06-03T10:31:16.500772Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 100, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 2 2025-06-03T10:31:16.500829Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 2 }: sender# [1:99:2124], cookie# 100 2025-06-03T10:31:16.500836Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:753: [1:96:2121] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 100 FAKE_COORDINATOR: Add transaction: 100 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 100 at step: 5000001 FAKE_COORDINATOR: Erasing txId 100 2025-06-03T10:31:16.501484Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: true } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:71:2109], cookie# 100, event size# 340, preserialized size# 56 2025-06-03T10:31:16.501499Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:677: [1:96:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 100, is deletion# false, version: 4 2025-06-03T10:31:16.501524Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.501533Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.501540Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 100 2025-06-03T10:31:16.501660Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root/DirB" PathDescription { Self { Name: "DirB" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 100 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 1 PathsLimit: 10000 ShardsI ... eration: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503345Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:97:2122], cookie# 101 2025-06-03T10:31:16.503353Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503358Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503371Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:98:2123], cookie# 101 2025-06-03T10:31:16.503378Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 5 2025-06-03T10:31:16.503384Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:12:2059], cookie# 101 2025-06-03T10:31:16.503394Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:15:2062], cookie# 101 2025-06-03T10:31:16.503400Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:18:2065], cookie# 101 2025-06-03T10:31:16.503436Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 5 }: sender# [1:99:2124], cookie# 101 2025-06-03T10:31:16.503496Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle 
NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:97:2122], cookie# 101 2025-06-03T10:31:16.503508Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:98:2123], cookie# 101 2025-06-03T10:31:16.503513Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 3 FAKE_COORDINATOR: Add transaction: 101 at step: 5000002 FAKE_COORDINATOR: advance: minStep5000002 State->FrontStep: 5000001 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 101 at step: 5000002 2025-06-03T10:31:16.503601Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 3 }: sender# [1:99:2124], cookie# 101 2025-06-03T10:31:16.503607Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:753: [1:96:2121] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 101 2025-06-03T10:31:16.503870Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusSuccess Path: "/Root" PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/Root" } } } PathId: 1 PathOwnerId: 72057594046678944 }: sender# [1:71:2109], cookie# 101, event size# 232, preserialized size# 2 2025-06-03T10:31:16.503884Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:677: [1:96:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], cookie# 101, is deletion# false, version: 6 2025-06-03T10:31:16.503906Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503914Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503920Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# 
[1:96:2121], cookie# 101 FAKE_COORDINATOR: Erasing txId 101 2025-06-03T10:31:16.503965Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:660: [1:96:2121] Handle TEvSchemeShard::TEvDescribeSchemeResult { Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/Root/DirB\', error: path has been deleted (id: [OwnerId: 72057594046678944, LocalPathId: 2], type: EPathTypeDir, state: EPathStateNotExist), drop stepId: 5000002, drop txId: 101, source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/Root/DirB" PathId: 2 LastExistedPrefixPath: "/Root" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 72057594046678944 }: sender# [1:71:2109], cookie# 101, event size# 384, preserialized size# 0 2025-06-03T10:31:16.503971Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:677: [1:96:2121] Update description: owner# 72057594046678944, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], cookie# 101, is deletion# true, version: 0 2025-06-03T10:31:16.503981Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503987Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.503994Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:18:2065], cookie# 101 2025-06-03T10:31:16.504009Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:99:2124], cookie# 101 2025-06-03T10:31:16.504016Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:12:2059], cookie# 101 2025-06-03T10:31:16.504023Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:15:2062], cookie# 101 2025-06-03T10:31:16.504029Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:271: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdate { Owner: 72057594046678944 Generation: 2 }: sender# [1:96:2121], cookie# 101 2025-06-03T10:31:16.504074Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:97:2122], cookie# 101 2025-06-03T10:31:16.504080Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 1], version# 6 2025-06-03T10:31:16.504087Z node 1 
:SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:97:2122] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:12:2059], cookie# 101 2025-06-03T10:31:16.504093Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:98:2123] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:15:2062], cookie# 101 2025-06-03T10:31:16.504099Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:279: [1:99:2124] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:18:2065], cookie# 101 2025-06-03T10:31:16.504156Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 1] Version: 6 }: sender# [1:98:2123], cookie# 101 2025-06-03T10:31:16.504176Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:97:2122], cookie# 101 2025-06-03T10:31:16.504190Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:98:2123], cookie# 101 2025-06-03T10:31:16.504195Z node 1 :SCHEME_BOARD_POPULATOR NOTICE: populator.cpp:771: [1:96:2121] Ack update: ack to# [1:71:2109], cookie# 101, pathId# [OwnerId: 72057594046678944, LocalPathId: 2], version# 18446744073709551615 2025-06-03T10:31:16.504230Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:747: [1:96:2121] Handle NKikimrSchemeBoard.TEvUpdateAck { Owner: 72057594046678944 Generation: 2 PathId: [OwnerId: 72057594046678944, LocalPathId: 2] Version: 18446744073709551615 }: sender# [1:99:2124], cookie# 101 2025-06-03T10:31:16.504235Z node 1 :SCHEME_BOARD_POPULATOR DEBUG: populator.cpp:753: [1:96:2121] Ack for unknown update (already acked?): sender# [1:99:2124], cookie# 101 TestModificationResult got TxId: 101, wait until txId: 101 |68.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall >> TKeyValueTracingTest::WriteSmall >> TKeyValueTracingTest::ReadHuge |68.1%| [TA] $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> KqpScanSpilling::SelfJoin [GOOD] >> THiveTest::TestHiveBalancerOneTabletHighUsage [GOOD] >> THiveTest::TestHiveBalancerWithSpareNodes >> NodeDisconnected::BsQueueRetries [GOOD] >> ProxyEncryption::CorrectlyFailOnNoKeys ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SelfJoin [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/00290a/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk5 Trying to start YDB, gRPC: 7746, MsgBus: 27050 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00290a/r3tmp/tmpfiFIMM/pdisk_1.dat 2025-06-03T10:31:16.134908Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:16.183775Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:16.184553Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668775950204719:2079] 1748946676059385 != 1748946676059388 TServer::EnableGrpc on GrpcPort 7746, node 1 2025-06-03T10:31:16.207285Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:16.207298Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:16.207301Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:16.207352Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:16.233516Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:16.233560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:16.235682Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27050 TClient is connected to server localhost:27050 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:16.382493Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
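For context on KqpScanSpilling::SelfJoin above: the query text is not echoed in this excerpt, but the test name and the /Root/KeyValue table visible in the trace below suggest a self-join over KeyValue executed with scan spilling enabled. A plausible minimal shape (an assumption, not taken from the log; column and table names are the ones that appear elsewhere in this output):

    -- Assumed query shape for a scan-spilling self-join test.
    SELECT a.Key, a.Value, b.Value
    FROM `/Root/KeyValue` AS a
    JOIN `/Root/KeyValue` AS b
    ON a.Key = b.Key;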
2025-06-03T10:31:16.389895Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:16.398904Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.439731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.507772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.531936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.723341Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668775950206372:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.723524Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.729024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.742442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.752912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.768338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.781282Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.838875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.897884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.938949Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668775950207033:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.938983Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.939141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668775950207038:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.940266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:16.943773Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668775950207040:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:17.001130Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668775950207091:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:17.285981Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:148;event=channel_info;ch_size=50;ch_count=1;ch_limit=50;inputs=0;input_channels_count=0; 2025-06-03T10:31:17.286063Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:134: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTMzOTUyYy1hNTZlMmQzNi03YzIzZjNmMA==. CustomerSuppliedId : . TraceId : 01jwtngvgpd2vyrpckk5qgthpe. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Start compute actor [1:7511668780245174678:2508], task: 1 2025-06-03T10:31:17.286074Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:141: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTMzOTUyYy1hNTZlMmQzNi03YzIzZjNmMA==. CustomerSuppliedId : . TraceId : 01jwtngvgpd2vyrpckk5qgthpe. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Set execution timeout 299.985749s 2025-06-03T10:31:17.287139Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1452: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTMzOTUyYy1hNTZlMmQzNi03YzIzZjNmMA==. CustomerSuppliedId : . TraceId : 01jwtngvgpd2vyrpckk5qgthpe. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Create sink for output 0 { Sink { Type: "KqpTableSink" Settings { type_url: "type.googleapis.com/NKikimrKqp.TKqpTableSinkSettings" value: "\032\036\n\016/Root/KeyValue\020\200\202\224\204\200\200\200\200\001\030\006(\001\"\t\n\003Key\020\001 \004*\t\n\003Key\020\001 \004*\014\n\005Value\020\002 \201 0\230\247\200\200\200\200@8\001@\000H\000R\022\t\222\311\347\004\365\316>h\021\314\t\000\000\001\000\020\000X\000`\000h\000h\001x\000" } } } 2025-06-03T10:31:17.287201Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTMzOTUyYy1hNTZlMmQzNi03YzIzZjNmMA==. CustomerSuppliedId : . TraceId : 01jwtngvgpd2vyrpckk5qgthpe. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646926 2025-06-03T10:31:17.287211Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:1074: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTMzOTUyYy1hNTZlMmQzNi03YzIzZjNmMA==. CustomerSuppliedId : . TraceId : 01jwtngvgpd2vyrpckk5qgthpe. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Received channels info: 2025-06-03T10:31:17.287249Z node 1 :KQP_COMPUTE DEBUG: dq_sync_compute_actor_base.h:357: SelfId: [1:7511668780245174678:2508], TxId: 281474976715672, task: 1. Ctx: { SessionId : ydb://session/3?node_id=1&id=ZmE3NGRjOGQtYTM ... ppliedId : . 
CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.603586Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.603675Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-03T10:31:17.603679Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-03T10:31:17.603704Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:31:17.603747Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646923 2025-06-03T10:31:17.603757Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:163: TxId: 281474976715683, task: 3. Finish input channelId: 3, from: [1:7511668780245174873:2558] 2025-06-03T10:31:17.603765Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.603774Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646927 2025-06-03T10:31:17.603777Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. 
Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. CA StateFunc 271646922 2025-06-03T10:31:17.603787Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 1, seqNo: [10] 2025-06-03T10:31:17.603788Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.603791Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715683, task: 2. Tasks execution finished, don't wait for ack delivery in input channelId: 2, seqNo: [10] 2025-06-03T10:31:17.603793Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715683, task: 2. Tasks execution finished 2025-06-03T10:31:17.603797Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668780245174873:2558], TxId: 281474976715683, task: 2. Ctx: { SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. Compute state finished. All channels and sinks finished 2025-06-03T10:31:17.603830Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715683, task: 2. pass away 2025-06-03T10:31:17.603862Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715683;task_id=2;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:31:17.604001Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604034Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604204Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604225Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604236Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604247Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:31:17.604427Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604443Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. 
Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:31:17.604502Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604510Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:502: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Continue execution, either output buffers are not empty or not all channels are ready, hasDataToSend: 1, channelsReady: 1 2025-06-03T10:31:17.604675Z node 1 :KQP_COMPUTE DEBUG: kqp_pure_compute_actor.cpp:148: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. CA StateFunc 271646922 2025-06-03T10:31:17.604692Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:658: TxId: 281474976715683, task: 3. Tasks execution finished, don't wait for ack delivery in input channelId: 3, seqNo: [11] 2025-06-03T10:31:17.604693Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:675: TxId: 281474976715683, task: 3. Tasks execution finished 2025-06-03T10:31:17.604696Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_impl.h:510: SelfId: [1:7511668780245174874:2559], TxId: 281474976715683, task: 3. Ctx: { TraceId : 01jwtngvrd6vrp3f53h7k3yq7k. SessionId : ydb://session/3?node_id=1&id=NjYwZDZmMTItNTZlODRiNmItMjcwZjQ4ZTEtMTFhZGMyODA=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Compute state finished. All channels and sinks finished 2025-06-03T10:31:17.604715Z node 1 :KQP_COMPUTE DEBUG: dq_compute_actor_channels.cpp:494: TxId: 281474976715683, task: 3. pass away 2025-06-03T10:31:17.604739Z node 1 :KQP_COMPUTE DEBUG: log.cpp:784: fline=kqp_compute_actor_factory.cpp:66;problem=finish_compute_actor;tx_id=281474976715683;task_id=3;success=1;message={
: Error: COMPUTE_STATE_FINISHED }; 2025-06-03T10:31:17.605319Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946677632, txId: 281474976715682] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpService::RangeCache+UseCache [GOOD] Test command err: Trying to start YDB, gRPC: 65021, MsgBus: 22111 2025-06-03T10:30:18.986199Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668530646945062:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:18.986221Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d43/r3tmp/tmpPzWWKK/pdisk_1.dat 2025-06-03T10:30:19.041972Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 65021, node 1 2025-06-03T10:30:19.062982Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:19.062998Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:19.063000Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:19.063065Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22111 2025-06-03T10:30:19.087822Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:19.087857Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:19.088954Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:30:19.136378Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:30:19.144156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.208395Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.229475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.242776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:19.414084Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668534941913970:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.414124Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.478094Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.486661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.499081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.512976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.527193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.541728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.555977Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:19.573186Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668534941914624:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.573221Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.573306Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668534941914629:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:19.574447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:19.582681Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668534941914631:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:19.683596Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668534941914682:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:19.873613Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=1&id=YmVlZjgzMDktZjFmMWYzMzgtOTUzMmRhOS02ZjdmNzczMw==, ActorId: [1:7511668534941913952:2401], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:30:19.873635Z node 1 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=1&id=YmVlZjgzMDktZjFmMWYzMzgtOTUzMmRhOS02ZjdmNzczMw==, ActorId: [1:7511668534941913952:2401], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:30:19.873639Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=1&id=YmVlZjgzMDktZjFmMWYzMzgtOTUzMmRhOS02ZjdmNzczMw==, ActorId: [1:7511668534941913952:2401], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:30:19.873643Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=1&id=YmVlZjgzMDktZjFmMWYzMzgtOTUzMmRhOS02ZjdmNzczMw==, ActorId: [1:7511668534941913952:2401], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:30:19.873670Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=1&id=YmVlZjgzMDktZjFmMWYzMzgtOTUzMmRhOS02ZjdmNzczMw==, ActorId: [1:7511668534941913952:2401], ActorState: unknown state, Session actor destroyed 2025-06-03T10:30:19.878251Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=ZTdjNjY0ZDEtOTNiNzcwOTUtZGI5Y2FkZTUtYWViNGZkN2M=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZTdjNjY0ZDEtOTNiNzcwOTUtZGI5Y2FkZTUtYWViNGZkN2M= 2025-06-03T10:30:19.878328Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=ZTdjNjY0ZDEtOTNiNzcwOTUtZGI5Y2FkZTUtYWViNGZkN2M=, ActorId: [1:7511668534941914945:2507], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:30:19.881664Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NmIwZDliYzItZGViZDNhNDQtZWM0MDk0NGItZjA1MTdhZWM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NmIwZDliYzItZGViZDNhNDQtZWM0MDk0NGItZjA1MTdhZWM= 2025-06-03T10:30:19.881782Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NmIwZDliYzItZGViZDNhNDQtZWM0MDk0NGItZjA1MTdhZWM=, ActorId: [1:7511668534941914947:2509], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:30:19.883459Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=YTI4ZTYyYTgtMjE4Y2ZjNjMtYmJhYjQ1NGEtMmU5YzMzYTM=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id YTI4ZTYyYTgtMjE4Y2ZjNjMtYmJhYjQ1NGEtMmU5YzMzYTM= 2025-06-03T10:30:19.883670Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=YTI4ZTYyYTgtMjE4Y2ZjNjMtYmJhYjQ1NGEtMmU5YzMzYTM=, ActorId: 
[1:7511668534941914949:2511], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:30:19.884676Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MWQyMmEyMTktZGM0N2U4NTEtYjU3MmJlMTUtOWRhMzJmNTU=, ActorId: [0:0:0], ActorState: unknow ... cepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:58.098619Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668701868857927:2437] txid# 281474976715674, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:58.107790Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668701868857936:2444] txid# 281474976715676, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:58.107920Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668701868857937:2445] txid# 281474976715677, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:58.107946Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668701868857935:2443] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:02.612416Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[7:7511668697573889763:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:02.612580Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; Trying to start YDB, gRPC: 1149, MsgBus: 20523 2025-06-03T10:31:13.183707Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7511668762983600571:2210];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d43/r3tmp/tmpHINZ6u/pdisk_1.dat 2025-06-03T10:31:13.190232Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:31:13.221385Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [8:7511668762983600387:2079] 1748946673168938 != 1748946673168941 2025-06-03T10:31:13.228866Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1149, node 8 2025-06-03T10:31:13.248648Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:13.248666Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:13.248669Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:13.248731Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:13.294249Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:13.294289Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:13.294955Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20523 TClient is connected to server localhost:20523 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:13.426080Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.431971Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:13.453620Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.496191Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.580942Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:13.617479Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:13.890973Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511668762983602016:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.891001Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:13.911125Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.926320Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:13.991221Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:14.020289Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:14.051879Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:14.068091Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:14.086573Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:14.117053Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511668767278569966:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:14.117092Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:14.117198Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511668767278569971:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:14.118326Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:14.122898Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:14.122985Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7511668767278569973:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:14.182610Z node 8 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [8:7511668767278570024:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } took: 2.078166s took: 2.090015s took: 2.090286s took: 2.090775s took: 2.091010s took: 2.091328s took: 2.091312s took: 2.091707s took: 2.091854s took: 2.092199s |68.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_stream_creator/unittest |68.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 >> StreamCreator::Basic >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 >> ProxyEncryption::CorrectlyFailOnNoKeys [GOOD] >> RequestValidation::TestSinglePutBlobSize0 >> TKeyValueTracingTest::ReadSmall [FAIL] >> RequestValidation::TestSinglePutBlobSize0 [GOOD] >> RequestValidation::TestMultiPutBlobSize0 >> RequestValidation::TestMultiPutBlobSize0 [GOOD] >> RequestValidation::TestVMovedPatchSize0 >> RequestValidation::TestVMovedPatchSize0 [GOOD] >> TKeyValueTracingTest::WriteSmall [FAIL] |68.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |68.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp |68.1%| [TA] {RESULT} $(B)/ydb/core/persqueue/dread_cache_service/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} |68.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_kqp/ydb-core-tx-datashard-ut_kqp >> TKeyValueTracingTest::ReadHuge [FAIL] >> TChargeBTreeIndex::FewNodes_Groups_History [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky >> StreamCreator::Basic [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-25 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 |68.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/dynamic_config/ut/unittest |68.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |68.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots |68.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_base_reboots/ydb-core-tx-schemeshard-ut_base_reboots >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-43 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_stream_creator/unittest >> StreamCreator::Basic [GOOD] Test command err: 2025-06-03T10:31:18.918645Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668787577462625:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:18.918779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00155d/r3tmp/tmpp98g0L/pdisk_1.dat 2025-06-03T10:31:19.011091Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:19.017549Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:19.017581Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:19.018166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23037 TServer::EnableGrpc on GrpcPort 13403, node 1 2025-06-03T10:31:19.065555Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:19.065574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:19.065577Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:19.065633Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23037 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:19.124892Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:19.132829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:19.134116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946679200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946679172 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946679200 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-03T10:31:19.172507Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:31:19.172555Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:31:19.172558Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:31:19.172774Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:31:19.359633Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946679200, tx_id: 281474976715658 } } } 2025-06-03T10:31:19.359794Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:31:19.360257Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:31:19.360550Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-03T10:31:19.360562Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:31:19.380040Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-03T10:31:19.380058Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] 2025-06-03T10:31:19.381545Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:57: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::NController::TEvPrivate::TEvAllowCreateStream 2025-06-03T10:31:19.450944Z node 1 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037888:1][1:7511668791872430648:2345] Failed 
entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:5:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-03T10:31:19.454169Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:85: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTableResponse { Result: { status: SUCCESS, issues: } } 2025-06-03T10:31:19.454186Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:100: [StreamCreator][rid 1][tid 1] Success: issues# 2025-06-03T10:31:19.456465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:19.469654Z node 1 :REPLICATION_CONTROLLER TRACE: stream_creator.cpp:137: [StreamCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvAlterTopicResponse { Result: { status: SUCCESS, issues: } } 2025-06-03T10:31:19.469670Z node 1 :REPLICATION_CONTROLLER INFO: stream_creator.cpp:155: [StreamCreator][rid 1][tid 1] Success: issues# TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946679200 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyC... 
(TRUNCATED) |68.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/dynamic_config/ut/unittest |68.1%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/dynamic_config/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/unittest >> RequestValidation::TestVMovedPatchSize0 [GOOD] Test command err: RandomSeed# 17405345178885612774 2025-06-03T10:25:37.155730Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: data is too large; id# [1:1:0:0:0:20971520:1] size# 20971520 chunkSize# 134217728 Marker# BSVS02 2025-06-03T10:25:37.341741Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: buffer size does not match with part size; buffer size# 99 PartSize# 100 id# [1:1:0:0:0:100:1] Marker# BSVS01 2025-06-03T10:25:38.716569Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) TEvVPut: ingress mismatch; id# [1:1:0:0:0:100:2] Marker# BSVS11 2025-06-03T10:25:39.599482Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:39.604310Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-03T10:25:40.180860Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:40.185957Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-03T10:25:40.523653Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:40.524044Z 1 00h00m06.060512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-03T10:25:40.776613Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:40.777001Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2116: Unknown crcMode = 2 Marker# BSVS41 2025-06-03T10:25:41.107837Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:41.108239Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2116: Unknown crcMode = 3 Marker# BSVS41 2025-06-03T10:25:41.443698Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:41.444223Z 3 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-03T10:25:41.592527Z 2 00h00m21.160512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:41.592922Z 3 00h00m21.160512s :BS_VDISK_PUT 
ERROR: PDiskId# 1000 VDISK[82000000:_:0:2:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-03T10:25:41.805063Z 2 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:41.805492Z 7 00h00m41.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-03T10:25:42.001669Z 2 00h00m41.260512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:42.002064Z 7 00h00m41.260512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:6:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 2025-06-03T10:25:42.291066Z 1 00h00m46.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:42.291458Z 8 00h00m46.300000s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 2 Marker# BSVS41 2025-06-03T10:25:43.127782Z 1 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:0:0:0]: (2181038080) TEvVPut: blob size cannot be 0; id# [0:0:0:0:0:0:0] Marker# BSVS44 2025-06-03T10:25:43.128238Z 8 00h00m46.310512s :BS_VDISK_PUT ERROR: PDiskId# 1000 VDISK[82000000:_:2:1:0]: (2181038080) ydb/core/erasure/erasure.cpp:2103: Unknown crcMode = 3 Marker# BSVS41 *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:1] TO [82000000:1:0:5:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:2] TO [82000000:1:0:6:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:3] TO [82000000:1:0:7:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:4] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:4194304:6] TO [82000000:1:0:2:0] FINISHED WITH OK *** 0 5 1 6 2 7 3 0 4 1 5 2 6 3 7 4 2025-06-03T10:25:46.290862Z 8 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:3] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-03T10:25:46.290927Z 2 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:5] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-03T10:25:46.292510Z 8 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:7:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:3] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 2025-06-03T10:25:46.293276Z 2 00h02m00.060512s :BS_HULLRECS CRIT: PDiskId# 1000 VDISK[82000000:_:0:1:0]: (2181038080) Db# LogoBlobs; putting blob beyond the barrier id# [72075186270680851:57:3905:6:786432:4194304:5] barrier# {Soft# {Gen# 57 Step# 3905} Hard# } 0 5 1 6 2 7 3 0 4 1 5 2 6 3 7 4 BlobsWritten# 18144 step 0 waiting for replies scanning parts step 1 waiting for replies scanning parts step 2 waiting for replies scanning parts step 3 waiting for replies scanning parts step 4 waiting for replies scanning parts empty@ 4 0 empty@ 4 1 empty@ 4 2 empty@ 4 3 empty@ 4 4 empty@ 4 
5 empty@ 4 6 empty@ 4 7 empty@ 4 8 empty@ 4 9 empty@ 4 10 empty@ 4 11 empty@ 4 12 empty@ 4 13 empty@ 4 14 empty@ 4 15 empty@ 4 16 empty@ 4 17 empty@ 4 18 empty@ 4 19 empty@ 4 20 empty@ 4 21 empty@ 4 22 empty@ 4 23 empty@ 4 24 empty@ 4 25 empty@ 4 26 empty@ 4 27 empty@ 4 28 empty@ 4 29 empty@ 4 30 empty@ 4 31 empty@ 4 32 empty@ 4 33 empty@ 4 34 empty@ 4 35 empty@ 4 36 empty@ 4 37 empty@ 4 38 empty@ 4 39 empty@ 4 40 empty@ 4 41 empty@ 4 42 empty@ 4 43 empty@ 4 44 empty@ 4 45 empty@ 4 46 empty@ 4 47 empty@ 4 120 empty@ 4 121 empty@ 4 122 empty@ 4 123 empty@ 4 124 empty@ 4 125 empty@ 4 126 empty@ 4 127 empty@ 4 128 empty@ 4 129 empty@ 4 130 empty@ 4 131 empty@ 4 132 empty@ 4 133 empty@ 4 134 empty@ 4 135 empty@ 4 136 empty@ 4 137 empty@ 4 138 empty@ 4 139 empty@ 4 140 empty@ 4 141 empty@ 4 142 empty@ 4 143 empty@ 4 144 empty@ 4 145 empty@ 4 146 empty@ 4 147 empty@ 4 148 empty@ 4 149 empty@ 4 150 empty@ 4 151 empty@ 4 152 empty@ 4 153 empty@ 4 154 empty@ 4 155 empty@ 4 156 empty@ 4 157 empty@ 4 158 empty@ 4 159 empty@ 4 160 empty@ 4 161 empty@ 4 162 empty@ 4 163 empty@ 4 164 empty@ 4 165 empty@ 4 166 empty@ 4 167 empty@ 4 240 empty@ 4 241 empty@ 4 242 empty@ 4 243 empty@ 4 244 empty@ 4 245 empty@ 4 246 empty@ 4 247 empty@ 4 248 empty@ 4 249 empty@ 4 250 empty@ 4 251 empty@ 4 252 empty@ 4 253 empty@ 4 254 empty@ 4 255 empty@ 4 256 empty@ 4 257 empty@ 4 258 empty@ 4 259 empty@ 4 260 empty@ 4 261 empty@ 4 262 empty@ 4 263 empty@ 4 264 empty@ 4 265 empty@ 4 266 empty@ 4 267 empty@ 4 268 empty@ 4 269 empty@ 4 270 empty@ 4 271 empty@ 4 272 empty@ 4 273 empty@ 4 274 empty@ 4 275 empty@ 4 276 empty@ 4 277 empty@ 4 278 empty@ 4 279 empty@ 4 280 empty@ 4 281 empty@ 4 282 empty@ 4 283 empty@ 4 284 empty@ 4 285 empty@ 4 286 empty@ 4 287 empty@ 4 432 empty@ 4 433 empty@ 4 434 empty@ 4 435 empty@ 4 436 empty@ 4 437 empty@ 4 438 empty@ 4 439 empty@ 4 440 empty@ 4 441 empty@ 4 442 empty@ 4 443 empty@ 4 444 empty@ 4 445 empty@ 4 446 empty@ 4 447 empty@ 4 448 empty@ 4 449 empty@ 4 450 empty@ 4 451 empty@ 4 452 empty@ 4 453 empty@ 4 454 empty@ 4 455 empty@ 4 456 empty@ 4 457 empty@ 4 458 empty@ 4 459 empty@ 4 460 empty@ 4 461 empty@ 4 462 empty@ 4 463 empty@ 4 464 empty@ 4 465 empty@ 4 466 empty@ 4 467 empty@ 4 468 empty@ 4 469 empty@ 4 470 empty@ 4 471 empty@ 4 472 empty@ 4 473 empty@ 4 474 empty@ 4 475 empty@ 4 476 empty@ 4 477 empty@ 4 478 empty@ 4 479 empty@ 4 552 empty@ 4 553 empty@ 4 554 empty@ 4 555 empty@ 4 556 empty@ 4 557 empty@ 4 558 empty@ 4 559 empty@ 4 560 empty@ 4 561 empty@ 4 562 empty@ 4 563 empty@ 4 564 empty@ 4 565 empty@ 4 566 empty@ 4 567 empty@ 4 568 empty@ 4 569 empty@ 4 570 empty@ 4 571 empty@ 4 572 empty@ 4 573 empty@ 4 574 empty@ 4 575 empty@ 4 576 empty@ 4 577 empty@ 4 578 empty@ 4 579 empty@ 4 580 empty@ 4 581 empty@ 4 582 empty@ 4 583 empty@ 4 584 empty@ 4 585 empty@ 4 586 empty@ 4 587 empty@ 4 588 empty@ 4 589 empty@ 4 590 empty@ 4 591 empty@ 4 592 empty@ 4 593 empty@ 4 594 empty@ 4 595 empty@ 4 596 empty@ 4 597 empty@ 4 598 empty@ 4 599 empty@ 4 672 empty@ 4 673 empty@ 4 674 empty@ 4 675 empty@ 4 676 empty@ 4 677 empty@ 4 678 empty@ 4 679 empty@ 4 680 empty@ 4 681 empty@ 4 682 empty@ 4 683 empty@ 4 684 empty@ 4 685 empty@ 4 686 empty@ 4 687 empty@ 4 688 empty@ 4 689 empty@ 4 690 empty@ 4 691 empty@ 4 692 empty@ 4 693 empty@ 4 694 empty@ 4 695 empty@ 4 696 empty@ 4 697 empty@ 4 698 empty@ 4 699 empty@ 4 700 empty@ 4 701 empty@ 4 702 empty@ 4 703 empty@ 4 704 empty@ 4 705 empty@ 4 706 empty@ 4 707 empty@ 4 708 empty@ 4 709 empty@ 4 710 empty@ 4 711 empty@ 4 712 empty@ 4 713 empty@ 4 
714 empty@ 4 715 empty@ 4 716 empty@ 4 717 empty@ 4 718 empty@ 4 719 empty@ 4 720 empty@ 4 721 empty@ 4 722 empty@ 4 723 empty@ 4 724 empty@ 4 725 empty@ 4 726 empty@ 4 727 empty@ 4 728 empty@ 4 729 empty@ 4 730 empty@ 4 731 empty@ 4 732 empty@ 4 733 empty@ 4 734 empty@ 4 735 empty@ 4 736 empty@ 4 737 empty@ 4 738 empty@ 4 739 empty@ 4 740 empty@ 4 741 empty@ 4 742 empty@ 4 743 empty@ 4 744 empty@ 4 745 empty@ 4 746 empty@ 4 747 empty@ 4 748 empty@ 4 749 empty@ 4 750 empty@ 4 751 empty@ 4 752 empty@ 4 753 empty@ 4 754 empty@ 4 755 empty@ 4 756 empty@ 4 757 empty@ 4 758 empty@ 4 759 empty@ 4 760 empty@ 4 761 empty@ 4 762 empty@ 4 763 empty@ 4 764 empty@ 4 765 empty@ 4 766 empty@ 4 767 empty@ 4 840 empty@ 4 841 empty@ 4 842 empty@ 4 843 empty@ 4 844 empty@ 4 845 empty@ 4 846 empty@ 4 847 empty@ 4 848 empty@ 4 849 empty@ 4 850 empty@ 4 851 empty@ 4 852 empty@ 4 853 empty@ 4 854 empty@ 4 855 empty@ 4 856 empty@ 4 857 empty@ 4 858 empty@ 4 859 empty@ 4 860 empty@ 4 861 empty@ 4 862 empty@ 4 863 empty@ 4 864 empty@ 4 865 empty@ 4 866 empty@ 4 867 empty@ 4 868 empty@ 4 869 empty@ 4 870 empty@ 4 871 empty@ 4 872 empty@ 4 873 empty@ 4 874 empty@ 4 875 empty@ 4 876 empty@ 4 877 empty@ 4 878 empty@ 4 879 empty@ 4 880 empty@ 4 881 empty@ 4 882 empty@ 4 883 empty@ 4 884 empty@ 4 885 empty@ 4 886 empty@ 4 887 empty@ 4 960 empty@ 4 961 empty@ 4 962 empty@ 4 963 empty@ 4 964 empty@ 4 965 empty@ 4 966 empty@ 4 967 empty@ 4 968 empty@ 4 969 empty@ 4 970 empty@ 4 971 empty@ 4 972 empty@ 4 973 empty@ 4 974 empty@ 4 975 empty@ 4 976 empty@ 4 977 empty@ 4 978 empty@ 4 979 empty@ 4 980 empty@ 4 981 empty@ 4 982 empty@ 4 983 empty@ 4 984 empty@ 4 985 empty@ 4 9 ... on 9623 iteration 9624 iteration 9625 iteration 9626 iteration 9627 iteration 9628 iteration 9629 iteration 9630 iteration 9631 iteration 9632 iteration 9633 iteration 9634 iteration 9635 iteration 9636 iteration 9637 iteration 9638 iteration 9639 iteration 9640 iteration 9641 iteration 9642 iteration 9643 iteration 9644 iteration 9645 iteration 9646 iteration 9647 iteration 9648 iteration 9649 iteration 9650 iteration 9651 iteration 9652 iteration 9653 iteration 9654 iteration 9655 iteration 9656 iteration 9657 iteration 9658 iteration 9659 iteration 9660 iteration 9661 iteration 9662 iteration 9663 iteration 9664 iteration 9665 iteration 9666 iteration 9667 iteration 9668 iteration 9669 iteration 9670 iteration 9671 iteration 9672 iteration 9673 iteration 9674 iteration 9675 iteration 9676 iteration 9677 iteration 9678 iteration 9679 iteration 9680 iteration 9681 iteration 9682 iteration 9683 iteration 9684 iteration 9685 iteration 9686 iteration 9687 iteration 9688 iteration 9689 iteration 9690 iteration 9691 iteration 9692 iteration 9693 iteration 9694 iteration 9695 iteration 9696 iteration 9697 iteration 9698 iteration 9699 compaction iteration 9700 iteration 9701 iteration 9702 iteration 9703 iteration 9704 iteration 9705 iteration 9706 iteration 9707 iteration 9708 iteration 9709 iteration 9710 iteration 9711 iteration 9712 iteration 9713 iteration 9714 iteration 9715 iteration 9716 iteration 9717 iteration 9718 iteration 9719 iteration 9720 iteration 9721 iteration 9722 iteration 9723 iteration 9724 iteration 9725 iteration 9726 iteration 9727 iteration 9728 iteration 9729 iteration 9730 iteration 9731 iteration 9732 iteration 9733 iteration 9734 iteration 9735 iteration 9736 iteration 9737 iteration 9738 iteration 9739 iteration 9740 iteration 9741 iteration 9742 iteration 9743 iteration 9744 iteration 9745 iteration 9746 iteration 9747 
iteration 9748 iteration 9749 iteration 9750 iteration 9751 iteration 9752 iteration 9753 iteration 9754 iteration 9755 iteration 9756 iteration 9757 iteration 9758 iteration 9759 iteration 9760 iteration 9761 iteration 9762 iteration 9763 iteration 9764 iteration 9765 iteration 9766 iteration 9767 iteration 9768 iteration 9769 iteration 9770 iteration 9771 iteration 9772 iteration 9773 iteration 9774 iteration 9775 iteration 9776 iteration 9777 iteration 9778 iteration 9779 iteration 9780 iteration 9781 iteration 9782 iteration 9783 iteration 9784 iteration 9785 iteration 9786 iteration 9787 iteration 9788 iteration 9789 iteration 9790 iteration 9791 iteration 9792 iteration 9793 iteration 9794 iteration 9795 iteration 9796 iteration 9797 iteration 9798 iteration 9799 compaction garbage collect iteration 9800 iteration 9801 iteration 9802 iteration 9803 iteration 9804 iteration 9805 iteration 9806 iteration 9807 iteration 9808 iteration 9809 iteration 9810 iteration 9811 iteration 9812 iteration 9813 iteration 9814 iteration 9815 iteration 9816 iteration 9817 iteration 9818 iteration 9819 iteration 9820 iteration 9821 iteration 9822 iteration 9823 iteration 9824 iteration 9825 iteration 9826 iteration 9827 iteration 9828 iteration 9829 iteration 9830 iteration 9831 iteration 9832 iteration 9833 iteration 9834 iteration 9835 iteration 9836 iteration 9837 iteration 9838 iteration 9839 iteration 9840 iteration 9841 iteration 9842 iteration 9843 iteration 9844 iteration 9845 iteration 9846 iteration 9847 iteration 9848 iteration 9849 iteration 9850 iteration 9851 iteration 9852 iteration 9853 iteration 9854 iteration 9855 iteration 9856 iteration 9857 iteration 9858 iteration 9859 iteration 9860 iteration 9861 iteration 9862 iteration 9863 iteration 9864 iteration 9865 iteration 9866 iteration 9867 iteration 9868 iteration 9869 iteration 9870 iteration 9871 iteration 9872 iteration 9873 iteration 9874 iteration 9875 iteration 9876 iteration 9877 iteration 9878 iteration 9879 iteration 9880 iteration 9881 iteration 9882 iteration 9883 iteration 9884 iteration 9885 iteration 9886 iteration 9887 iteration 9888 iteration 9889 iteration 9890 iteration 9891 iteration 9892 iteration 9893 iteration 9894 iteration 9895 iteration 9896 iteration 9897 iteration 9898 iteration 9899 compaction iteration 9900 iteration 9901 iteration 9902 iteration 9903 iteration 9904 iteration 9905 iteration 9906 iteration 9907 iteration 9908 iteration 9909 iteration 9910 iteration 9911 iteration 9912 iteration 9913 iteration 9914 iteration 9915 iteration 9916 iteration 9917 iteration 9918 iteration 9919 iteration 9920 iteration 9921 iteration 9922 iteration 9923 iteration 9924 iteration 9925 iteration 9926 iteration 9927 iteration 9928 iteration 9929 iteration 9930 iteration 9931 iteration 9932 iteration 9933 iteration 9934 iteration 9935 iteration 9936 iteration 9937 iteration 9938 iteration 9939 iteration 9940 iteration 9941 iteration 9942 iteration 9943 iteration 9944 iteration 9945 iteration 9946 iteration 9947 iteration 9948 iteration 9949 iteration 9950 iteration 9951 iteration 9952 iteration 9953 iteration 9954 iteration 9955 iteration 9956 iteration 9957 iteration 9958 iteration 9959 iteration 9960 iteration 9961 iteration 9962 iteration 9963 iteration 9964 iteration 9965 iteration 9966 iteration 9967 iteration 9968 iteration 9969 iteration 9970 iteration 9971 iteration 9972 iteration 9973 iteration 9974 iteration 9975 iteration 9976 iteration 9977 iteration 9978 iteration 9979 iteration 9980 iteration 9981 
iteration 9982 iteration 9983 iteration 9984 iteration 9985 iteration 9986 iteration 9987 iteration 9988 iteration 9989 iteration 9990 iteration 9991 iteration 9992 iteration 9993 iteration 9994 iteration 9995 iteration 9996 iteration 9997 iteration 9998 iteration 9999 compaction garbage collect {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:3] OK Size# 0 FullDataSize# 12 BufferData# } BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:3] OK Size# 0 FullDataSize# 12 BufferData# } BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:1] OK Size# 12 FullDataSize# 12 BufferData# aGVsbG8sIHdvcmxk} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} {EvVGetResult QueryResult Status# OK {[1:1:1:0:0:12:0] NODATA} BlockedGeneration# 0} rssOnBegin# 737107968 737107968 -> 737107968 2025-06-03T10:28:14.564157Z 1 00h01m00.010512s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 269877761 Duration# 0.112638s 2025-06-03T10:29:32.534584Z 1 00h01m00.010512s :BS_CONTROLLER ERROR: {BSC00@bsc.cpp:689} StateWork event processing took too much time Type# 269877761 Duration# 0.233113s 2025-06-03T10:30:05.844427Z 1 00h02m00.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 CTR 4000078 2025-06-03T10:31:19.008700Z 11 00h01m00.011024s :BS_NODE ERROR: {NW19@node_warden_group.cpp:211} error while parsing group GroupId# 2181038080 Err# LifeCyclePhase# KEY_NOT_LOADED Key.Id# "" Key.Version# 0 MainKey.Id# "tenant key" MainKey.Version# 1 GroupKeyNonce# 2181038080 2025-06-03T10:31:19.195626Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:0:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:0:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:0:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305385Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:0:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:0:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:0:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305420Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:1:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:1:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:1:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305431Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:2:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:2:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, 
LogoBlobId# [100:1:1:0:2:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305441Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:3:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:3:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:3:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305450Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:4:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:4:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:4:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305461Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:5:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:5:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:5:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305470Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:6:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:6:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:6:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305480Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:7:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:7:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:7:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305490Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:8:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:8:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:8:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 2025-06-03T10:31:19.305499Z 1 00h01m00.010512s :BS_PROXY ERROR: HandleNormal ev# TEvPut {Id# [100:1:1:0:9:0:0] Size# 0 Deadline# 18446744073709551 HandleClass# TabletLog Tactic# Default} result# TEvPutResult {Id# [100:1:1:0:9:0:0] Status# ERROR StatusFlags# { } ErrorReason# "Blob size must be greater than 0, LogoBlobId# [100:1:1:0:9:0:0] Group# 2181038080 Marker# DSP55" ApproximateFreeSpaceShare# 0} Marker# DSP54 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-26 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-44 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 >> TBlobStorageProxyTest::TestNormal >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-4 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 >> TBlobStorageProxyTest::TestEmptyDiscover >> TBlobStorageProxyTest::TestGetMultipart >> TBlobStorageProxyTest::TestVPutVCollectVGetRace >> TBlobStorageProxyTest::TestPersistence >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-27 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-45 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::WriteSmall [FAIL] Test command err: assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:103, void TestOneWrite(TString, TVector<TString> &&): (env.WilsonUploader->Traces.size() == 1) failed: (2 != 1) TBackTrace::Capture()+28 (0xC54570C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString<char, std::char_traits<char>> const&, bool)+137 (0xC6F8A99) TestOneWrite(TBasicString<char, std::char_traits<char>>, TVector<TBasicString<char, std::char_traits<char>>, std::__y1::allocator<TBasicString<char, std::char_traits<char>>>>&&)+1948 (0xC43A66C) NTestSuiteTKeyValueTracingTest::TTestCaseWriteSmall::Execute_(NUnitTest::TTestContext&)+138 (0xC43F6BA) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0xC447647) NUnitTest::TTestBase::Run(std::__y1::function<void ()>, TBasicString<char, std::char_traits<char>> const&, char const*, bool)+126 (0xC6FA94E) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+426 (0xC446EAA) NUnitTest::TTestFactory::Execute()+803 (0xC6FB0C3) NUnitTest::RunMain(int, char**)+3021 (0xC7093DD) ??+0 (0x7F4565A87D90) __libc_start_main+128 (0x7F4565A87E40) _start+41 (0xB590029) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadHuge [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0xC54570C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString<char, std::char_traits<char>> const&, bool)+137 (0xC6F8A99) TestOneRead(TBasicString<char, std::char_traits<char>>, TBasicString<char, std::char_traits<char>>)+2035 (0xC43C9D3) NTestSuiteTKeyValueTracingTest::TTestCaseReadHuge::Execute_(NUnitTest::TTestContext&)+157 (0xC43FB3D) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0xC447647) NUnitTest::TTestBase::Run(std::__y1::function<void ()>, TBasicString<char, std::char_traits<char>> const&, char const*, bool)+126 (0xC6FA94E) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+426 (0xC446EAA) NUnitTest::TTestFactory::Execute()+803 (0xC6FB0C3) NUnitTest::RunMain(int, char**)+3021 (0xC7093DD) ??+0 (0x7F8842D39D90) __libc_start_main+128 (0x7F8842D39E40) _start+41 (0xB590029) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut_trace/unittest >> TKeyValueTracingTest::ReadSmall [FAIL] Test command err: equal assertion failed at ydb/core/keyvalue/keyvalue_ut_trace.cpp:124, void TestOneRead(TString, TString): env.WilsonUploader->Traces.size() == 1 TBackTrace::Capture()+28 (0xC54570C) NUnitTest::NPrivate::RaiseError(char const*, TBasicString<char, std::char_traits<char>> const&, bool)+137 (0xC6F8A99) TestOneRead(TBasicString<char, std::char_traits<char>>, TBasicString<char, std::char_traits<char>>)+2035 (0xC43C9D3) NTestSuiteTKeyValueTracingTest::TTestCaseReadSmall::Execute_(NUnitTest::TTestContext&)+157 (0xC43F9AD) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0xC447647) NUnitTest::TTestBase::Run(std::__y1::function<void ()>, TBasicString<char, std::char_traits<char>> const&, char const*, bool)+126 (0xC6FA94E) NTestSuiteTKeyValueTracingTest::TCurrentTest::Execute()+426
(0xC446EAA) NUnitTest::TTestFactory::Execute()+803 (0xC6FB0C3) NUnitTest::RunMain(int, char**)+3021 (0xC7093DD) ??+0 (0x7FC6CA75BD90) __libc_start_main+128 (0x7FC6CA75BE40) _start+41 (0xB590029) >> TBlobStorageProxyTest::TestVPutVCollectVGetRace [GOOD] >> TBlobStorageProxyTest::TestVGetNoData ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestHiveBalancerWithSpareNodes [GOOD] Test command err: 2025-06-03T10:30:30.844118Z node 4 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.845445Z node 4 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.845499Z node 4 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.845647Z node 4 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [4:149:2076] ControllerId# 72057594037932033 2025-06-03T10:30:30.845651Z node 4 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.845688Z node 4 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.845704Z node 4 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.846900Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.847503Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.847546Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:30.847716Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:30.847987Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:30.848000Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.848129Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:161:2079] ControllerId# 72057594037932033 2025-06-03T10:30:30.848132Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.848148Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.848163Z node 1 :BS_NODE DEBUG: 
{NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.851543Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.851563Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.851847Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:169:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.851871Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:170:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.851899Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:171:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.851932Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:172:2087] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.851963Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:173:2088] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.851981Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:174:2089] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.852022Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:160:2078] Create Queue# [1:175:2090] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.852027Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.852040Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:161:2079] 2025-06-03T10:30:30.852045Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:161:2079] 2025-06-03T10:30:30.852053Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.852060Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.852251Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.852273Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.853322Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.853358Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.853507Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:184:2077] ControllerId# 72057594037932033 2025-06-03T10:30:30.853511Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.853525Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.853546Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 
2025-06-03T10:30:30.854568Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.854607Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.854613Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.854923Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:190:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.854946Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:191:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.854967Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:192:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.854991Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:193:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.855032Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:194:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.855053Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:195:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.855075Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:183:2076] Create Queue# [2:196:2087] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.855080Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.855091Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:184:2077] 2025-06-03T10:30:30.855095Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:184:2077] 2025-06-03T10:30:30.855101Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.855106Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.855207Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.855223Z node 3 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.856229Z node 3 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.856265Z node 3 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.856393Z node 3 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [3:204:2077] ControllerId# 72057594037932033 2025-06-03T10:30:30.856397Z node 3 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.856418Z node 3 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.856451Z node 3 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} 
StartRequestReportingThrottler 2025-06-03T10:30:30.857478Z node 3 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.857516Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.857522Z node 3 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.857893Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:210:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.857919Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:211:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.857944Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:212:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.857962Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:213:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.857987Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:214:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.858012Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:215:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.858032Z node 3 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [3:203:2076] Create Queue# [3:216:2087] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.858035Z node 3 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.858046Z node 3 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [3:204:2077] 2025-0 ... 
R DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [70:1293:2098] 2025-06-03T10:31:20.340666Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037893] forward result remote node 70 [65:2087:2742] 2025-06-03T10:31:20.340691Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037893] remote node connected [65:2087:2742] 2025-06-03T10:31:20.340696Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [65:2087:2742] 2025-06-03T10:31:20.340749Z node 70 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [65:2087:2742] 2025-06-03T10:31:20.340841Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [65:2087:2742] 2025-06-03T10:31:20.340848Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [65:2087:2742] 2025-06-03T10:31:20.340982Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037894] ::Bootstrap [65:2090:2744] 2025-06-03T10:31:20.340987Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [65:2090:2744] 2025-06-03T10:31:20.340996Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StNormal ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:20.341003Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [69:1301:2099] 2025-06-03T10:31:20.341020Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 69 [65:2090:2744] 2025-06-03T10:31:20.341046Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [65:2090:2744] 2025-06-03T10:31:20.341060Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [65:2090:2744] 2025-06-03T10:31:20.341169Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:349: TClient[72075186224037894] connect request undelivered [65:2090:2744] 2025-06-03T10:31:20.341175Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:559: TClient[72075186224037894] immediate retry [65:2090:2744] 2025-06-03T10:31:20.341179Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037894] lookup [65:2090:2744] 2025-06-03T10:31:20.341186Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:536: Handle TEvTabletProblem tabletId: 72075186224037894 entry.State: StNormal 2025-06-03T10:31:20.341219Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037894 entry.State: StProblemResolve ev: {EvForward TabletID: 72075186224037894 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:20.341233Z node 65 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037894 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:31:20.341264Z node 65 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 0} 2025-06-03T10:31:20.341272Z node 65 :STATESTORAGE DEBUG: 
statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 1} 2025-06-03T10:31:20.341280Z node 65 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037894 Cookie: 2} 2025-06-03T10:31:20.348096Z node 65 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 CurrentLeader: [70:1955:2266] CurrentLeaderTablet: [70:1961:2269] CurrentGeneration: 3 CurrentStep: 0} 2025-06-03T10:31:20.348319Z node 65 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037894 CurrentLeader: [70:1955:2266] CurrentLeaderTablet: [70:1961:2269] CurrentGeneration: 3 CurrentStep: 0} 2025-06-03T10:31:20.348354Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037894 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037894 Cookie: 0 CurrentLeader: [70:1955:2266] CurrentLeaderTablet: [70:1961:2269] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[65:24343667:0] : 7}, {[65:1099535971443:0] : 10}}}} 2025-06-03T10:31:20.348363Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037894 followers: 0 2025-06-03T10:31:20.348374Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037894 followers: 0 countLeader 1 allowFollowers 0 winner: [70:1955:2266] 2025-06-03T10:31:20.348410Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037894] forward result remote node 70 [65:2090:2744] 2025-06-03T10:31:20.348454Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037894] remote node connected [65:2090:2744] 2025-06-03T10:31:20.348459Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037894]::SendEvent [65:2090:2744] 2025-06-03T10:31:20.348555Z node 70 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037894] Accept Connect Originator# [65:2090:2744] 2025-06-03T10:31:20.348654Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037894] connected with status OK role: Leader [65:2090:2744] 2025-06-03T10:31:20.348661Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037894] send queued [65:2090:2744] 2025-06-03T10:31:20.348867Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037895] ::Bootstrap [65:2094:2746] 2025-06-03T10:31:20.348875Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037895] lookup [65:2094:2746] 2025-06-03T10:31:20.348889Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037895 entry.State: StNormal ev: {EvForward TabletID: 72075186224037895 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:20.348897Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037895 followers: 0 countLeader 1 allowFollowers 0 winner: [70:1799:2193] 2025-06-03T10:31:20.348915Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037895] forward result remote node 70 [65:2094:2746] 2025-06-03T10:31:20.348938Z node 65 :PIPE_CLIENT 
DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037895] remote node connected [65:2094:2746] 2025-06-03T10:31:20.348943Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037895]::SendEvent [65:2094:2746] 2025-06-03T10:31:20.348996Z node 70 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037895] Accept Connect Originator# [65:2094:2746] 2025-06-03T10:31:20.349091Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037895] connected with status OK role: Leader [65:2094:2746] 2025-06-03T10:31:20.349098Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037895] send queued [65:2094:2746] 2025-06-03T10:31:20.349228Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037896] ::Bootstrap [65:2097:2748] 2025-06-03T10:31:20.349236Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037896] lookup [65:2097:2748] 2025-06-03T10:31:20.349246Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037896 entry.State: StNormal ev: {EvForward TabletID: 72075186224037896 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:20.349252Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 3 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72075186224037896 followers: 0 countLeader 1 allowFollowers 0 winner: [70:1802:2195] 2025-06-03T10:31:20.349270Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72075186224037896] forward result remote node 70 [65:2097:2748] 2025-06-03T10:31:20.349307Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72075186224037896] remote node connected [65:2097:2748] 2025-06-03T10:31:20.349314Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037896]::SendEvent [65:2097:2748] 2025-06-03T10:31:20.349370Z node 70 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037896] Accept Connect Originator# [65:2097:2748] 2025-06-03T10:31:20.349472Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037896] connected with status OK role: Leader [65:2097:2748] 2025-06-03T10:31:20.349478Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037896] send queued [65:2097:2748] 2025-06-03T10:31:20.349635Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [65:2099:2749] 2025-06-03T10:31:20.349641Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [65:2099:2749] 2025-06-03T10:31:20.349651Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:20.349658Z node 65 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 65 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [65:593:2274] 2025-06-03T10:31:20.349673Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [65:2099:2749] 2025-06-03T10:31:20.349691Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [65:2099:2749] 2025-06-03T10:31:20.349706Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [65:2099:2749] 
2025-06-03T10:31:20.349711Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [65:2099:2749] 2025-06-03T10:31:20.349732Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [65:2099:2749] 2025-06-03T10:31:20.349771Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [65:2099:2749] 2025-06-03T10:31:20.349778Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [65:2099:2749] 2025-06-03T10:31:20.349783Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [65:2099:2749] 2025-06-03T10:31:20.349788Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [65:2099:2749] 2025-06-03T10:31:20.349794Z node 65 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [65:2099:2749] 2025-06-03T10:31:20.349806Z node 65 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [65:565:2269] EventType# 268697616 2025-06-03T10:31:20.349834Z node 65 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([65:2099:2749]) [65:2100:2750] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 >> TBlobStorageProxyTest::TestEmptyDiscover [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-28 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-46 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 >> TBlobStorageProxyTest::TestProxyLongTailDiscover >> TBlobStorageProxyTest::TestGetMultipart [GOOD] >> TBlobStorageProxyTest::TestGetFail >> TBlobStorageProxyTest::TestBlock >> TBlobStorageProxyTest::TestVGetNoData [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-6 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 >> TBlobStorageProxyTest::TestVPutVGetPersistence >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-29 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVGetNoData [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-47 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 >> TBlobStorageProxyTest::TestPersistence [GOOD] >> TBlobStorageProxyTest::TestPartialGetStripe >> TBlobStorageProxyTest::TestGetFail [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 >> TBlobStorageProxyTest::TestNormal [GOOD] >> TBlobStorageProxyTest::TestNormalMirror |68.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |68.2%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut |68.2%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/src-client-topic-ut-with_direct_read_ut >> TBlobStorageProxyTest::TestBlock [GOOD] >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestGetFail [GOOD] >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-30 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-48 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestEmptyDiscoverMaxi [GOOD] >> TBlobStorageProxyTest::TestDoubleEmptyGet >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscover [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi >> VDiskBalancing::TestRandom_Block42 [GOOD] |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPartialGetStripe [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-31 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 >> TBlobStorageProxyTest::TestNormalMirror [GOOD] |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestVPutVGetPersistence [GOOD] |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestBatchedPutRequestDoesNotContainAHugeBlob [GOOD] >> KqpStreamLookup::ReadTableWithIndexDuringSplit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 >> TTablesWithReboots::ChainedCopyTableAndDropWithReboots ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_balancing/unittest >> VDiskBalancing::TestRandom_Block42 [GOOD] Test command err: RandomSeed# 11578312850087719596 Step = 0 SEND TEvPut with key [1:1:0:0:0:585447:0] TEvPutResult: TEvPutResult {Id# [1:1:0:0:0:585447:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 1 SEND TEvPut with key [1:1:1:0:0:37868:0] TEvPutResult: TEvPutResult {Id# [1:1:1:0:0:37868:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 2 SEND TEvPut with key [1:1:2:0:0:619381:0] TEvPutResult: TEvPutResult {Id# [1:1:2:0:0:619381:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 3 SEND TEvPut with key [1:1:3:0:0:725585:0] TEvPutResult: TEvPutResult {Id# [1:1:3:0:0:725585:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999988} Step = 4 SEND TEvPut with key [1:1:4:0:0:2934723:0] TEvPutResult: TEvPutResult {Id# [1:1:4:0:0:2934723:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.999988} Stop node 4 2025-06-03T10:30:38.267544Z 1 00h01m00.010512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 5 SEND TEvPut with key [1:1:5:0:0:502135:0] TEvPutResult: TEvPutResult {Id# [1:1:5:0:0:502135:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Step = 6 SEND TEvPut with key [1:1:6:0:0:3044947:0] TEvPutResult: TEvPutResult {Id# [1:1:6:0:0:3044947:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999976} Stop node 7 2025-06-03T10:30:38.316138Z 1 00h01m10.060512s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 7 SEND TEvPut with key [1:1:7:0:0:582354:0] TEvPutResult: TEvPutResult {Id# [1:1:7:0:0:582354:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 8 SEND TEvPut with key [1:1:8:0:0:1478820:0] TEvPutResult: TEvPutResult {Id# [1:1:8:0:0:1478820:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Step = 9 SEND TEvPut with key [1:1:9:0:0:1360774:0] TEvPutResult: TEvPutResult {Id# [1:1:9:0:0:1360774:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999963} Start node 4 Step = 10 SEND TEvPut with key [1:1:10:0:0:1727870:0] TEvPutResult: TEvPutResult {Id# [1:1:10:0:0:1727870:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 11 SEND TEvPut with key [1:1:11:0:0:1883457:0] TEvPutResult: TEvPutResult {Id# [1:1:11:0:0:1883457:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 12 SEND TEvPut with key [1:1:12:0:0:568368:0] TEvPutResult: TEvPutResult {Id# [1:1:12:0:0:568368:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 13 SEND TEvPut with key [1:1:13:0:0:896600:0] TEvPutResult: TEvPutResult {Id# [1:1:13:0:0:896600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 14 SEND TEvPut with key [1:1:14:0:0:179270:0] TEvPutResult: TEvPutResult {Id# [1:1:14:0:0:179270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 15 SEND TEvPut with key [1:1:15:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:15:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 16 SEND TEvPut with key [1:1:16:0:0:670396:0] TEvPutResult: TEvPutResult {Id# [1:1:16:0:0:670396:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 17 SEND TEvPut with key [1:1:17:0:0:1584741:0] TEvPutResult: TEvPutResult {Id# [1:1:17:0:0:1584741:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 18 SEND TEvPut with key [1:1:18:0:0:2384818:0] TEvPutResult: TEvPutResult {Id# [1:1:18:0:0:2384818:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 19 SEND TEvPut with key [1:1:19:0:0:2867010:0] TEvPutResult: TEvPutResult {Id# [1:1:19:0:0:2867010:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 20 SEND TEvPut with key [1:1:20:0:0:2911789:0] TEvPutResult: TEvPutResult {Id# [1:1:20:0:0:2911789:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 21 SEND TEvPut with key [1:1:21:0:0:2463622:0] TEvPutResult: TEvPutResult {Id# [1:1:21:0:0:2463622:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 22 SEND TEvPut with key [1:1:22:0:0:322338:0] TEvPutResult: TEvPutResult {Id# [1:1:22:0:0:322338:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 23 SEND TEvPut with key [1:1:23:0:0:2119770:0] TEvPutResult: TEvPutResult {Id# [1:1:23:0:0:2119770:0] Status# OK StatusFlags# { } 
ApproximateFreeSpaceShare# 0.999939} Step = 24 SEND TEvPut with key [1:1:24:0:0:56036:0] TEvPutResult: TEvPutResult {Id# [1:1:24:0:0:56036:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Step = 25 SEND TEvPut with key [1:1:25:0:0:2648607:0] TEvPutResult: TEvPutResult {Id# [1:1:25:0:0:2648607:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999939} Stop node 0 2025-06-03T10:30:38.630888Z 3 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [3:188:17] ServerId# [1:296:58] TabletId# 72057594037932033 PipeClientId# [3:188:17] 2025-06-03T10:30:38.630999Z 6 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [6:209:17] ServerId# [1:299:61] TabletId# 72057594037932033 PipeClientId# [6:209:17] 2025-06-03T10:30:38.631054Z 5 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [5:7662:16] ServerId# [1:7671:1092] TabletId# 72057594037932033 PipeClientId# [5:7662:16] 2025-06-03T10:30:38.631080Z 4 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [4:195:17] ServerId# [1:297:59] TabletId# 72057594037932033 PipeClientId# [4:195:17] 2025-06-03T10:30:38.631106Z 2 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [2:181:17] ServerId# [1:295:57] TabletId# 72057594037932033 PipeClientId# [2:181:17] 2025-06-03T10:30:38.631129Z 7 00h01m30.100512s :BS_NODE ERROR: {NW42@node_warden_pipe.cpp:59} Handle(TEvTabletPipe::TEvClientDestroyed) ClientId# [7:216:17] ServerId# [1:300:62] TabletId# 72057594037932033 PipeClientId# [7:216:17] Step = 26 SEND TEvPut with key [1:1:26:0:0:539431:0] TEvPutResult: TEvPutResult {Id# [1:1:26:0:0:539431:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 27 SEND TEvPut with key [1:1:27:0:0:148482:0] TEvPutResult: TEvPutResult {Id# [1:1:27:0:0:148482:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 28 SEND TEvPut with key [1:1:28:0:0:2673563:0] TEvPutResult: TEvPutResult {Id# [1:1:28:0:0:2673563:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 29 SEND TEvPut with key [1:1:29:0:0:265170:0] TEvPutResult: TEvPutResult {Id# [1:1:29:0:0:265170:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 30 SEND TEvPut with key [1:1:30:0:0:2398732:0] TEvPutResult: TEvPutResult {Id# [1:1:30:0:0:2398732:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Compact vdisk 2 Step = 31 SEND TEvPut with key [1:1:31:0:0:2302132:0] TEvPutResult: TEvPutResult {Id# [1:1:31:0:0:2302132:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 32 SEND TEvPut with key [1:1:32:0:0:3112269:0] TEvPutResult: TEvPutResult {Id# [1:1:32:0:0:3112269:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 33 SEND TEvPut with key [1:1:33:0:0:883758:0] TEvPutResult: TEvPutResult {Id# [1:1:33:0:0:883758:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 34 SEND TEvPut with key [1:1:34:0:0:1212958:0] TEvPutResult: TEvPutResult {Id# [1:1:34:0:0:1212958:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 35 SEND TEvPut with key [1:1:35:0:0:3026131:0] TEvPutResult: TEvPutResult {Id# [1:1:35:0:0:3026131:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 36 SEND TEvPut with key 
[1:1:36:0:0:139148:0] TEvPutResult: TEvPutResult {Id# [1:1:36:0:0:139148:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 37 SEND TEvPut with key [1:1:37:0:0:200198:0] TEvPutResult: TEvPutResult {Id# [1:1:37:0:0:200198:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 38 SEND TEvPut with key [1:1:38:0:0:1252178:0] TEvPutResult: TEvPutResult {Id# [1:1:38:0:0:1252178:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 39 SEND TEvPut with key [1:1:39:0:0:1897783:0] TEvPutResult: TEvPutResult {Id# [1:1:39:0:0:1897783:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 40 SEND TEvPut with key [1:1:40:0:0:1486678:0] TEvPutResult: TEvPutResult {Id# [1:1:40:0:0:1486678:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 41 SEND TEvPut with key [1:1:41:0:0:1285964:0] TEvPutResult: TEvPutResult {Id# [1:1:41:0:0:1285964:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 42 SEND TEvPut with key [1:1:42:0:0:1221731:0] TEvPutResult: TEvPutResult {Id# [1:1:42:0:0:1221731:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 43 SEND TEvPut with key [1:1:43:0:0:1613844:0] TEvPutResult: TEvPutResult {Id# [1:1:43:0:0:1613844:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 44 SEND TEvPut with key [1:1:44:0:0:2582908:0] TEvPutResult: TEvPutResult {Id# [1:1:44:0:0:2582908:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 45 SEND TEvPut with key [1:1:45:0:0:1703743:0] TEvPutResult: TEvPutResult {Id# [1:1:45:0:0:1703743:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 46 SEND TEvPut with key [1:1:46:0:0:1362981:0] TEvPutResult: TEvPutResult {Id# [1:1:46:0:0:1362981:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 47 SEND TEvPut with key [1:1:47:0:0:1469807:0] TEvPutResult: TEvPutResult {Id# [1:1:47:0:0:1469807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 48 SEND TEvPut with key [1:1:48:0:0:2832565:0] TEvPutResult: TEvPutResult {Id# [1:1:48:0:0:2832565:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 49 SEND TEvPut with key [1:1:49:0:0:1960611:0] TEvPutResult: TEvPutResult {Id# [1:1:49:0:0:1960611:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 50 SEND TEvPut with key [1:1:50:0:0:1164230:0] TEvPutResult: TEvPutResult {Id# [1:1:50:0:0:1164230:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 51 SEND TEvPut with key [1:1:51:0:0:836900:0] TEvPutResult: TEvPutResult {Id# [1:1:51:0:0:836900:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 52 SEND TEvPut with key [1:1:52:0:0:838380:0] TEvPutResult: TEvPutResult {Id# [1:1:52:0:0:838380:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Step = 53 SEND TEvPut with key [1:1:53:0:0:1975575:0] TEvPutResult: TEvPutResult {Id# [1:1:53:0:0:1975575:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.99989} Start node 0 Step = 54 SEND TEvPut with key [1:1:54:0:0:1888556:0] TEvPutResult: TEvPutResult {Id# [1:1:54:0:0:1888556:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 55 SEND TEvPut with key [1:1:55:0:0:715063:0] TEvPutResult: TEvPutResult {Id# [1:1:55:0:0:715063:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 56 SEND TEvPut with key [1:1:56:0:0:42993:0] TEvPutResult: TEvPutResult {Id# [1:1:56:0:0:42993:0] Status# OK 
StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 57 SEND TEvPut with key [1:1:57:0:0:1491407:0] TEvPutResult: TEvPutResult {Id# [1:1:57:0:0:1491407:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 58 SEND TEvPut with key [1:1:58:0:0:702845:0] TEvPutResult: TEvPutResult {Id# [1:1:58:0:0:702845:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999817} Step = 59 SEND TEvPut with key [1:1:59:0:0:2539948:0] TEvPutResult: TEvPutResult {Id# [1:1:59:0:0:2539948:0] Statu ... ND TEvPut with key [1:1:936:0:0:2748248:0] TEvPutResult: TEvPutResult {Id# [1:1:936:0:0:2748248:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 937 SEND TEvPut with key [1:1:937:0:0:112302:0] TEvPutResult: TEvPutResult {Id# [1:1:937:0:0:112302:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 938 SEND TEvPut with key [1:1:938:0:0:800417:0] TEvPutResult: TEvPutResult {Id# [1:1:938:0:0:800417:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 939 SEND TEvPut with key [1:1:939:0:0:2336442:0] TEvPutResult: TEvPutResult {Id# [1:1:939:0:0:2336442:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Step = 940 SEND TEvPut with key [1:1:940:0:0:982070:0] TEvPutResult: TEvPutResult {Id# [1:1:940:0:0:982070:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999646} Start node 4 Step = 941 SEND TEvPut with key [1:1:941:0:0:713632:0] TEvPutResult: TEvPutResult {Id# [1:1:941:0:0:713632:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 942 SEND TEvPut with key [1:1:942:0:0:1644191:0] TEvPutResult: TEvPutResult {Id# [1:1:942:0:0:1644191:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 943 SEND TEvPut with key [1:1:943:0:0:254634:0] TEvPutResult: TEvPutResult {Id# [1:1:943:0:0:254634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 944 SEND TEvPut with key [1:1:944:0:0:1141270:0] TEvPutResult: TEvPutResult {Id# [1:1:944:0:0:1141270:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 945 SEND TEvPut with key [1:1:945:0:0:610103:0] TEvPutResult: TEvPutResult {Id# [1:1:945:0:0:610103:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Step = 946 SEND TEvPut with key [1:1:946:0:0:24822:0] TEvPutResult: TEvPutResult {Id# [1:1:946:0:0:24822:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Compact vdisk 6 Step = 947 SEND TEvPut with key [1:1:947:0:0:100167:0] TEvPutResult: TEvPutResult {Id# [1:1:947:0:0:100167:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 948 SEND TEvPut with key [1:1:948:0:0:645630:0] TEvPutResult: TEvPutResult {Id# [1:1:948:0:0:645630:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 949 SEND TEvPut with key [1:1:949:0:0:2125890:0] TEvPutResult: TEvPutResult {Id# [1:1:949:0:0:2125890:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 950 SEND TEvPut with key [1:1:950:0:0:2544891:0] TEvPutResult: TEvPutResult {Id# [1:1:950:0:0:2544891:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 951 SEND TEvPut with key [1:1:951:0:0:647007:0] TEvPutResult: TEvPutResult {Id# [1:1:951:0:0:647007:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 952 SEND TEvPut with key [1:1:952:0:0:2031652:0] TEvPutResult: TEvPutResult {Id# [1:1:952:0:0:2031652:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Step = 953 SEND TEvPut with key 
[1:1:953:0:0:2109805:0] TEvPutResult: TEvPutResult {Id# [1:1:953:0:0:2109805:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999548} Stop node 3 2025-06-03T10:31:08.177592Z 1 00h28m30.715821s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 4 Step = 954 SEND TEvPut with key [1:1:954:0:0:1353403:0] TEvPutResult: TEvPutResult {Id# [1:1:954:0:0:1353403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Stop node 4 2025-06-03T10:31:08.509545Z 1 00h28m40.725467s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 5 Step = 955 SEND TEvPut with key [1:1:955:0:0:1286278:0] TEvPutResult: TEvPutResult {Id# [1:1:955:0:0:1286278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999622} Start node 3 Step = 956 SEND TEvPut with key [1:1:956:0:0:1875483:0] TEvPutResult: TEvPutResult {Id# [1:1:956:0:0:1875483:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999707} Step = 957 SEND TEvPut with key [1:1:957:0:0:1021388:0] TEvPutResult: TEvPutResult {Id# [1:1:957:0:0:1021388:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Start node 4 Step = 958 SEND TEvPut with key [1:1:958:0:0:860806:0] TEvPutResult: TEvPutResult {Id# [1:1:958:0:0:860806:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 959 SEND TEvPut with key [1:1:959:0:0:385917:0] TEvPutResult: TEvPutResult {Id# [1:1:959:0:0:385917:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 960 SEND TEvPut with key [1:1:960:0:0:200998:0] TEvPutResult: TEvPutResult {Id# [1:1:960:0:0:200998:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 961 SEND TEvPut with key [1:1:961:0:0:1661659:0] TEvPutResult: TEvPutResult {Id# [1:1:961:0:0:1661659:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 962 SEND TEvPut with key [1:1:962:0:0:771410:0] TEvPutResult: TEvPutResult {Id# [1:1:962:0:0:771410:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 963 SEND TEvPut with key [1:1:963:0:0:1414281:0] TEvPutResult: TEvPutResult {Id# [1:1:963:0:0:1414281:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 964 SEND TEvPut with key [1:1:964:0:0:2848837:0] TEvPutResult: TEvPutResult {Id# [1:1:964:0:0:2848837:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 965 SEND TEvPut with key [1:1:965:0:0:989600:0] TEvPutResult: TEvPutResult {Id# [1:1:965:0:0:989600:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999719} Step = 966 SEND TEvPut with key [1:1:966:0:0:2761296:0] TEvPutResult: TEvPutResult {Id# [1:1:966:0:0:2761296:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 967 SEND TEvPut with key [1:1:967:0:0:981163:0] TEvPutResult: TEvPutResult {Id# [1:1:967:0:0:981163:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 968 SEND TEvPut with key [1:1:968:0:0:14298:0] TEvPutResult: TEvPutResult {Id# [1:1:968:0:0:14298:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 969 SEND TEvPut with key [1:1:969:0:0:626285:0] TEvPutResult: TEvPutResult {Id# [1:1:969:0:0:626285:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Step = 970 SEND TEvPut with key [1:1:970:0:0:334566:0] TEvPutResult: TEvPutResult {Id# [1:1:970:0:0:334566:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999634} Stop node 7 2025-06-03T10:31:08.982619Z 1 00h29m10.747043s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 8 Step = 971 SEND 
TEvPut with key [1:1:971:0:0:972888:0] TEvPutResult: TEvPutResult {Id# [1:1:971:0:0:972888:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 972 SEND TEvPut with key [1:1:972:0:0:786055:0] TEvPutResult: TEvPutResult {Id# [1:1:972:0:0:786055:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Step = 973 SEND TEvPut with key [1:1:973:0:0:2707502:0] TEvPutResult: TEvPutResult {Id# [1:1:973:0:0:2707502:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999585} Stop node 1 2025-06-03T10:31:09.101277Z 1 00h29m20.751536s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 974 SEND TEvPut with key [1:1:974:0:0:2660812:0] TEvPutResult: TEvPutResult {Id# [1:1:974:0:0:2660812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Start node 1 Step = 975 SEND TEvPut with key [1:1:975:0:0:3005283:0] TEvPutResult: TEvPutResult {Id# [1:1:975:0:0:3005283:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999744} Stop node 1 2025-06-03T10:31:09.239490Z 1 00h29m40.753072s :PIPE_SERVER ERROR: [72057594037932033] NodeDisconnected NodeId# 2 Step = 976 SEND TEvPut with key [1:1:976:0:0:1542748:0] TEvPutResult: TEvPutResult {Id# [1:1:976:0:0:1542748:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 977 SEND TEvPut with key [1:1:977:0:0:2837300:0] TEvPutResult: TEvPutResult {Id# [1:1:977:0:0:2837300:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 978 SEND TEvPut with key [1:1:978:0:0:481535:0] TEvPutResult: TEvPutResult {Id# [1:1:978:0:0:481535:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 979 SEND TEvPut with key [1:1:979:0:0:24668:0] TEvPutResult: TEvPutResult {Id# [1:1:979:0:0:24668:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 980 SEND TEvPut with key [1:1:980:0:0:1760402:0] TEvPutResult: TEvPutResult {Id# [1:1:980:0:0:1760402:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 981 SEND TEvPut with key [1:1:981:0:0:1711812:0] TEvPutResult: TEvPutResult {Id# [1:1:981:0:0:1711812:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 982 SEND TEvPut with key [1:1:982:0:0:1422922:0] TEvPutResult: TEvPutResult {Id# [1:1:982:0:0:1422922:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 983 SEND TEvPut with key [1:1:983:0:0:2533122:0] TEvPutResult: TEvPutResult {Id# [1:1:983:0:0:2533122:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 984 SEND TEvPut with key [1:1:984:0:0:347759:0] TEvPutResult: TEvPutResult {Id# [1:1:984:0:0:347759:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 985 SEND TEvPut with key [1:1:985:0:0:1862506:0] TEvPutResult: TEvPutResult {Id# [1:1:985:0:0:1862506:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 986 SEND TEvPut with key [1:1:986:0:0:101043:0] TEvPutResult: TEvPutResult {Id# [1:1:986:0:0:101043:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 987 SEND TEvPut with key [1:1:987:0:0:672278:0] TEvPutResult: TEvPutResult {Id# [1:1:987:0:0:672278:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 988 SEND TEvPut with key [1:1:988:0:0:2042425:0] TEvPutResult: TEvPutResult {Id# [1:1:988:0:0:2042425:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 989 SEND TEvPut with key [1:1:989:0:0:1201477:0] TEvPutResult: TEvPutResult {Id# [1:1:989:0:0:1201477:0] Status# OK StatusFlags# 
{ } ApproximateFreeSpaceShare# 0.999683} Step = 990 SEND TEvPut with key [1:1:990:0:0:1724337:0] TEvPutResult: TEvPutResult {Id# [1:1:990:0:0:1724337:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 991 SEND TEvPut with key [1:1:991:0:0:2174403:0] TEvPutResult: TEvPutResult {Id# [1:1:991:0:0:2174403:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 992 SEND TEvPut with key [1:1:992:0:0:193000:0] TEvPutResult: TEvPutResult {Id# [1:1:992:0:0:193000:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 993 SEND TEvPut with key [1:1:993:0:0:618508:0] TEvPutResult: TEvPutResult {Id# [1:1:993:0:0:618508:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 994 SEND TEvPut with key [1:1:994:0:0:2278246:0] TEvPutResult: TEvPutResult {Id# [1:1:994:0:0:2278246:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 995 SEND TEvPut with key [1:1:995:0:0:2001881:0] TEvPutResult: TEvPutResult {Id# [1:1:995:0:0:2001881:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 996 SEND TEvPut with key [1:1:996:0:0:1759634:0] TEvPutResult: TEvPutResult {Id# [1:1:996:0:0:1759634:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 997 SEND TEvPut with key [1:1:997:0:0:2469234:0] TEvPutResult: TEvPutResult {Id# [1:1:997:0:0:2469234:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 998 SEND TEvPut with key [1:1:998:0:0:1329395:0] TEvPutResult: TEvPutResult {Id# [1:1:998:0:0:1329395:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Step = 999 SEND TEvPut with key [1:1:999:0:0:1243807:0] TEvPutResult: TEvPutResult {Id# [1:1:999:0:0:1243807:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0.999683} Starting nodes Start compaction 1 Start checking >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Block [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestNormalMirror [GOOD] |68.2%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithUpdates [GOOD] Test command err: 2025-06-03T10:31:05.014617Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668729238645521:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:05.015771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00228f/r3tmp/tmpFmEE6r/pdisk_1.dat 2025-06-03T10:31:05.169581Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29320, node 1 2025-06-03T10:31:05.196683Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:05.196719Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:05.214324Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:05.228814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:05.228830Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:05.228832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:05.228890Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24755 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:05.328535Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:24755 2025-06-03T10:31:05.659612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646362:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.659646Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.706338Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.789475Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646529:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.789503Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.796465Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665809 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665809 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:05.836741Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646624:2377], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.836791Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.837204Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646629:2380], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.838300Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.838372Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.838377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:31:05.838393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715660:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.838397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-03T10:31:05.838409Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715660:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.838419Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715660:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-03T10:31:05.838472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715660:3 1 -> 128 2025-06-03T10:31:05.838556Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715660:4, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.838561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715660:3, at schemeshard: 72057594046644480 2025-06-03T10:31:05.839444Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646639:2387], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.839465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646640:2388], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.839474Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729238646635:2384], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.839506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.840287Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715661:0, path# /Root/.metadata/workload_manager/pools/default 2025-06-03T10:31:05.840332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715661:1, propose status:StatusMultipleModifications, reason: Check failed: path: '/Root/.metadata/workload_manager/pools/default', error: path exists but crea ... 3. Ctx: { TraceId: 01jwtnh3vfenj09k6260pc68rc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWUwNjg4ZDQtYTllOWFkMTItOTA5YmNlMjQtOWI5N2I0Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.819231Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730304. Ctx: { TraceId: 01jwtnh3vfc29jm7bdvw037qv9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JkM2UyMjktMzI5YWY3ZDctMzUyNWMyNzItNTRmZDk1M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.819519Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730305. Ctx: { TraceId: 01jwtnh3vfb9sb4f3hsk6seejs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDdkOWFmZGUtNDBiOTNkMTktZDg3ZGI5ZjMtYmJiZjYzZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.819802Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730306. Ctx: { TraceId: 01jwtnh3vf96hmw4sbsq7ere1n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U2MDM2NWItYTkzMWYwNWYtZjA0ZjZhYWQtMmJhYzg5MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.820066Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730307. Ctx: { TraceId: 01jwtnh3vf6j5krwaaybfpw98g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDI4MDE1NWEtZTBhZDc5MDgtMWU1ZWNlNzMtYmQxZTU5NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.820473Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730308. Ctx: { TraceId: 01jwtnh3vk7krc7s0taa6h02hd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRkM2MwZTgtZWU3ZDA2NWQtYWIwNmZmMzUtMzRmNGI0ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.821793Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730310. Ctx: { TraceId: 01jwtnh3vv0e3z8jhyjcttn0f8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRjYTk1NGMtYmU1MWU5NGYtOGEzZWY4MmEtNzc2YTUzYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.823210Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730309. Ctx: { TraceId: 01jwtnh3vvev214mndacze8vy1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTE1MzU3MmEtNGQwNTYxNWYtYmQ2OGQ4NmQtZDRiMDU2MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.826107Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730311. 
Ctx: { TraceId: 01jwtnh3vrbcemrjf4d723kesm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjFkNWM3ZGItMzE3ZjgxMTMtYmQyNzViMTQtOGEwOTkwNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.831946Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730312. Ctx: { TraceId: 01jwtnh3w5ayzh71jh4agqrte2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZiOGJmNi0zY2RlYzVmZS01MThjOGZhNC00NDY0NjA4NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.832106Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730313. Ctx: { TraceId: 01jwtnh3w58zttz1tx7es4fpch, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U2MDM2NWItYTkzMWYwNWYtZjA0ZjZhYWQtMmJhYzg5MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.832449Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730314. Ctx: { TraceId: 01jwtnh3w5b96jhqrk47bwphc5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRkM2MwZTgtZWU3ZDA2NWQtYWIwNmZmMzUtMzRmNGI0ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.832496Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730315. Ctx: { TraceId: 01jwtnh3w52w3f2zky46p4mgvk, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JkM2UyMjktMzI5YWY3ZDctMzUyNWMyNzItNTRmZDk1M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.840902Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730316. Ctx: { TraceId: 01jwtnh3wce7ggepym26e808zc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDdkOWFmZGUtNDBiOTNkMTktZDg3ZGI5ZjMtYmJiZjYzZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.840939Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730319. Ctx: { TraceId: 01jwtnh3wcf8kw4h0nkv354763, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWUwNjg4ZDQtYTllOWFkMTItOTA5YmNlMjQtOWI5N2I0Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.841327Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730317. Ctx: { TraceId: 01jwtnh3wc8m2hf3fz2w9yjnjm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDI4MDE1NWEtZTBhZDc5MDgtMWU1ZWNlNzMtYmQxZTU5NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.841336Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730318. Ctx: { TraceId: 01jwtnh3wc6fsav0ztp7e37kpj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRjYTk1NGMtYmU1MWU5NGYtOGEzZWY4MmEtNzc2YTUzYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.843060Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730320. Ctx: { TraceId: 01jwtnh3wg6w6fwbkjzxqwfh9g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTE1MzU3MmEtNGQwNTYxNWYtYmQ2OGQ4NmQtZDRiMDU2MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-03T10:31:25.853859Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730321. Ctx: { TraceId: 01jwtnh3wt2h37xzypx4ahftyz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRkM2MwZTgtZWU3ZDA2NWQtYWIwNmZmMzUtMzRmNGI0ZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.854247Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730322. Ctx: { TraceId: 01jwtnh3wt16ckv0ec4jbs3tda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjFkNWM3ZGItMzE3ZjgxMTMtYmQyNzViMTQtOGEwOTkwNTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.860584Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730324. Ctx: { TraceId: 01jwtnh3wt9jfs4zfq2mqzs1kd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2U2MDM2NWItYTkzMWYwNWYtZjA0ZjZhYWQtMmJhYzg5MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.860962Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730325. Ctx: { TraceId: 01jwtnh3wt515gdyt15v7ypw3y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2JkM2UyMjktMzI5YWY3ZDctMzUyNWMyNzItNTRmZDk1M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.861551Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730323. Ctx: { TraceId: 01jwtnh3wt1z3rmvv2bw5qptgf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODZiOGJmNi0zY2RlYzVmZS01MThjOGZhNC00NDY0NjA4NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: 2025-06-03T10:31:25.862369Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730329. Ctx: { TraceId: 01jwtnh3x05bk075xtpjk77q3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmRjYTk1NGMtYmU1MWU5NGYtOGEzZWY4MmEtNzc2YTUzYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665809 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:25.862666Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730330. Ctx: { TraceId: 01jwtnh3x0aqbz2swdazheq4v1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDI4MDE1NWEtZTBhZDc5MDgtMWU1ZWNlNzMtYmQxZTU5NWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.862728Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730327. 
Ctx: { TraceId: 01jwtnh3x17bkcvyaehk3kz2pb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDdkOWFmZGUtNDBiOTNkMTktZDg3ZGI5ZjMtYmJiZjYzZmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.862930Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730328. Ctx: { TraceId: 01jwtnh3x0515qh340hbckvq8r, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTE1MzU3MmEtNGQwNTYxNWYtYmQ2OGQ4NmQtZDRiMDU2MmM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:25.863313Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976730326. Ctx: { TraceId: 01jwtnh3x1e6gcv2xfny4ee2x0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWUwNjg4ZDQtYTllOWFkMTItOTA5YmNlMjQtOWI5N2I0Mzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665809 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-32 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 >> TTablesWithReboots::DropCopyWithRebootsAtCommit |68.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest |68.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-2 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 |68.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |68.2%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_balancing/test-results/unittest/{meta.json ... 
results_accumulator.log} |68.2%| [LD] {RESULT} $(B)/ydb/core/tx/replication/service/ut_worker/ydb-core-tx-replication-service-ut_worker |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> IndexBuildTest::CancellationNotEnoughRetries [GOOD] >> IndexBuildTest::CancellationNoTable >> TBlobStorageProxyTest::TestDoubleEmptyGet [GOOD] >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> IndexBuildTest::CancellationNoTable [GOOD] |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> THiveTest::TestLockTabletExecutionRebootTimeout [GOOD] >> THiveTest::TestLockTabletExecutionReconnect >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-33 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-49 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestCompactedGetMultipart [GOOD] >> TTablesWithReboots::CopyWithRebootsAtCommit >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-3 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> TTablesWithReboots::LostBorrowAckWithReboots >> IndexBuildTestReboots::BaseCaseWithDataColumns [GOOD] >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-34 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 >> TxUsage::WriteToTopic_Demo_2_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-50 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 >> TxUsage::WriteToTopic_Demo_2_Query |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestProxyLongTailDiscoverMaxi [GOOD] >> THiveTest::TestLockTabletExecutionReconnect [GOOD] >> THiveTest::TestLockTabletExecutionReconnectExpire >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-4 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 >> BasicUsage::ReadWithoutConsumerWithRestarts [GOOD] >> BasicUsage::ReadWithRestarts >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] >> LocalPartition::Restarts |68.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut_fat/unittest >> TBlobStorageProxyTest::TestPutGetStatusErasure3Plus2Stripe [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build/unittest >> IndexBuildTest::CancellationNoTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:30:51.226071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 
600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:30:51.226097Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.226102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:30:51.226106Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:30:51.226120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:30:51.226123Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:30:51.226135Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:30:51.226148Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:30:51.226251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:30:51.226310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:30:51.237050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:30:51.237074Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:51.240424Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:30:51.240523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:30:51.240554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:30:51.242376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:30:51.242425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:30:51.242534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.242588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:30:51.243392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.243443Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:30:51.243722Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.243729Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:30:51.243738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:30:51.243744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.243749Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:30:51.243763Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.244904Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:30:51.258950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:30:51.259016Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.259073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:30:51.259116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:30:51.259124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.259869Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.259899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:30:51.259966Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.259978Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:30:51.259984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:30:51.259990Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:30:51.260538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.260553Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:30:51.260557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:30:51.260908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.260915Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:30:51.260919Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.260935Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:30:51.261560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:30:51.262014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:30:51.262060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:30:51.262283Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:30:51.262310Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:30:51.262317Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.262379Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:30:51.262384Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:30:51.262409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path 
for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:30:51.262418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:30:51.262821Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:30:51.262829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:30:51.262864Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... ommon.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:29.568334Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:29.568341Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:29.568918Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.568936Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:29.568944Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:29.569541Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.569559Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.569569Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:29.569578Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:29.569622Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:29.570061Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:29.570116Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan 
to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:29.570362Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:29.570392Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 131 RawX2: 8589936746 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:29.570402Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:29.570478Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:29.570489Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:29.570531Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:29.570547Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:29.571072Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:29.571086Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:29.571150Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:29.571157Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [2:207:2208], at schemeshard: 72057594046678944, txId: 1, path id: 1 2025-06-03T10:31:29.571271Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.571285Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1:0 ProgressState 2025-06-03T10:31:29.571302Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:31:29.571307Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:29.571313Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:31:29.571316Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:29.571322Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:31:29.571328Z node 
2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:31:29.571334Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:31:29.571340Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:31:29.571356Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:31:29.571363Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 0 2025-06-03T10:31:29.571368Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046678944, LocalPathId: 1], 3 2025-06-03T10:31:29.571470Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:31:29.571488Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046678944, cookie: 1 2025-06-03T10:31:29.571494Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1 2025-06-03T10:31:29.571500Z node 2 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 3 2025-06-03T10:31:29.571505Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:29.571522Z node 2 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1, subscribers: 0 2025-06-03T10:31:29.572304Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1 2025-06-03T10:31:29.572475Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:29.572598Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:434: actor# [2:270:2260] Bootstrap 2025-06-03T10:31:29.574777Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:453: actor# [2:270:2260] Become StateWork (SchemeCache [2:275:2265]) 2025-06-03T10:31:29.574996Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index__create.cpp:23: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: DoExecute TxId: 101 DatabaseName: "/MyRoot" Settings { source_path: "/MyRoot/Table" index { name: "index1" index_columns: "index" global_index { settings { } } } max_shards_in_flight: 2 ScanSettings { MaxBatchRows: 1 } } 2025-06-03T10:31:29.575074Z node 2 :BUILD_INDEX NOTICE: schemeshard_build_index_tx_base.h:91: TIndexBuilder::TXTYPE_CREATE_INDEX_BUILD: Reply TxId: 101 Status: BAD_REQUEST Issues { message: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been 
resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp:70" severity: 1 } SchemeStatus: 2 2025-06-03T10:31:29.575284Z node 2 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [2:270:2260] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:31:29.576153Z node 2 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 BUILDINDEX RESPONSE CREATE: NKikimrIndexBuilder.TEvCreateResponse TxId: 101 Status: BAD_REQUEST Issues { message: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_build_index__create.cpp:70" severity: 1 } SchemeStatus: 2 TestWaitNotification wait txId: 101 2025-06-03T10:31:29.576304Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:31:29.576315Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 2025-06-03T10:31:29.576415Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:31:29.576447Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:31:29.576454Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [2:282:2272] TestWaitNotification: OK eventTxId 101 2025-06-03T10:31:29.576537Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index__list.cpp:23: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: DoExecute DatabaseName: "/MyRoot" PageSize: 100 PageToken: "" 2025-06-03T10:31:29.576563Z node 2 :BUILD_INDEX DEBUG: schemeshard_build_index_tx_base.h:93: TIndexBuilder::TXTYPE_LIST_INDEX_BUILD: Reply Status: SUCCESS NextPageToken: "0" BUILDINDEX RESPONSE LIST: NKikimrIndexBuilder.TEvListResponse Status: SUCCESS NextPageToken: "0" ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_kqp/unittest >> KqpStreamLookup::ReadTableWithIndexDuringSplit [GOOD] Test command err: 2025-06-03T10:31:27.897064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:27.897163Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:27.897199Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0029b7/r3tmp/tmp4Biqdu/pdisk_1.dat 2025-06-03T10:31:28.022817Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:28.040797Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:28.042159Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946687428285 != 1748946687428289 2025-06-03T10:31:28.086181Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:28.086228Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:28.096982Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:28.178360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:28.415585Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:781:2652], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:28.415632Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:791:2657], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:28.415645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:28.416875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:31:28.565848Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:795:2660], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:31:28.605898Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:866:2700] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:31.309695Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnh6cxdz957y02b87tqp7v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWZjNWRlNTEtMzIzYmMxOGItNzllYTRmNmYtMzc5ZGM5Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:31.321628Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnh6cxdz957y02b87tqp7v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWZjNWRlNTEtMzIzYmMxOGItNzllYTRmNmYtMzc5ZGM5Yzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:31.389841Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnh991a499nean0zez85c2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjAyMWJkOTEtMTZjNzdjOTMtOTc5ZmE2OWEtOTg3N2M4OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvDataShard::TEvRead from KQP_SOURCE_READ_ACTOR to TX_DATASHARD_ACTOR |68.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |68.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut |68.3%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator_client/ut/ydb-core-tx-tx_allocator_client-ut >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-51 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-35 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestInlineWriteReadWithRestartsThenResponseOkNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:85:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:89:2057] recipient: [8:87:2116] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:91:2057] recipient: [8:87:2116] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:90:2117] Leader for TabletID 72057594037927937 is [8:90:2117] sender: [8:176:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:85:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:89:2057] recipient: [9:87:2116] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:91:2057] recipient: [9:87:2116] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:90:2117] Leader for TabletID 72057594037927937 is [9:90:2117] sender: [9:176:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:86:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:90:2057] recipient: [10:88:2116] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:92:2057] recipient: [10:88:2116] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:91:2117] Leader for TabletID 72057594037927937 is [10:91:2117] sender: [10:177:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:92:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:93:2057] recipient: [11:91:2119] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:95:2057] recipient: [11:91:2119] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:94:2120] Leader for TabletID 72057594037927937 is [11:94:2120] sender: [11:180:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (acto ... 97]) rebooted! !Reboot 72057594037927937 (actor [44:57:2097]) tablet resolver refreshed! new actor is[44:102:2126] Leader for TabletID 72057594037927937 is [44:102:2126] sender: [44:188:2057] recipient: [44:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:55:2057] recipient: [45:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [45:55:2057] recipient: [45:51:2095] Leader for TabletID 72057594037927937 is [45:57:2097] sender: [45:58:2057] recipient: [45:51:2095] Leader for TabletID 72057594037927937 is [45:57:2097] sender: [45:75:2057] recipient: [45:14:2061] !Reboot 72057594037927937 (actor [45:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [45:57:2097] sender: [45:98:2057] recipient: [45:36:2083] Leader for TabletID 72057594037927937 is [45:57:2097] sender: [45:101:2057] recipient: [45:14:2061] Leader for TabletID 72057594037927937 is [45:57:2097] sender: [45:102:2057] recipient: [45:100:2125] Leader for TabletID 72057594037927937 is [45:103:2126] sender: [45:104:2057] recipient: [45:100:2125] !Reboot 72057594037927937 (actor [45:57:2097]) rebooted! !Reboot 72057594037927937 (actor [45:57:2097]) tablet resolver refreshed! new actor is[45:103:2126] Leader for TabletID 72057594037927937 is [45:103:2126] sender: [45:121:2057] recipient: [45:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:55:2057] recipient: [46:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [46:55:2057] recipient: [46:51:2095] Leader for TabletID 72057594037927937 is [46:57:2097] sender: [46:58:2057] recipient: [46:51:2095] Leader for TabletID 72057594037927937 is [46:57:2097] sender: [46:75:2057] recipient: [46:14:2061] !Reboot 72057594037927937 (actor [46:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [46:57:2097] sender: [46:100:2057] recipient: [46:36:2083] Leader for TabletID 72057594037927937 is [46:57:2097] sender: [46:103:2057] recipient: [46:14:2061] Leader for TabletID 72057594037927937 is [46:57:2097] sender: [46:104:2057] recipient: [46:102:2127] Leader for TabletID 72057594037927937 is [46:105:2128] sender: [46:106:2057] recipient: [46:102:2127] !Reboot 72057594037927937 (actor [46:57:2097]) rebooted! !Reboot 72057594037927937 (actor [46:57:2097]) tablet resolver refreshed! 
new actor is[46:105:2128] Leader for TabletID 72057594037927937 is [46:105:2128] sender: [46:191:2057] recipient: [46:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:55:2057] recipient: [47:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [47:55:2057] recipient: [47:51:2095] Leader for TabletID 72057594037927937 is [47:57:2097] sender: [47:58:2057] recipient: [47:51:2095] Leader for TabletID 72057594037927937 is [47:57:2097] sender: [47:75:2057] recipient: [47:14:2061] !Reboot 72057594037927937 (actor [47:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [47:57:2097] sender: [47:100:2057] recipient: [47:36:2083] Leader for TabletID 72057594037927937 is [47:57:2097] sender: [47:103:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [47:57:2097] sender: [47:104:2057] recipient: [47:102:2127] Leader for TabletID 72057594037927937 is [47:105:2128] sender: [47:106:2057] recipient: [47:102:2127] !Reboot 72057594037927937 (actor [47:57:2097]) rebooted! !Reboot 72057594037927937 (actor [47:57:2097]) tablet resolver refreshed! new actor is[47:105:2128] Leader for TabletID 72057594037927937 is [47:105:2128] sender: [47:191:2057] recipient: [47:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:55:2057] recipient: [48:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [48:55:2057] recipient: [48:51:2095] Leader for TabletID 72057594037927937 is [48:57:2097] sender: [48:58:2057] recipient: [48:51:2095] Leader for TabletID 72057594037927937 is [48:57:2097] sender: [48:75:2057] recipient: [48:14:2061] !Reboot 72057594037927937 (actor [48:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [48:57:2097] sender: [48:101:2057] recipient: [48:36:2083] Leader for TabletID 72057594037927937 is [48:57:2097] sender: [48:104:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [48:57:2097] sender: [48:105:2057] recipient: [48:103:2127] Leader for TabletID 72057594037927937 is [48:106:2128] sender: [48:107:2057] recipient: [48:103:2127] !Reboot 72057594037927937 (actor [48:57:2097]) rebooted! !Reboot 72057594037927937 (actor [48:57:2097]) tablet resolver refreshed! new actor is[48:106:2128] Leader for TabletID 72057594037927937 is [48:106:2128] sender: [48:124:2057] recipient: [48:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:55:2057] recipient: [49:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [49:55:2057] recipient: [49:50:2095] Leader for TabletID 72057594037927937 is [49:57:2097] sender: [49:58:2057] recipient: [49:50:2095] Leader for TabletID 72057594037927937 is [49:57:2097] sender: [49:75:2057] recipient: [49:14:2061] !Reboot 72057594037927937 (actor [49:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [49:57:2097] sender: [49:103:2057] recipient: [49:36:2083] Leader for TabletID 72057594037927937 is [49:57:2097] sender: [49:105:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [49:57:2097] sender: [49:107:2057] recipient: [49:106:2129] Leader for TabletID 72057594037927937 is [49:108:2130] sender: [49:109:2057] recipient: [49:106:2129] !Reboot 72057594037927937 (actor [49:57:2097]) rebooted! !Reboot 72057594037927937 (actor [49:57:2097]) tablet resolver refreshed! 
new actor is[49:108:2130] Leader for TabletID 72057594037927937 is [49:108:2130] sender: [49:194:2057] recipient: [49:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:55:2057] recipient: [50:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [50:55:2057] recipient: [50:51:2095] Leader for TabletID 72057594037927937 is [50:57:2097] sender: [50:58:2057] recipient: [50:51:2095] Leader for TabletID 72057594037927937 is [50:57:2097] sender: [50:75:2057] recipient: [50:14:2061] !Reboot 72057594037927937 (actor [50:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [50:57:2097] sender: [50:103:2057] recipient: [50:36:2083] Leader for TabletID 72057594037927937 is [50:57:2097] sender: [50:105:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [50:57:2097] sender: [50:107:2057] recipient: [50:106:2129] Leader for TabletID 72057594037927937 is [50:108:2130] sender: [50:109:2057] recipient: [50:106:2129] !Reboot 72057594037927937 (actor [50:57:2097]) rebooted! !Reboot 72057594037927937 (actor [50:57:2097]) tablet resolver refreshed! new actor is[50:108:2130] Leader for TabletID 72057594037927937 is [50:108:2130] sender: [50:194:2057] recipient: [50:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:55:2057] recipient: [51:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [51:55:2057] recipient: [51:51:2095] Leader for TabletID 72057594037927937 is [51:57:2097] sender: [51:58:2057] recipient: [51:51:2095] Leader for TabletID 72057594037927937 is [51:57:2097] sender: [51:75:2057] recipient: [51:14:2061] !Reboot 72057594037927937 (actor [51:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [51:57:2097] sender: [51:104:2057] recipient: [51:36:2083] Leader for TabletID 72057594037927937 is [51:57:2097] sender: [51:106:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [51:57:2097] sender: [51:108:2057] recipient: [51:107:2129] Leader for TabletID 72057594037927937 is [51:109:2130] sender: [51:110:2057] recipient: [51:107:2129] !Reboot 72057594037927937 (actor [51:57:2097]) rebooted! !Reboot 72057594037927937 (actor [51:57:2097]) tablet resolver refreshed! new actor is[51:109:2130] Leader for TabletID 72057594037927937 is [51:109:2130] sender: [51:127:2057] recipient: [51:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:55:2057] recipient: [52:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [52:55:2057] recipient: [52:50:2095] Leader for TabletID 72057594037927937 is [52:57:2097] sender: [52:58:2057] recipient: [52:50:2095] Leader for TabletID 72057594037927937 is [52:57:2097] sender: [52:75:2057] recipient: [52:14:2061] !Reboot 72057594037927937 (actor [52:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [52:57:2097] sender: [52:106:2057] recipient: [52:36:2083] Leader for TabletID 72057594037927937 is [52:57:2097] sender: [52:109:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [52:57:2097] sender: [52:110:2057] recipient: [52:108:2131] Leader for TabletID 72057594037927937 is [52:111:2132] sender: [52:112:2057] recipient: [52:108:2131] !Reboot 72057594037927937 (actor [52:57:2097]) rebooted! !Reboot 72057594037927937 (actor [52:57:2097]) tablet resolver refreshed! 
new actor is[52:111:2132] Leader for TabletID 72057594037927937 is [52:111:2132] sender: [52:197:2057] recipient: [52:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:55:2057] recipient: [53:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [53:55:2057] recipient: [53:51:2095] Leader for TabletID 72057594037927937 is [53:57:2097] sender: [53:58:2057] recipient: [53:51:2095] Leader for TabletID 72057594037927937 is [53:57:2097] sender: [53:75:2057] recipient: [53:14:2061] !Reboot 72057594037927937 (actor [53:57:2097]) on event NKikimr::TEvKeyValue::TEvReadRange ! Leader for TabletID 72057594037927937 is [53:57:2097] sender: [53:106:2057] recipient: [53:36:2083] Leader for TabletID 72057594037927937 is [53:57:2097] sender: [53:108:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [53:57:2097] sender: [53:110:2057] recipient: [53:109:2131] Leader for TabletID 72057594037927937 is [53:111:2132] sender: [53:112:2057] recipient: [53:109:2131] !Reboot 72057594037927937 (actor [53:57:2097]) rebooted! !Reboot 72057594037927937 (actor [53:57:2097]) tablet resolver refreshed! new actor is[53:111:2132] Leader for TabletID 72057594037927937 is [53:111:2132] sender: [53:197:2057] recipient: [53:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:55:2057] recipient: [54:52:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [54:55:2057] recipient: [54:52:2095] Leader for TabletID 72057594037927937 is [54:57:2097] sender: [54:58:2057] recipient: [54:52:2095] Leader for TabletID 72057594037927937 is [54:57:2097] sender: [54:75:2057] recipient: [54:14:2061] !Reboot 72057594037927937 (actor [54:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [54:57:2097] sender: [54:107:2057] recipient: [54:36:2083] Leader for TabletID 72057594037927937 is [54:57:2097] sender: [54:109:2057] recipient: [54:14:2061] Leader for TabletID 72057594037927937 is [54:57:2097] sender: [54:111:2057] recipient: [54:110:2131] Leader for TabletID 72057594037927937 is [54:112:2132] sender: [54:113:2057] recipient: [54:110:2131] !Reboot 72057594037927937 (actor [54:57:2097]) rebooted! !Reboot 72057594037927937 (actor [54:57:2097]) tablet resolver refreshed! 
new actor is[54:112:2132] Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:55:2057] recipient: [55:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [55:55:2057] recipient: [55:50:2095] Leader for TabletID 72057594037927937 is [55:57:2097] sender: [55:58:2057] recipient: [55:50:2095] Leader for TabletID 72057594037927937 is [55:57:2097] sender: [55:75:2057] recipient: [55:14:2061] >> TxUsage::WriteToTopic_Demo_12_Table >> THiveTest::TestLockTabletExecutionReconnectExpire [GOOD] >> THiveTest::TestLockTabletExecutionStealLock >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-5 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 >> TxUsage::TwoSessionOneConsumer_Table |68.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |68.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut |68.3%| [LD] {RESULT} $(B)/ydb/core/mind/bscontroller/ut/ydb-core-mind-bscontroller-ut >> TSchemeShardSplitByLoad::IndexTableDoesNotSplitsIfDisabledByMainTable [GOOD] >> TxUsage::WriteToTopic_Invalid_Session_Table >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-36 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 >> THiveTest::TestLockTabletExecutionStealLock [GOOD] >> THiveTest::TestProgressWithMaxTabletsScheduled >> IndexBuildTestReboots::IndexPartitioning [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-52 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 >> IndexBuildTestReboots::BaseCase [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_split_merge/unittest >> TSchemeShardSplitByLoad::IndexTableDoesNotSplitsIfDisabledByMainTable [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:31:00.540127Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:00.540161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:00.540176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:00.540183Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:00.540199Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:00.540204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:00.540228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue 
configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:00.540242Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:00.540375Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:00.540471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:00.561086Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:31:00.561116Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:00.566146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:00.566309Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:00.566354Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:00.570678Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:00.570797Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:00.570943Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:00.571029Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:00.572228Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:00.572297Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:00.572758Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:00.572776Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:00.572787Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:00.572802Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:00.572808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:00.572842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.577362Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 
72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:31:00.611696Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:00.611842Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.611937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:00.612024Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:00.612043Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.613498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:00.613555Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:00.613636Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.613653Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:00.613660Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:00.613668Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:00.614495Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.614518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:00.614527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:00.615490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.615514Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:00.615550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 
2025-06-03T10:31:00.615561Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:00.616462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:00.617289Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:00.617368Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:00.617644Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:00.617693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:00.617704Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:00.617818Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:31:00.617832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:00.617885Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:31:00.617903Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:31:00.618753Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:00.618768Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:00.618838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
[OwnerId: 72057594046678944, LocalPathId: 4] state 'Ready' dataSize 0 rowCount 0 cpuUsage 100 TEST SplitByLoad, splitted 0 times, datashard count 1 2025-06-03T10:31:33.122456Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 4 shard idx 72057594046678944:6 data size 0 row count 0 2025-06-03T10:31:33.122487Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:292: TTxStoreTableStats.PersistSingleStats: main stats from datashardId(TabletID)=72075186233409546 maps to shardIdx: 72057594046678944:6 followerId=0, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], pathId map=indexImplTable, is column=0, is olap=0, RowCount 0, DataSize 0 2025-06-03T10:31:33.122587Z node 2 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:485: Do not want to split tablet 72075186233409546 2025-06-03T10:31:33.122728Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/by-value/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:33.130108Z node 2 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/by-value/indexImplTable" took 101us result status StatusSuccess 2025-06-03T10:31:33.130454Z node 2 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/by-value/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 
ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1000000 Memory: 119208 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 TEST table final state: Status: StatusSuccess Path: "/MyRoot/Table/by-value/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 
InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409546 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 1000000 Memory: 119208 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 6 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-6 [GOOD] >> 
SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 >> TxUsage::WriteToTopic_Demo_1_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-37 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-53 [GOOD] >> TKeyValueTest::TestRewriteThenLastValue [GOOD] >> TKeyValueTest::TestRewriteThenLastValueNewApi >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 >> THiveTest::TestProgressWithMaxTabletsScheduled [GOOD] >> THiveTest::TestResetServerlessComputeResourcesMode >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-7 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> YdbIndexTable::MultiShardTableOneUniqIndex [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-38 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 |68.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |68.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs |68.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_external_blobs/ydb-core-tx-datashard-ut_external_blobs >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-54 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-8 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-39 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestResetServerlessComputeResourcesMode [GOOD] Test command err: 2025-06-03T10:30:30.004974Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.005768Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.005840Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:30.005978Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:30.006216Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# 
[0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:30.006226Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.006414Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:48:2075] ControllerId# 72057594037932033 2025-06-03T10:30:30.006419Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.006448Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.006465Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.011235Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.011261Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.011544Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:56:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011578Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:57:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011597Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:58:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011624Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:59:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011646Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:60:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011671Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:61:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011698Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:62:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.011703Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.011714Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:48:2075] 2025-06-03T10:30:30.011718Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:48:2075] 2025-06-03T10:30:30.011725Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.011732Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.011920Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.011944Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.012486Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.012520Z node 2 :BS_NODE DEBUG: 
{NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.012680Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:71:2073] ControllerId# 72057594037932033 2025-06-03T10:30:30.012683Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.012697Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.012725Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.013731Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.013779Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.013784Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.014082Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:77:2077] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014117Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:78:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014137Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:79:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014158Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:80:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014181Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:81:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014200Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:82:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014223Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:83:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.014226Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.014236Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:71:2073] 2025-06-03T10:30:30.014239Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:71:2073] 2025-06-03T10:30:30.014247Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.014252Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.014321Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.017998Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:48:2075] 2025-06-03T10:30:30.018042Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.018052Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.018575Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.018616Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 
1:2:0} 2025-06-03T10:30:30.018689Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:71:2073] 2025-06-03T10:30:30.018700Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.018706Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.018712Z node 2 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:30.018736Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.018818Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:30.018863Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.018872Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:30.019916Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:30.020677Z node 2 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:30.020704Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:30.020716Z node 2 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:30.020722Z node 2 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:30.020770Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:95:2087] 2025-06-03T10:30:30.020831Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:48:2075] 2025-06-03T10:30:30.020843Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.020849Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:30.020871Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:30.020963Z node 2 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:30.021050Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:30.021066Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:30.021073Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:30.021078Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:30.021088Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:99:2093] 2025-06-03T10:30:30.021105Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 
2025-06-03T10:30:30.021120Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup Tabl ... letID: 72075186224037888 CurrentLeader: [22:644:2453] CurrentLeaderTablet: [22:646:2454] CurrentGeneration: 3 CurrentStep: 0} 2025-06-03T10:31:35.792553Z node 22 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StProblemResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [22:644:2453] CurrentLeaderTablet: [22:646:2454] CurrentGeneration: 3 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[22:1099535971443:0] : 6}, {[22:24343667:0] : 3}}}} 2025-06-03T10:31:35.792558Z node 22 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037888 followers: 0 2025-06-03T10:31:35.792562Z node 22 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 22 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037888 followers: 0 countLeader 1 allowFollowers 0 winner: [22:644:2453] 2025-06-03T10:31:35.792575Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037888] forward result local node, try to connect [22:683:2482] 2025-06-03T10:31:35.792579Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037888]::SendEvent [22:683:2482] 2025-06-03T10:31:35.792592Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594046678944] connected with status OK role: Leader [23:687:2144] 2025-06-03T10:31:35.792599Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594046678944] send queued [23:687:2144] 2025-06-03T10:31:35.792611Z node 23 :LOCAL DEBUG: local.cpp:1207: TDomainLocal(dc-1): TDomainLocal::TEvClientConnected for dc-1 shard 72057594046678944 2025-06-03T10:31:35.792617Z node 23 :LOCAL DEBUG: local.cpp:1066: TDomainLocal(dc-1): Send resolve request for /dc-1/tenant1 to schemeshard 72057594046678944 2025-06-03T10:31:35.792632Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037888] Accept Connect Originator# [22:683:2482] 2025-06-03T10:31:35.792646Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594046678944] send [23:687:2144] 2025-06-03T10:31:35.792650Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594046678944] push event to server [23:687:2144] 2025-06-03T10:31:35.792662Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594046678944]::SendEvent [23:687:2144] 2025-06-03T10:31:35.792673Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037888] connected with status OK role: Leader [22:683:2482] 2025-06-03T10:31:35.792678Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037888] send queued [22:683:2482] 2025-06-03T10:31:35.792712Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594046678944] Push Sender# [23:686:2144] EventType# 271122945 2025-06-03T10:31:35.792737Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} queued, type NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme 2025-06-03T10:31:35.792744Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:31:35.792799Z node 22 :TABLET_EXECUTOR DEBUG: 
Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} hope 1 -> done Change{11, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:31:35.792808Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594046678944:2:12} Tx{17, NKikimr::NSchemeShard::TSchemeShard::TTxDescribeScheme} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:31:35.792963Z node 23 :LOCAL DEBUG: local.cpp:1234: TDomainLocal(dc-1): HandleResolve from schemeshard 72057594046678944: Status: StatusSuccess Path: "/dc-1/tenant1" PathDescription { Self { Name: "tenant1" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeExtSubDomain CreateFinished: false CreateTxId: 101 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 } } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { } DomainKey { SchemeShard: 72057594046678944 PathId: 2 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 18446744073709551615 PathId: 18446744073709551615 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944 2025-06-03T10:31:35.792984Z node 23 :LOCAL DEBUG: local.cpp:1172: TDomainLocal(dc-1): Binding tenant /dc-1/tenant1 to hive 72057594037927937 (allocated resources: ) 2025-06-03T10:31:35.793100Z node 23 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:31:35.793108Z node 23 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:31:35.793124Z node 23 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[23:693:2145] 2025-06-03T10:31:35.793226Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [23:693:2145] 2025-06-03T10:31:35.793235Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [23:693:2145] 2025-06-03T10:31:35.793267Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:35.793276Z node 23 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 23 selfDC 2 leaderDC 1 1:2:0 local 0 localDc 0 other 1 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [22:323:2264] 2025-06-03T10:31:35.793287Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [23:693:2145] 2025-06-03T10:31:35.793351Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:195: TClient[72057594037927937] forward result remote node 22 [23:693:2145] 2025-06-03T10:31:35.793391Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:229: TClient[72057594037927937] remote node connected [23:693:2145] 2025-06-03T10:31:35.793398Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [23:693:2145] 2025-06-03T10:31:35.793501Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [23:693:2145] 2025-06-03T10:31:35.793587Z node 22 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 
Handle TEvTabletPipe::TEvServerConnected([23:693:2145]) [22:700:2485] 2025-06-03T10:31:35.793638Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [23:693:2145] 2025-06-03T10:31:35.793644Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [23:693:2145] 2025-06-03T10:31:35.793648Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [23:693:2145] 2025-06-03T10:31:35.793661Z node 23 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [23:693:2145] 2025-06-03T10:31:35.793678Z node 23 :LOCAL DEBUG: local.cpp:260: TEvTabletPipe::TEvClientConnected {TabletId=72057594037927937 Status=OK ClientId=[23:693:2145]} 2025-06-03T10:31:35.793753Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:72: [72057594037927937] Push Sender# [23:691:2145] EventType# 268959744 2025-06-03T10:31:35.793800Z node 22 :HIVE DEBUG: hive_impl.cpp:141: HIVE#72057594037927937 Handle TEvLocal::TEvRegisterNode from [23:691:2145] HiveId: 72057594037927937 ServicedDomains { SchemeShard: 72057594046678944 PathId: 2 } TabletAvailability { Type: Dummy Priority: 0 } TabletAvailability { Type: Hive Priority: 0 } 2025-06-03T10:31:35.793823Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} queued, type NKikimr::NHive::TTxRegisterNode 2025-06-03T10:31:35.793830Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:31:35.793838Z node 22 :HIVE DEBUG: tx__register_node.cpp:21: HIVE#72057594037927937 THive::TTxRegisterNode(23)::Execute 2025-06-03T10:31:35.793878Z node 22 :HIVE WARN: node_info.cpp:25: HIVE#72057594037927937 Node(23, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:35.793892Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} hope 1 -> done Change{24, redo 152b alter 0b annex 0, ~{ 4 } -{ }, 0 gb} 2025-06-03T10:31:35.793905Z node 22 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:14} Tx{41, NKikimr::NHive::TTxRegisterNode} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:31:35.793964Z node 22 :HIVE DEBUG: hive_impl.cpp:798: HIVE#72057594037927937 TEvInterconnect::TEvNodeInfo NodeId 23 Location DataCenter: "2" Module: "2" Rack: "2" Unit: "2" 2025-06-03T10:31:35.794039Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [22:702:2487] 2025-06-03T10:31:35.794048Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [22:702:2487] 2025-06-03T10:31:35.794058Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [22:702:2487] 2025-06-03T10:31:35.794069Z node 22 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StNormal ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:31:35.794078Z node 22 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 22 selfDC 1 leaderDC 1 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72057594037927937 followers: 0 countLeader 1 allowFollowers 0 winner: [22:323:2264] 2025-06-03T10:31:35.794088Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:411: TClient[72057594037927937] received pending shutdown [22:702:2487] 
2025-06-03T10:31:35.794097Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72057594037927937] forward result local node, try to connect [22:702:2487] 2025-06-03T10:31:35.794102Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72057594037927937]::SendEvent [22:702:2487] 2025-06-03T10:31:35.794116Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72057594037927937] Accept Connect Originator# [22:702:2487] 2025-06-03T10:31:35.794136Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037927937] connected with status OK role: Leader [22:702:2487] 2025-06-03T10:31:35.794142Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037927937] send queued [22:702:2487] 2025-06-03T10:31:35.794146Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [22:702:2487] 2025-06-03T10:31:35.794152Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:332: TClient[72057594037927937] shutdown pipe due to pending shutdown request [22:702:2487] 2025-06-03T10:31:35.794158Z node 22 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:505: TClient[72057594037927937] notify reset [22:702:2487] 2025-06-03T10:31:35.794167Z node 22 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [22:701:2486] EventType# 268697616 2025-06-03T10:31:35.794179Z node 22 :HIVE TRACE: hive_impl.cpp:114: HIVE#72057594037927937 Handle TEvTabletPipe::TEvServerConnected([22:702:2487]) [22:703:2488] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-55 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-9 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-40 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 |68.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |68.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |68.4%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/opt/ydb-core-kqp-ut-opt |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-56 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-10 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> TxUsage::WriteToTopic_Demo_12_Table [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-41 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 >> TxUsage::WriteToTopic_Demo_12_Query >> TxUsage::TwoSessionOneConsumer_Table [GOOD] >> BasicUsage::WriteSessionSwitchDatabases [GOOD] >> TxUsage::TwoSessionOneConsumer_Query >> BasicUsage::ReadWithRestarts [GOOD] >> BasicUsage::ConflictingWrites |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-11 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 >> TxUsage::WriteToTopic_Invalid_Session_Table [GOOD] >> Worker::Basic ------- 
[TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/federated_topic/ut/unittest >> BasicUsage::WriteSessionSwitchDatabases [GOOD] Test command err: 2025-06-03T10:29:22.691404Z :WriteSessionNoAvailableDatabase INFO: Random seed for debugging is 1748946562691393 2025-06-03T10:29:22.866247Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668287016391714:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:22.866318Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:22.881721Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668289994242932:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:22.882050Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:29:22.999418Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001962/r3tmp/tmpzyTHWq/pdisk_1.dat 2025-06-03T10:29:23.021408Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:29:23.155461Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:23.155806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.155824Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.162332Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:29:23.163038Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3915, node 1 2025-06-03T10:29:23.207050Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/001962/r3tmp/yandexzaeWjM.tmp 2025-06-03T10:29:23.207071Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/001962/r3tmp/yandexzaeWjM.tmp 2025-06-03T10:29:23.207155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/001962/r3tmp/yandexzaeWjM.tmp 2025-06-03T10:29:23.207219Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:29:23.215786Z INFO: TTestServer started on Port 12798 GrpcPort 3915 2025-06-03T10:29:23.225500Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:23.225537Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:23.226936Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:12798 PQClient connected to 
localhost:3915 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:29:23.270512Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:23.279117Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:29:23.360650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710658, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:29:23.600845Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668294289210522:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.600869Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511668294289210475:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.600909Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:23.614364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-06-03T10:29:23.624652Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511668294289210527:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:29:23.716906Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511668294289210555:2132] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:29:23.722659Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668291311359894:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:23.722791Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZDg0OTk3MTUtNDAyMjQ0OWEtY2IzMjBlYTQtYjdiMmU5, ActorId: [1:7511668291311359876:2333], ActorState: ExecuteState, TraceId: 01jwtndcj89241qsqqea024950, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:23.723184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:29:23.723194Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511668294289210570:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:29:23.723276Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=ZmE1MGY2Ny0zMGNmMTA4MC1mYzQ5ZGZiZi1jMGY1YzBlMg==, ActorId: [2:7511668294289210472:2304], ActorState: ExecuteState, TraceId: 01jwtndcg273se1xq76wdpk9xd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:29:23.723408Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:23.723318Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:29:23.793807Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:29:23.863867Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:3915", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, false, 1000); 2025-06-03T10:29:23.902735Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710664. Ctx: { TraceId: 01jwtndcs9exjj8k59kc1fn25z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDdiY2JiYzEtMmVhMWJlYTUtNThjZGY2M2ItYjhhZGIzOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511668291311360366:2994] 2025-06-03T10:29:27.865576Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668287016391714:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:29:27.865609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/ ... 
MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-03T10:31:18.574112Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946678574 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:31:18.574183Z :INFO: [/Root] TraceId [] SessionId [] MessageGroupId [src_id] Write session established. Init response: last_seq_no: 2 session_id: "src_id|bd809e-3664fecd-940ff0fc-24b38be_0" supported_codecs { codecs: 1 codecs: 2 codecs: 3 } 2025-06-03T10:31:19.574339Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [3:7511668784495702094:3419] (SourceId=src_id, PreferedPartition=(NULL)) Update the table 2025-06-03T10:31:19.587060Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [3:7511668784495702094:3419] (SourceId=src_id, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=1 Status=SUCCESS 2025-06-03T10:31:19.587088Z node 3 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [3:7511668784495702094:3419] (SourceId=src_id, PreferedPartition=(NULL)) Start idle 2025-06-03T10:31:37.425869Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:157: [72075186224037893][rt3.dc1--test-topic] TPersQueueReadBalancer::HandleWakeup 2025-06-03T10:31:37.425917Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:550: [72075186224037893][rt3.dc1--test-topic] Send TEvPersQueue::TEvStatus TabletId: 72075186224037892 Cookie: 4 2025-06-03T10:31:37.430347Z node 4 :PERSQUEUE DEBUG: partition.cpp:858: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic PartitionStatus PartitionSize: 0 UsedReserveSize: 0 ReserveSize: 0 PartitionConfig{ LifetimeSeconds: 86400 LowWatermark: 8388608 SourceIdLifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 20000000 BurstSize: 20000000 TotalPartitions: 1 SourceIdMaxCounts: 6000000 } 2025-06-03T10:31:37.438029Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:688: [72075186224037893][rt3.dc1--test-topic] Send TEvPeriodicTopicStats PathId: 13 Generation: 1 StatsReportRound: 4 DataSize: 0 UsedReserveSize: 0 2025-06-03T10:31:37.438085Z node 3 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1823: [72075186224037893][rt3.dc1--test-topic] ProcessPendingStats. 
PendingUpdates size 1 2025-06-03T10:31:38.661446Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write 1 messages with Id from 1 to 1 >>> Got event: ReadyToAcceptEvent 2025-06-03T10:31:38.661901Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: try to update token 2025-06-03T10:31:38.661917Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Send 1 message(s) (0 left), first sequence number is 3 2025-06-03T10:31:38.662394Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|bd809e-3664fecd-940ff0fc-24b38be_0 grpc read done: success: 1 data: write_request[data omitted] 2025-06-03T10:31:38.662600Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::NPQ::TEvPartitionWriter::TEvWriteRequest >>> Ready to answer: ok 2025-06-03T10:31:38.666399Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-03T10:31:38.665886Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:31:38.665923Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:31:38.665989Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 1 2025-06-03T10:31:38.666705Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:31:38.666713Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:31:38.666751Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2196: [PQ: 72075186224037892] got client message topic: rt3.dc1--test-topic partition: 0 SourceId: '\0src_id' SeqNo: 3 partNo : 0 messageNo: 1 size 98 offset: -1 2025-06-03T10:31:38.666840Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob processing sourceId '\0src_id' seqNo 3 partNo 0 2025-06-03T10:31:38.666915Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 part blob complete sourceId '\0src_id' seqNo 3 partNo 0 FormedBlobsCount 0 NewHead: Offset 2 PartNo 0 PackedSize 172 count 1 nextOffset 3 batches 1 2025-06-03T10:31:38.666978Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Add new write blob: topic 'rt3.dc1--test-topic' partition 0 compactOffset 2,1 HeadOffset 0 endOffset 2 curOffset 3 d0000000000_00000000000000000002_00000_0000000001_00000| size 160 WTime 1748946698666 2025-06-03T10:31:38.667002Z node 4 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:31:38.667004Z node 4 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:31:38.667009Z node 4 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037892, Partition: 0, State: StateIdle] [x0000000000, x0000000001) 2025-06-03T10:31:38.667012Z node 4 :PERSQUEUE 
DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:31:38.667017Z node 4 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000psrc_id 2025-06-03T10:31:38.667020Z node 4 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] d0000000000_00000000000000000002_00000_0000000001_00000| 2025-06-03T10:31:38.667023Z node 4 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:31:38.667026Z node 4 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:31:38.667030Z node 4 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:31:38.667041Z node 4 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:31:38.667061Z node 4 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 0 offset 2 partNo 0 count 1 size 160 2025-06-03T10:31:38.678200Z node 4 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 2 count 1 size 160 actorID [4:7511668353806463727:2419] 2025-06-03T10:31:38.678320Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 105 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:31:38.678336Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:31:38.678355Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0src_id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-03T10:31:38.678421Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:1372: [PQ: 72075186224037892] Topic 'rt3.dc1--test-topic' counters. CacheSize 480 CachedBlobs 3 2025-06-03T10:31:38.678438Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:31:38.678513Z node 4 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 2 partno 0 count 1 parts 0 size 160 2025-06-03T10:31:38.678653Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::IEventHandle 2025-06-03T10:31:38.679531Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 0 2025-06-03T10:31:38.679636Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session got write response: acks { seq_no: 3 written { offset: 2 } } write_statistics { persisting_time { nanos: 11000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-03T10:31:38.679646Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] OnAck: seqNo=1, txId=? 
2025-06-03T10:31:38.679653Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: acknoledged message 1 2025-06-03T10:31:38.680049Z node 3 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: src_id|bd809e-3664fecd-940ff0fc-24b38be_0 grpc read done: success: 0 data: 2025-06-03T10:31:38.680072Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: src_id|bd809e-3664fecd-940ff0fc-24b38be_0 grpc read failed 2025-06-03T10:31:38.680085Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: src_id|bd809e-3664fecd-940ff0fc-24b38be_0 grpc closed 2025-06-03T10:31:38.680092Z node 3 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: src_id|bd809e-3664fecd-940ff0fc-24b38be_0 is DEAD 2025-06-03T10:31:38.680618Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: OnReadDone gRpcStatusCode: 1, Msg: Cancelled on the server side, Details: , InternalError: 0 2025-06-03T10:31:38.680657Z :ERROR: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Got error. Status: CLIENT_CANCELLED, Description:
: Error: GRpc error: (1): Cancelled on the server side 2025-06-03T10:31:38.680664Z :ERROR: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session will not restart after a fatal error 2025-06-03T10:31:38.680668Z :INFO: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session will now close 2025-06-03T10:31:38.680686Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: aborting 2025-06-03T10:31:38.681123Z node 3 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:31:38.681430Z node 4 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [3:7511668784495702128:3419] destroyed 2025-06-03T10:31:38.681472Z node 4 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:31:38.714009Z :DEBUG: [/Root] TraceId [] SessionId [src_id|bd809e-3664fecd-940ff0fc-24b38be_0] MessageGroupId [src_id] Write session: destroy >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-57 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-ModifyUser-42 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 >> TxUsage::WriteToTopic_Invalid_Session_Query >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-12 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-58 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 >> Worker::Basic [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-59 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-13 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator_client/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> Worker::Basic [GOOD] Test command err: 2025-06-03T10:31:39.916474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668877995932470:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:39.917551Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001870/r3tmp/tmpPLl4xC/pdisk_1.dat 2025-06-03T10:31:39.992643Z node 1 
:IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:63254 TServer::EnableGrpc on GrpcPort 27947, node 1 2025-06-03T10:31:40.028923Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:40.028936Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:40.028939Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:40.028987Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63254 2025-06-03T10:31:40.057847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:40.057872Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:40.058559Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:40.080561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:40.087869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:40.160832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946700263 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) 2025-06-03T10:31:40.241209Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handshake: worker# [1:7511668882290900418:2411] 2025-06-03T10:31:40.241236Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:295: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handshake: worker# [1:7511668882290900418:2411] 2025-06-03T10:31:40.241328Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:312: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:31:40.241370Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:387: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 3] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:31:40.241376Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:417: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Send handshake: worker# [1:7511668882290900418:2411] 2025-06-03T10:31:40.241389Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-03T10:31:40.241396Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:162: [Worker][1:7511668882290900418:2411] Handshake with writer: sender# [1:7511668882290900420:2411] 
2025-06-03T10:31:40.242010Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Create read session: session# [1:7511668882290900426:2284] 2025-06-03T10:31:40.242027Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-03T10:31:40.242030Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7511668882290900418:2411] Handshake with reader: sender# [1:7511668882290900419:2411] 2025-06-03T10:31:40.242037Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:40.245334Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:79: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::TEvYdbProxy::TEvStartTopicReadingSession { Result: { ReadSessionId: consumer_1_1_5361183765636233453_v1 } } 2025-06-03T10:31:40.922438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:2, at schemeshard: 72057594046644480 2025-06-03T10:31:40.932070Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668882290900603:2370], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:40.932075Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668882290900592:2367], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:40.932092Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:40.932785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:2, at schemeshard: 72057594046644480 2025-06-03T10:31:40.935223Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668882290900606:2371], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:31:41.016734Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668886585867942:2509] txid# 281474976715662, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:41.146290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:41.227566Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:41.317988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715671:0, at schemeshard: 72057594046644480 2025-06-03T10:31:41.389823Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:31:41.460681Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715677:0, at schemeshard: 72057594046644480 2025-06-03T10:31:41.599304Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-03T10:31:41.595000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-03T10:31:41.599330Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-03T10:31:41.595000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.599340Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 0 SeqNo: 1 CreateTime: 2025-06-03T10:31:41.595000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.599375Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 0 BodySize: 36 }] } 2025-06-03T10:31:41.599420Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:54: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:31:41.599433Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: 
[LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-03T10:31:41.599448Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-03T10:31:41.600680Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:31:41.600706Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-03T10:31:41.600715Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [0] } 2025-06-03T10:31:41.600732Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.600743Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.714884Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-03T10:31:41.712000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-03T10:31:41.714903Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-03T10:31:41.712000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.714911Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 1 SeqNo: 2 CreateTime: 2025-06-03T10:31:41.712000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.714936Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 1 BodySize: 36 }] } 2025-06-03T10:31:41.714958Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 1 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-03T10:31:41.716360Z node 1 
:REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:31:41.716375Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-03T10:31:41.716380Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [1] } 2025-06-03T10:31:41.716392Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.716405Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.834348Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:58: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::TEvYdbProxy::TEvReadTopicResponse { Result: { PartitionId: 0 Messages [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-03T10:31:41.832000Z MessageGroupId: producer ProducerId: producer }] } } 2025-06-03T10:31:41.834371Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:216: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-03T10:31:41.832000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.834380Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:431: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvData { Source: 0 Records [{ Codec: RAW Data: 36b Offset: 2 SeqNo: 3 CreateTime: 2025-06-03T10:31:41.832000Z MessageGroupId: producer ProducerId: producer }] } 2025-06-03T10:31:41.834414Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:556: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRequestRecords { Records [{ Order: 2 BodySize: 36 }] } 2025-06-03T10:31:41.834445Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:74: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 2 Group: 0 Step: 0 TxId: 0 Kind: CdcDataChange Source: Unspecified Body: 36b }] } 2025-06-03T10:31:41.835642Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:111: [TablePartitionWriter][72057594046644480:3:1][72075186224037890][1:7511668886585868498:2411] Handle NKikimrTxDataShard.TEvApplyReplicationChangesResult Status: STATUS_OK 2025-06-03T10:31:41.835667Z node 1 :REPLICATION_SERVICE DEBUG: base_table_writer.cpp:587: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037890 } 2025-06-03T10:31:41.835676Z node 1 :REPLICATION_SERVICE DEBUG: 
base_table_writer.cpp:570: [LocalTableWriter][OwnerId: 72057594046644480, LocalPathId: 3][1:7511668882290900420:2411] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRemoveRecords { Records [2] } 2025-06-03T10:31:41.835691Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:176: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.835705Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } 2025-06-03T10:31:41.942232Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:119: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Handle NKikimr::NReplication::TEvYdbProxy::TEvTopicReaderGone { Result: { status: UNAVAILABLE, issues: {
: Error: PartitionSessionClosed { Partition session id: 1 Topic: "topic" Partition: 0 Reason: ConnectionLost } } } } 2025-06-03T10:31:41.942253Z node 1 :REPLICATION_SERVICE INFO: topic_reader.cpp:131: [RemoteTopicReader][/Root/topic][0][1:7511668882290900419:2411] Leave 2025-06-03T10:31:41.942273Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:235: [Worker][1:7511668882290900418:2411] Reader has gone: sender# [1:7511668882290900419:2411] 2025-06-03T10:31:41.942290Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:32: [RemoteTopicReader][/Root/topic][0][1:7511668886585868679:2411] Handshake: worker# [1:7511668882290900418:2411] 2025-06-03T10:31:41.942624Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:41: [RemoteTopicReader][/Root/topic][0][1:7511668886585868679:2411] Create read session: session# [1:7511668886585868680:2284] 2025-06-03T10:31:41.942653Z node 1 :REPLICATION_SERVICE DEBUG: worker.cpp:150: [Worker][1:7511668882290900418:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvHandshake 2025-06-03T10:31:41.942657Z node 1 :REPLICATION_SERVICE INFO: worker.cpp:154: [Worker][1:7511668882290900418:2411] Handshake with reader: sender# [1:7511668886585868679:2411] 2025-06-03T10:31:41.942665Z node 1 :REPLICATION_SERVICE DEBUG: topic_reader.cpp:48: [RemoteTopicReader][/Root/topic][0][1:7511668886585868679:2411] Handle NKikimr::NReplication::NService::TEvWorker::TEvPoll { SkipCommit: 0 } |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/service/ut_worker/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-14 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-60 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 >> TxUsage::Sinks_Oltp_WriteToTopic_1_Table [GOOD] >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query |68.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> ExternalBlobsMultipleChannels::Simple |68.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/bscontroller/ut/unittest >> TBlobStorageControllerGrouperTest::TestGroupFromCandidatesHuge [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-15 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-61 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 >> TxUsage::WriteToTopic_Demo_12_Query [GOOD] >> TxUsage::WriteToTopic_Demo_2_Table [GOOD] >> TxUsage::WriteToTopic_Demo_2_Query [GOOD] |68.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_external_blobs/unittest >> TxUsage::TwoSessionOneConsumer_Query [GOOD] >> TxUsage::WriteToTopic_Demo_13_Table >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table >> TxUsage::WriteToTopic_Demo_30_Table >> TxUsage::WriteToTopic_Demo_10_Table >> KqpNewEngine::BlindWrite >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-16 [GOOD] 
>> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 |68.5%| [TA] $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/test-results/unittest/{meta.json ... results_accumulator.log} >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-62 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 |68.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut |68.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut >> ExternalBlobsMultipleChannels::Simple [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-66 [GOOD] >> KqpNewEngine::JoinWithParams >> TxUsage::WriteToTopic_Invalid_Session_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-63 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-17 [GOOD] >> KqpNewEngine::BlindWrite [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 >> YdbTableSplit::SplitByLoadWithReads [GOOD] >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] >> KqpNewEngine::BlindWriteParameters >> LocalPartition::Restarts [GOOD] >> TStorageBalanceTest::TestScenario2 [GOOD] >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 >> TxUsage::WriteToTopic_Demo_1_Table [GOOD] >> YdbIndexTable::OnlineBuild [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Table >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-18 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 >> KqpNewEngine::BlindWriteParameters [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCase [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:37.279568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:37.279593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, 
WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:37.279600Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:37.279607Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:37.279624Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:37.279629Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:37.279640Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:37.279657Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:37.279794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:37.279882Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:37.296088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:37.296117Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:37.296214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:37.299532Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:37.299751Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:37.299796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:37.302014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:37.302089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:37.302244Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.302337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:37.303159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:37.303238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:37.303618Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: 
TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:37.303636Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:37.303661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:37.303673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:37.303681Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:37.303735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:37.306143Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:37.336584Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:37.336674Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.336759Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:37.336820Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:37.336838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.338051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.338095Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:37.338180Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.338195Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at 
tablet# 72057594046678944 2025-06-03T10:29:37.338201Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:37.338208Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:37.338854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.338871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:37.338878Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:37.339475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.339491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:37.339498Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:37.339506Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:37.340350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:37.340948Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:37.341007Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:37.341250Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:37.341288Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:37.341323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:37.341415Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
} Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:33.602742Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:33.602805Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 64us result status StatusSuccess 2025-06-03T10:31:33.602973Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 
TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:33.603055Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: 
true }, at schemeshard: 72057594046678944 2025-06-03T10:31:33.603094Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplTable" took 42us result status StatusSuccess 2025-06-03T10:31:33.603234Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 
1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::BaseCaseWithDataColumns [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:36.825397Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:36.825431Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:36.825438Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:36.825445Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:36.825464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:36.825469Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:36.825480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:36.825498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:36.825648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:36.825753Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:36.843811Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:36.843843Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:36.843954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:36.847090Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:36.847227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:36.847266Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:36.849026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:36.849086Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:36.849224Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.849361Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:36.849954Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:36.850009Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:36.850326Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:36.850339Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:36.850359Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:36.850369Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:36.850375Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:36.850427Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:36.852111Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:36.875693Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:36.875815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.875904Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:36.875963Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:36.875979Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.877147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.877188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:36.877275Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.877313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:36.877322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:36.877329Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:36.878107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.878128Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:36.878136Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:36.878674Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.878690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.878697Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:36.878707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:36.879530Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:36.880261Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:36.880319Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:36.880571Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.880612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:36.880623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:36.880711Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
s { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "value" DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:30.662793Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:30.662834Z node 278 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1" took 45us result status StatusSuccess 2025-06-03T10:31:30.663002Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1" PathDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { 
Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "index1" LocalPathId: 5 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "index" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataColumnNames: "value" DataSize: 0 IndexImplTableDescriptions { Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } } } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:30.663085Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:30.663120Z node 278 :SCHEMESHARD_DESCRIBE INFO: 
schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplTable" took 36us result status StatusSuccess 2025-06-03T10:31:30.663241Z node 278 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplTable" PathDescription { Self { Name: "indexImplTable" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "index" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "index" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } 
TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_index_build_reboots/unittest >> IndexBuildTestReboots::IndexPartitioning [GOOD] >> KqpNewEngine::JoinWithParams [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:36.134211Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:36.134240Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:36.134246Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:36.134253Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:36.134269Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-03T10:29:36.134274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:36.134285Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:36.134298Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:36.134373Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:36.134449Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:36.145475Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:36.145503Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:36.145621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:36.148432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:36.148523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:36.148548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:36.149997Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:36.150032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:36.150142Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.150200Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:36.150605Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:36.150637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:36.150857Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:36.150865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:36.150877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:36.150883Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:36.150888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:36.150926Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:36.151992Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:36.166333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:36.166402Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.166457Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:36.166492Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:36.166501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.167251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.167275Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:36.167329Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.167337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:36.167342Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:36.167346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:36.167757Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.167766Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:36.167769Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:36.168096Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.168105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:36.168112Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:36.168120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:36.168621Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:36.168945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:36.168977Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:36.169146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:36.169166Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:36.169173Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:36.169224Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableIndexes { Name: "Index" LocalPathId: 4 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { } } TableSchemaVersion: 3 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:33.700095Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:33.700125Z node 323 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index" took 33us result status StatusSuccess 2025-06-03T10:31:33.700257Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index" PathDescription { Self { Name: "Index" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 2 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateAlter Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 
72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } TableIndex { Name: "Index" LocalPathId: 4 Type: EIndexTypeGlobal State: EIndexStateReady KeyColumnNames: "value" SchemaVersion: 2 PathOwnerId: 72057594046678944 DataSize: 0 IndexImplTableDescriptions { PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } } } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:33.700322Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/Index/indexImplTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:33.700383Z node 323 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/Index/indexImplTable" took 61us result status StatusSuccess 2025-06-03T10:31:33.700533Z node 323 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/Index/indexImplTable" 
PathDescription { Self { Name: "indexImplTable" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 3 MaxPartitionsCount: 3 } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "alice" } } Tuple { } } } SplitBoundary { KeyPrefix { Tuple { Optional { Text: "bob" } } Tuple { } } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\002\000\005\000\000\000alice\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409547 } TablePartitions { EndOfRangeKeyPrefix: "\002\000\003\000\000\000bob\000\000\000\200" IsPoint: false 
IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 3 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> LocalPartition::DiscoveryServiceBadPort >> TStorageBalanceTest::TestScenario3 >> TxUsage::WriteToTopic_Demo_1_Query >> YdbIndexTable::OnlineBuildWithDataColumn >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-64 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-67 [GOOD] >> TChargeBTreeIndex::FewNodes_Groups_History_Sticky [GOOD] >> TxUsage::WriteToTopic_Demo_13_Table [GOOD] >> TxUsage::WriteToTopic_Demo_30_Table [GOOD] >> KqpScanSpilling::HandleErrorsCorrectly [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Table [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Table [GOOD] >> TxUsage::WriteToTopic_Demo_10_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_1_Query [GOOD] >> KqpNewEngine::BlindWriteListParameter >> KqpNewEngine::JoinPure >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-19 [GOOD] >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] >> TxUsage::WriteToTopic_Demo_30_Query >> TClockProCache::Lifecycle [GOOD] >> TClockProCache::EvictNext [GOOD] >> TClockProCache::Erase [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query >> TClockProCache::Random [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query >> NFwd_TFlatIndexCache::Skip_Wait [GOOD] >> TxUsage::WriteToTopic_Two_WriteSession_Query >> NFwd_TFlatIndexCache::Trace [GOOD] >> TxUsage::WriteToTopic_Demo_10_Query >> NFwd_TFlatIndexCache::Slices [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table >> NFwd_TLoadedPagesCircularBuffer::Basics [GOOD] >> KqpNewEngine::BlindWriteListParameter [GOOD] >> NOther::Blocks [GOOD] >> NPage::Encoded [GOOD] >> NPage::ABI_002 >> KqpNewEngine::JoinPure [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-65 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 >> KqpNewEngine::BrokenLocksAtROTx >> NPage::ABI_002 [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-20 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 >> KqpNewEngine::BrokenLocksAtROTx [GOOD] >> NPage::GroupIdEncoding [GOOD] >> KqpNewEngine::JoinPureUncomparableKeys [GOOD] >> NPageCollection::Align [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-66 [GOOD] >> NPageCollection::Meta [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-21 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 >> NPageCollection::PagesToBlobsConverter [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::HandleErrorsCorrectly [GOOD] >> NPageCollection::Grow [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/002936/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk4 Trying to start YDB, gRPC: 14486, MsgBus: 61699 2025-06-03T10:31:04.474639Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668726462691500:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:04.477013Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002936/r3tmp/tmpYflOjb/pdisk_1.dat TServer::EnableGrpc on GrpcPort 14486, node 1 2025-06-03T10:31:04.546260Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:04.546404Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668726462691315:2079] 1748946664422978 != 1748946664422981 2025-06-03T10:31:04.546570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:04.546572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:04.546574Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:04.546620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:04.586061Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:04.586110Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:04.595139Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61699 TClient is connected to server localhost:61699 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:04.778474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.793552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.806288Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.898597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.940678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.967175Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.042191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668730757660252:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.042238Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.122313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.137849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.151206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.167921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.181774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.214491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.246988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.319260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668730757660912:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.319287Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.319391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668730757660917:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.320244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:05.323998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:31:05.324084Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668730757660919:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:31:05.401353Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668730757660970:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.433442Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668726462691500:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:09.434933Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:31:19.529167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:31:19.529188Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"9c62d1c8-b9f43672-dbd6520d-710f7cfb") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1)) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"a020e2b5-86e1b882-b352f6bc-4b60eea3") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"4c4dd5da-a714e1bc-6705c55b-37638b1d")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) 2025-06-03T10:31:52.797391Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7511668932621130768:6041], blobId: 0, bytes: 1401088 2025-06-03T10:31:52.797443Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7511668932621130768:6041], blobId: 1, bytes: 84 2025-06-03T10:31:52.797448Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7511668932621130768:6041], blobId: 2, bytes: 2402376 2025-06-03T10:31:52.797447Z node 1 :KQP_COMPUTE ERROR: compute_storage_actor.cpp:79: TxId: 281474976710972. 
Error: [TEvError] File size limit exceeded: 1/0Mb 2025-06-03T10:31:52.797460Z node 1 :KQP_COMPUTE ERROR: spilling_file.cpp:412: [Write] File size limit exceeded. From: [1:7511668932621130768:6041], blobId: 3, bytes: 144 2025-06-03T10:31:52.799252Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7511668932621130758:4193], TxId: 281474976710972, task: 2. Ctx: { CustomerSuppliedId : . SessionId : ydb://session/3?node_id=1&id=Yzg4ODllODYtZTc2NWNhMmItZmQ3YTY1ZWQtNTUxMmJmY2Q=. TraceId : 01jwtnhy1y71at94vqvxj6b6rk. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: [Compute spilling][TEvError] File size limit exceeded: 1/0Mb }. 2025-06-03T10:31:52.799402Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [1:7511668932621130759:4194], TxId: 281474976710972, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=Yzg4ODllODYtZTc2NWNhMmItZmQ3YTY1ZWQtNTUxMmJmY2Q=. CustomerSuppliedId : . TraceId : 01jwtnhy1y71at94vqvxj6b6rk. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-03T10:31:52.800706Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=Yzg4ODllODYtZTc2NWNhMmItZmQ3YTY1ZWQtNTUxMmJmY2Q=, ActorId: [1:7511668932621130744:4188], ActorState: ExecuteState, TraceId: 01jwtnhy1y71at94vqvxj6b6rk, Create QueryResponse for error on request, msg: >> NPageCollection::Groups [GOOD] >> NPageCollection::Chop [GOOD] >> KqpNewEngine::BrokenLocksAtROTxSharded >> NPageCollection::CookieAllocator [GOOD] >> KqpNewEngine::JoinWithPrecompute >> NProto::LargeGlobId [GOOD] >> Redo::ABI_008 [GOOD] >> Self::Literals [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-67 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-22 [GOOD] >> KqpNewEngine::BrokenLocksAtROTxSharded [GOOD] >> KqpNewEngine::JoinWithPrecompute [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-23 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-68 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-69 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 >> KqpNewEngine::BrokenLocksOnUpdate >> KqpNewEngine::BrokenLocksOnUpdate [GOOD] >> KqpNewEngine::ComplexLookupLimit >> KqpNewEngine::ComplexLookupLimit [GOOD] >> KqpNewEngine::JoinProjectMulti >> KqpNewEngine::JoinProjectMulti [GOOD] >> KqpNewEngine::JoinMultiConsumer >> KqpNewEngine::JoinMultiConsumer [GOOD] >> KqpNewEngine::JoinSameKey >> KqpNewEngine::JoinSameKey [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-70 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD] >> VDiskTest::HugeBlobWrite [GOOD] >> VectorIndexBuildTestReboots::BaseCase[PipeResets] [GOOD] >> BasicUsage::ConflictingWrites [GOOD] >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD] >> KqpQueryService::DdlColumnTable [GOOD] >> TxUsage::WriteToTopic_Demo_30_Query [GOOD] >> TxUsage::WriteToTopic_Demo_13_Query [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 >> TxUsage::WriteToTopic_Two_WriteSession_Query [GOOD] >> LocalPartition::DiscoveryServiceBadPort [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartNo_Query [GOOD] >> TxUsage::WriteToTopic_Demo_10_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_2_Table [GOOD] >> Describe::Basic >> KqpNewEngine::PkRangeSelect1 >> KqpQueryService::DdlCache >> KqpQueryService::DdlCache [GOOD] >> KqpQueryService::DdlExecuteScript >> KqpQueryService::DdlExecuteScript [GOOD] >> TxUsage::WriteToTopic_Demo_31_Table >> TxUsage::WriteToTopic_Demo_14_Table >> 
SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-71 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 >> TxUsage::WriteToTopic_Demo_4_Table >> LocalPartition::DiscoveryServiceBadNodeId >> TxUsage::WriteToTopic_Demo_11_Table >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query >> KqpReturning::ReturningWorksIndexedDelete+QueryService >> Describe::Basic [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_external_blobs/unittest >> ExternalBlobsMultipleChannels::Simple [GOOD] Test command err: 2025-06-03T10:31:44.559468Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:31:44.559593Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:31:44.559630Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000dfa/r3tmp/tmpWDjFV3/pdisk_1.dat 2025-06-03T10:31:44.699351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:31:44.719762Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:44.722159Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946704011190 != 1748946704011194 2025-06-03T10:31:44.767578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:44.767641Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:44.778520Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:44.858028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:45.066775Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:736:2617], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:45.066809Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:746:2622], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:45.066821Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:45.067748Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:31:45.218797Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:750:2625], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:31:45.255612Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:820:2664] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:45.311995Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnhpna39csxytgy2j20gtc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2UzY2U2NTMtODYxZmQ0YjYtNmM4YjA3NGItYTVjN2YwYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.328038Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnhpx45ab4xn1g35t5gyek, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODNmOGY3YWItNzgwYWEwMTMtZGYxZTgzNDYtNThkMzZjYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.343783Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnhpxjc5zc6z7b8er7d1te, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQ3MDdmNzUtMzg5NWQ0ODEtNGQwZDBmNDYtY2I1YTk0MmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.361220Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtnhpy28t91tkjran2tfz6d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWNiMTU4MGItNTA5NDUyMjYtZmYzZjM2NDItNzE5YzdhZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.374510Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtnhpyk78z2yd1hr027m61t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTEwMDcwNmYtNzkxODc2MmItNThhNTBjNDUtYzA3MTMwNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.388291Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnhpz0d3cs42mw7n1pzqjv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzE1Yjg1OGMtYzczYWY4NzgtOTdjYmRhYzMtODM5ZWI1Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.401477Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtnhpze9xsctardx1pggce6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWNhMjc4OTEtNGVjYzllNjgtMWI5Mzk3MTUtMTkxNDViNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.412920Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnhpzv4g4weemc8w26qs6b, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTM5NzY3YWUtZjgzZTUwYy1hODI2YWI1OS04ZjM3ZmNhNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.426711Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jwtnhq072jdqv1mntkk9c77n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWUwNjIyM2EtMjIxYjFkYzEtODBkY2YzYzktNjIxMjNlZGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.437842Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnhq0m4w84fy1z62w1y0g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTgwODU1MzgtZjI5MzNiYS02ZjZiYzdiNC04YmJkMDRiNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.448060Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jwtnhq0zeagz8s865b235st5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Q1MWIzNDgtNWExNTU0MGEtMmQyMjA1NTItM2M2ZmVhNGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.460269Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtnhq19ag83sseve7386b8w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWVhY2Q3MGMtNGI2M2UyYzgtNDA4MGJhODYtYmNkMWNkYWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.472835Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jwtnhq1pa6trvpj7am5cp8ec, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmJjNTY4YWQtZTQxMjk5MDEtNTZhZTVhNTAtNWI3NzRhOTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.486775Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnhq22bbjzx8aay1r1zkjv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Mzg5MWQ0ZmItNGI1ODhkNjItYzg4NDcyYTktNTYwNGMzOGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.506308Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jwtnhq2g5mj2dahagj725m51, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmU0YmFjNWMtYmUyNGEzOTgtMTVmMGQyZS1jZDVhYjYwZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.521643Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtnhq371j6d5ebp8dtmw719, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDMzODFiY2ItOTRiNDIxMWYtYjliOTM2NDAtOWRkMGM0ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.533096Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jwtnhq3key3g1t4ng6sbab5f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjI3YmZhOGYtNzNhYTFmOTktODZkMTAzMzMtYmQzYjQwYTQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.544848Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jwtnhq3y0hbpvps6zt5vq20v, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTJhNDlkMGMtYWFmMDU3ODEtZWY0MTdiY2QtOTNlMGNjZDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.556047Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. 
Ctx: { TraceId: 01jwtnhq4a6yw12qc6b3s4nj95, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzFlYTZkZmItZGE2MDZkMzItZDc2MjFiNDItM2Y2ZTliN2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.567891Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jwtnhq4naadfza9q0jdcgsa7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzNjZTM3YzctMTQ1MmU0MDktNmNmNWE1NzUtODc2NDY0NzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.579296Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715680. Ctx: { TraceId: 01jwtnhq516vdh7xkeymtjmdrh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWNjZTlkMTEtYWVjNzYzMTgtODc3NGUyNjgtMTk2OTRjZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.591277Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715681. Ctx: { TraceId: 01jwtnhq5d58bad66rjrns212y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmJiMWZiM2UtMzlmYWUxNi1kMGNiYTY1OS1hZTI5N2E4ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.602666Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715682. Ctx: { TraceId: 01jwtnhq5s9frr3v1nrf29vfaj, Database: , DatabaseId: /Roo ... r.cpp:119: TxId: 281474976715727. Ctx: { TraceId: 01jwtnhqr73e6f9htv2y662wgg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWE1MDhlNy0zMWMzZjVkOS04MmQyNzY4My0yMDNjZjY1NQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.208731Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jwtnhqrm4hnbt0qzvdqrfvnf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzVkZjYxZTQtZDU1OTk1MWYtY2ZhZjdmZTYtNDM3YjAzZjM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.222736Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jwtnhqs2e16tsgpn3v5mrrm3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWYwODFkMjEtN2I1ODhlYjQtZjdiMzdjZDItNjNiOGYwOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.232924Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715730. Ctx: { TraceId: 01jwtnhqsg92q1ft06ccj8cb7j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWY1YzdlNTEtN2U4MGJiMWItMjgzYTQwYzUtM2ZiNjExOGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.244743Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715731. Ctx: { TraceId: 01jwtnhqstcqpeq4y41zqfmp8g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTMzMmUxODgtNzkwMzdiMTQtNGIzNjNlNmMtYzRiOWY2ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.257511Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. Ctx: { TraceId: 01jwtnhqt62k5q6mg8q2bjr28f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjlkMjEwN2UtNDQzNmYyNWYtMmE5NmRhY2QtYTIyYmE1NWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.269901Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715733. Ctx: { TraceId: 01jwtnhqtk59f1q3xnt8v3rz9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTdlMTZhMzEtZWQ5MmJhYmItNWFkMmRkMmEtYTc4MDhmMzI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.283092Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715734. Ctx: { TraceId: 01jwtnhqtz3pmzgqg8khq2w2md, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDY4MzZmZjAtYzYyZmExMzMtZWVmMzkwNTQtOTgzNWExZDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.295936Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715735. Ctx: { TraceId: 01jwtnhqvd57qjmt2pr7szjar6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2ViMjBiNmMtYjk2MWY2NzItNTI2ODg5NzItZWI0NDI0MGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.309755Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715736. Ctx: { TraceId: 01jwtnhqvt442mw8p6n17dbd87, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWRiOTlhOTAtNDA4MjRiMS1mOGRlY2ViZC1iMzY2MzQ2, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.323010Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715737. Ctx: { TraceId: 01jwtnhqw718ttrp8yr8zvv6fe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTViM2ViN2YtNmEzZjE1MjEtZWJiZDBhMWMtNjRjN2UxZTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.336193Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715738. Ctx: { TraceId: 01jwtnhqwn41by8wbszqaq17sy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDE0YjQxOWMtNmYxZjdlZGUtZjViMTE1YzEtNDZhMmE0MjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.347545Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715739. Ctx: { TraceId: 01jwtnhqx29ac57rfq51pmaq67, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTdjMjU2NmYtODQyMmNmMDAtYTMzYTYyNjYtMmEyNTNkMDE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.358067Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715740. Ctx: { TraceId: 01jwtnhqxd2ar5npshxkbxrwgh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzUxYjI0NTAtMTY3YTVmYzUtNzZiOTk4ODEtZjQ3ZTJhNWQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.368546Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715741. Ctx: { TraceId: 01jwtnhqxq4nqkz0y895qc3p9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTc1ZjYxOTQtODRhNGJkMTUtODkyNmZmYjktNzFhMGE5MDM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.378778Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715742. Ctx: { TraceId: 01jwtnhqy26tymdxehwkmf6am2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODY0ZWU4Y2QtMjZkZjcxOWYtZWViOGE4MjMtOGMyZThkMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.390864Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715743. Ctx: { TraceId: 01jwtnhqyccrck0bh5kyezc9j5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MmM4ZDU0ZjQtMWUyYzY2YTYtMThhNzAwN2QtNWYwOWIzNw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.402072Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715744. Ctx: { TraceId: 01jwtnhqyr8jxc1tf2c67gawmv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDVkYWQ0OTItOWIzNWI2Y2EtZTc4MmRlZjQtNDk4MDViMWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.413036Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715745. Ctx: { TraceId: 01jwtnhqz35yny2qf499esdwq8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzdiODJlMmUtOTNkMzhhZjktMjgxODUyODgtMmZiZGM4ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.424680Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715746. Ctx: { TraceId: 01jwtnhqzee2zv2cgd3qcaek9n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDQ1OTVjMmYtOTA2ODE3M2MtYjZlNmVmZWItMmExOWIwMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.436247Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715747. Ctx: { TraceId: 01jwtnhqztc9d53wkhrspzt9qn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=N2RjYzQxMDUtZjJiYjczOGQtZGMyMDVjNTQtZDhjYzAwOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.446876Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715748. Ctx: { TraceId: 01jwtnhr063dpazz33x54sgzyf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NWNiYzZlZmYtYTIyMDU5OTctNWE4ZGY5MjItN2NiMzQyNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.457341Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715749. Ctx: { TraceId: 01jwtnhr0gcxcactnb6nb6stde, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M0N2MxNWQtYzdhNzUwYjQtNDFjZTYwMjktZGEwNzI5MTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.467753Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715750. Ctx: { TraceId: 01jwtnhr0v746f5kcagtgbh9av, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzEwNWIwZWQtM2QwODkyODctNmZmMTViNzAtMjNjZGRlNzc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.479526Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715751. Ctx: { TraceId: 01jwtnhr150nsrdgghedgeg66c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDQ5NzUzOTQtMjJmNmFlNWMtZWU5MTc5ZDEtODRhMTA4YjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.493779Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715752. Ctx: { TraceId: 01jwtnhr1h7j2zcxhzcy409fhc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZmZkNDc0ZTktNDg4N2FkOWMtMTFmOGJkMWItZmZlYmFjMTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.507562Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715753. Ctx: { TraceId: 01jwtnhr208ea10brvb6pt1hdx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmZiMzZhMzEtMjQ0MGYyYTYtOGMwNGE4MGUtYTI5MDJlYTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.521229Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715754. Ctx: { TraceId: 01jwtnhr2d3h0d25g02h0p1ctp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTY5ZjE5ZmItMTY0ZDlhZDQtZGE4NDc0MGItNWNhZTllYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.534487Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715755. Ctx: { TraceId: 01jwtnhr2v99mra0hdr8a79jmz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTAwMjMxODYtY2UzODhiNDgtYmRmN2Q5M2YtYTJhNDYyMDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.547296Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715756. Ctx: { TraceId: 01jwtnhr38533cbr6fjf0c6pab, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2Y3NTUyODMtMjViYjQwNTgtYzcyNDY5NjgtYzA0Y2RjMzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.559963Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715757. Ctx: { TraceId: 01jwtnhr3nfx97kepgyg7bj9z0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmQ0MjRkNGYtMWQ5YTBhZmQtZmRjYWRkMWUtOWI1ZDliNDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.572859Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715758. Ctx: { TraceId: 01jwtnhr418jkmfnfvn94aqt0a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmJmNGM5Ny0xNTQxNmM1ZC0xMTUwOGMwMi1hNjMwNjUzZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.586585Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715759. Ctx: { TraceId: 01jwtnhr4eft6043meqdexrakm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTkxZDU0ZDktYzg5MDk1ZGYtZWExN2I1MWUtYjVkMjdkNzE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.673985Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715760. Ctx: { TraceId: 01jwtnhr6zan8dsk3srwgdfc12, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFmZjJjYTEtMmIxM2QxMjYtYjE1YzJmMDktYjg3ZWIxZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root |68.5%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_external_table_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} |68.5%| [LD] {RESULT} $(B)/ydb/core/kesus/proxy/ut/ydb-core-kesus-proxy-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::ComplexLookupLimit [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table >> KqpNewEngine::PkRangeSelect1 [GOOD] >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [GOOD] >> Describe::Statistics Test command err: Trying to start YDB, gRPC: 19617, MsgBus: 8588 2025-06-03T10:31:45.763439Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668903774732398:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:45.764126Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027dd/r3tmp/tmpmRsJ8c/pdisk_1.dat 2025-06-03T10:31:45.838114Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19617, node 1 2025-06-03T10:31:45.865977Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:45.866009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:45.872566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:45.878855Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:45.878870Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:45.878872Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:45.878921Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8588 TClient is connected to server localhost:8588 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:45.964266Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:45.981964Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.003861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.034845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.053034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.236866Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668908069701134:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.236895Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.296741Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.305088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.312912Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.326710Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.340799Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.355159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.368922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:46.386602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668908069701785:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.386628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.386647Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668908069701790:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.387620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:46.395645Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668908069701792:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:46.476813Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668908069701843:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 27513, MsgBus: 24832 2025-06-03T10:31:46.881975Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668906641132381:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:46.882419Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027dd/r3tmp/tmpG6li36/pdisk_1.dat 2025-06-03T10:31:46.898390Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27513, node 2 2025-06-03T10:31:46.910382Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:46.910396Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:46.910398Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:46.910450Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24832 TClient is connected to server localhost:24832 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:46.981600Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:46.981642Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:46.982735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:46.986072Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.989553Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178 ... fault\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:51.469789Z node 6 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_LOCKS_BROKEN;details=Operation is aborting because locks are not valid;tx_id=281474976715674; 2025-06-03T10:31:51.471739Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2699: SelfId: [6:7511668930274710595:2507], SessionActorId: [6:7511668930274710576:2507], Got LOCKS BROKEN for table. ShardID=72075186224037889, Sink=[6:7511668930274710595:2507].{
: Error: Operation is aborting because locks are not valid, code: 2001 } 2025-06-03T10:31:51.471884Z node 6 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [6:7511668930274710595:2507], SessionActorId: [6:7511668930274710576:2507], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001
: Error: Operation is aborting because locks are not valid, code: 2001 . sessionActorId=[6:7511668930274710576:2507]. isRollback=0 2025-06-03T10:31:51.471959Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=6&id=MTM2M2NjYjUtZTQ0OWU2ODEtYTBlOGQ2MWEtNjVhMzg2ODQ=, ActorId: [6:7511668930274710576:2507], ActorState: ExecuteState, TraceId: 01jwtnhwxc8rkp5v58w6dxhm8g, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [6:7511668930274710625:2507] from: [6:7511668930274710595:2507] 2025-06-03T10:31:51.471985Z node 6 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [6:7511668930274710625:2507] TxId: 281474976715674. Ctx: { TraceId: 01jwtnhwxc8rkp5v58w6dxhm8g, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=6&id=MTM2M2NjYjUtZTQ0OWU2ODEtYTBlOGQ2MWEtNjVhMzg2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/TwoShard`., code: 2001 subissue: {
: Error: Operation is aborting because locks are not valid, code: 2001 } } 2025-06-03T10:31:51.472043Z node 6 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=6&id=MTM2M2NjYjUtZTQ0OWU2ODEtYTBlOGQ2MWEtNjVhMzg2ODQ=, ActorId: [6:7511668930274710576:2507], ActorState: ExecuteState, TraceId: 01jwtnhwxc8rkp5v58w6dxhm8g, Create QueryResponse for error on request, msg: 2025-06-03T10:31:51.472754Z node 6 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_ABORTED;details=Distributed transaction aborted due to commit failure;tx_id=281474976715674; 2025-06-03T10:31:51.472806Z node 6 :TX_DATASHARD ERROR: datashard.cpp:751: Complete volatile write [1748946711519 : 281474976715674] from 72075186224037888 at tablet 72075186224037888, error: Status: STATUS_ABORTED Issues: { message: "Distributed transaction aborted due to commit failure" issue_code: 2011 severity: 1 } Trying to start YDB, gRPC: 9074, MsgBus: 13022 2025-06-03T10:31:51.735076Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511668926591139518:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:51.735119Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027dd/r3tmp/tmpHSx90j/pdisk_1.dat 2025-06-03T10:31:51.749464Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9074, node 7 2025-06-03T10:31:51.757063Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:51.757075Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:51.757076Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:51.757124Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13022 TClient is connected to server localhost:13022 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:51.837401Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:51.837441Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:51.838552Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:51.838777Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:51.843008Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:51.853122Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:51.875912Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:51.889703Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:52.094981Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668930886108404:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.095005Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.105358Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.114372Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.123099Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.136555Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.150864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.164523Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.179347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.195215Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668930886109057:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.195238Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.195284Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668930886109062:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.196091Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:52.198264Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511668930886109064:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:52.251790Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668930886109115:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:52.392780Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 >> KqpNewEngine::PkRangeSelect2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-LocalUser-CreateUser-24 [GOOD] Test command err: Starting YDB, grpc: 12758, msgbus: 8012 2025-06-03T10:31:19.104994Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668792704875745:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:19.105067Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001448/r3tmp/tmp9VFHlf/pdisk_1.dat 2025-06-03T10:31:19.213994Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:19.214024Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:19.225735Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:19.226470Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 12758, node 1 2025-06-03T10:31:19.244077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:19.244093Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:19.244096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:19.244149Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:19.257494Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7511668792704875823:2114] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:31:19.260642Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 TClient is connected to server localhost:8012 WaitRootIsUp 'dc-1'... 
TClient::Ls request: dc-1 2025-06-03T10:31:19.287990Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511668792704875823:2114] Handle TEvNavigate describe path dc-1 2025-06-03T10:31:19.290334Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511668792704876319:2435] HANDLE EvNavigateScheme dc-1 2025-06-03T10:31:19.290714Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511668792704876319:2435] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.306919Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511668792704876319:2435] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-03T10:31:19.309806Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511668792704876319:2435] Handle TEvDescribeSchemeResult Forward to# [1:7511668792704876318:2434] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:31:19.315888Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668792704875823:2114] Handle TEvProposeTransaction 2025-06-03T10:31:19.315910Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668792704875823:2114] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:31:19.315951Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668792704875823:2114] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7511668792704876332:2441] 2025-06-03T10:31:19.335822Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668792704876332:2441] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:19.335900Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668792704876332:2441] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-03T10:31:19.335907Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668792704876332:2441] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:19.335928Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668792704876332:2441] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:19.336611Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668792704876332:2441] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.336675Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668792704876332:2441] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-03T10:31:19.336697Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668792704876332:2441] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:31:19.336804Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668792704876332:2441] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:31:19.337088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:19.340015Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668792704876332:2441] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:31:19.340038Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668792704876332:2441] txid# 281474976715657 SEND to# [1:7511668792704876331:2440] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-03T10:31:19.349069Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668792704875823:2114] Handle TEvProposeTransaction 2025-06-03T10:31:19.349082Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668792704875823:2114] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:31:19.349091Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668792704875823:2114] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511668792704876370:2475] 2025-06-03T10:31:19.349828Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668792704876370:2475] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:19.349850Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668792704876370:2475] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 0 2025-06-03T10:31:19.349853Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668792704876370:2475] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:19.349867Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668792704876370:2475] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:19.349970Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668792704876370:2475] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.349993Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668792704876370:2475] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:19.350008Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668792704876370:2475] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-03T10:31:19.350043Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668792704876370:2475] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:31:19.350151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:19.350883Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668792704876370:2475] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-06-03T10:31:19.350897Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668792704876370:2475] txid# 281474976715658 SEND to# ... 
:TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668930808094007:2542] txid# 281474976715660 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92} 2025-06-03T10:31:52.810374Z node 59 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [59:7511668930808094007:2542] txid# 281474976715660, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:52.810384Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668930808094007:2542] txid# 281474976715660 SEND to# [59:7511668930808093933:2337] Source {TEvProposeTransactionStatus txid# 281474976715660 Status# 48} 2025-06-03T10:31:52.813732Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668930808093317:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.813745Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668930808093317:2113] TxId# 281474976715661 ProcessProposeTransaction 2025-06-03T10:31:52.813762Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668930808093317:2113] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [59:7511668930808094031:2554] 2025-06-03T10:31:52.814392Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668930808094031:2554] txid# 281474976715661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "ordinaryuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35282" 2025-06-03T10:31:52.814409Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668930808094031:2554] txid# 281474976715661 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.814413Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668930808094031:2554] txid# 281474976715661 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:52.814447Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668930808094031:2554] txid# 281474976715661 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.814598Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668930808094031:2554] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.814626Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668930808094031:2554] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:52.814640Z node 59 :TX_PROXY DEBUG: 
schemereq.cpp:103: Actor# [59:7511668930808094031:2554] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-03T10:31:52.814703Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668930808094031:2554] txid# 281474976715661 HANDLE EvClientConnected 2025-06-03T10:31:52.817752Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668930808094031:2554] txid# 281474976715661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715661} 2025-06-03T10:31:52.817771Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668930808094031:2554] txid# 281474976715661 SEND to# [59:7511668930808094030:2330] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-03T10:31:52.971903Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668930808093317:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.971930Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668930808093317:2113] TxId# 281474976715662 ProcessProposeTransaction 2025-06-03T10:31:52.971946Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668930808093317:2113] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7511668930808094055:2572] 2025-06-03T10:31:52.972691Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668930808094055:2572] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35282" 2025-06-03T10:31:52.972712Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668930808094055:2572] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.972716Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668930808094055:2572] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:52.972731Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668930808094055:2572] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.972844Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668930808094055:2572] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.972888Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668930808094055:2572] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:52.972905Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668930808094055:2572] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-03T10:31:52.972949Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668930808094055:2572] txid# 281474976715662 HANDLE EvClientConnected 
2025-06-03T10:31:52.973053Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.973839Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668930808094055:2572] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-03T10:31:52.973857Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668930808094055:2572] txid# 281474976715662 SEND to# [59:7511668930808094054:2343] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-03T10:31:52.979964Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668930808093317:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.979981Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668930808093317:2113] TxId# 281474976715663 ProcessProposeTransaction 2025-06-03T10:31:52.980001Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668930808093317:2113] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7511668930808094092:2592] 2025-06-03T10:31:52.980762Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668930808094092:2592] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTkxMiwiaWF0IjoxNzQ4OTQ2NzEyLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.CfSLFFjvjrsWIihvJqtfaTT3t6rTin_ejBjyhirtGRiiDpJqxCXFs0cPUDwyusN-f0JTImQezIRxz_gNsXD6_BmqTQE0r74-O3XfygGeD-KDc3owdyTKF4GzOTdey9kTZxGPW-_yAP1WAVyKvRfGggm2uQMCktsee_wNAyFm7AOBAqi2gNm2WeYxMs_wR2mv1Ao4Dz3KWOfi0MXQgys28T8463oRuvowAeret3Yo_Da-5MzI75qexnz0HQbF1B5YhIpAlUEa0udTJJa28-kFrIVzGL2ELb82Y8b0OMGnQlptrIpd6kf2-u0xCxRvKT_VMyUKlQrMuEcO-wdhT5RGfQ\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTkxMiwiaWF0IjoxNzQ4OTQ2NzEyLCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:35282" 2025-06-03T10:31:52.980787Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668930808094092:2592] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.980793Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668930808094092:2592] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-03T10:31:52.980865Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1401: Actor# [59:7511668930808094092:2592] txid# 281474976715663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-03T10:31:52.980883Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1436: Actor# [59:7511668930808094092:2592] txid# 281474976715663 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-03T10:31:52.980894Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668930808094092:2592] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.980985Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# 
[59:7511668930808094092:2592] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.980995Z node 59 :TX_PROXY ERROR: schemereq.cpp:1079: Actor# [59:7511668930808094092:2592] txid# 281474976715663, Access denied for ordinaryuser, attempt to manage user 2025-06-03T10:31:52.981014Z node 59 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [59:7511668930808094092:2592] txid# 281474976715663, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-03T10:31:52.981021Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668930808094092:2592] txid# 281474976715663 SEND to# [59:7511668930808094091:2348] Source {TEvProposeTransactionStatus Status# 5} 2025-06-03T10:31:52.981109Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=59&id=Njg5Nzg0MGYtODM4MzlkYy01YjFjYTY1OS1mZDBmN2Zh, ActorId: [59:7511668930808094077:2348], ActorState: ExecuteState, TraceId: 01jwtnhych4wwp80trk66yq8vq, Create QueryResponse for error on request, msg: 2025-06-03T10:31:52.981228Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7511668930808093317:2113] Handle TEvExecuteKqpTransaction 2025-06-03T10:31:52.981238Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7511668930808093317:2113] TxId# 281474976715664 ProcessProposeKqpTransaction
>> KqpNewEngine::PkRangeSelect2 [GOOD]
>> KqpNewEngine::OnlineRO_Consistent
>> KqpNamedExpressions::NamedExpressionChanged-UseSink
>> KqpNewEngine::OnlineRO_Consistent [GOOD]
>> KqpNewEngine::OnlineRO_Inconsistent
>> KqpNewEngine::OnlineRO_Inconsistent [GOOD]
>> KqpNewEngine::Nondeterministic
>> TxUsage::WriteToTopic_Demo_1_Query [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_vdisk2/unittest >> VDiskTest::HugeBlobWrite [GOOD]
Test command err: Put id# [40:1:1:0:0:589824:1] totalSize# 0 blobValueIndex# 37 Trim Put id# [75:1:1:0:0:589824:1] totalSize# 589824 blobValueIndex# 35 Put id# [55:1:1:0:0:1024:1] totalSize# 1179648 blobValueIndex# 17 Put id# [63:1:1:0:0:40960:1] totalSize# 1180672 blobValueIndex# 29 Change MinHugeBlobSize# 8192 Put id# [61:1:1:0:0:1572864:1] totalSize# 1221632 blobValueIndex# 56 Put id# [7:1:1:0:0:1048576:1] totalSize# 2794496 blobValueIndex# 46 Put id# [59:1:1:0:0:589824:1] totalSize# 3843072 blobValueIndex# 39 Put id# [70:1:1:0:0:1024:1] totalSize# 4432896 blobValueIndex# 18 Put id# [21:1:1:0:0:40960:1] totalSize# 4433920 blobValueIndex# 27 Put id# [81:1:1:0:0:589824:1] totalSize# 4474880 blobValueIndex# 30 Put id# [96:1:1:0:0:1048576:1] totalSize# 5064704 blobValueIndex# 44 Put id# [89:1:1:0:0:1048576:1] totalSize# 6113280 blobValueIndex# 45 Put id# [97:1:1:0:0:40960:1] totalSize# 7161856 blobValueIndex# 23 Change MinHugeBlobSize# 61440 Restart Put id# [36:1:1:0:0:1048576:1] totalSize# 7202816 blobValueIndex# 42 Change MinHugeBlobSize# 524288 Trim Put id# [19:1:1:0:0:10:1] totalSize# 8251392 blobValueIndex# 1 Put id# [75:1:2:0:0:1048576:1] totalSize# 8251402 blobValueIndex# 40 Trim Put id# [21:1:2:0:0:10:1] totalSize# 9299978 blobValueIndex# 5 Change MinHugeBlobSize# 65536 Put id# [56:1:1:0:0:589824:1] totalSize# 9299988 blobValueIndex# 31 Trim Put id# [80:1:1:0:0:1572864:1] totalSize# 9889812 blobValueIndex# 56 Trim Put id# [87:1:1:0:0:1024:1] totalSize# 11462676 blobValueIndex# 12 Put id# [40:1:2:0:0:1024:1] totalSize# 11463700 blobValueIndex# 18 Change MinHugeBlobSize# 61440 Put id# [28:1:1:0:0:1024:1] totalSize# 11464724 blobValueIndex# 11 Change MinHugeBlobSize# 524288 Put id#
[91:1:1:0:0:40960:1] totalSize# 11465748 blobValueIndex# 24 Put id# [89:1:2:0:0:40960:1] totalSize# 11506708 blobValueIndex# 27 Trim Put id# [92:1:1:0:0:1024:1] totalSize# 11547668 blobValueIndex# 16 Trim Put id# [38:1:1:0:0:1048576:1] totalSize# 11548692 blobValueIndex# 48 Put id# [38:1:2:0:0:40960:1] totalSize# 12597268 blobValueIndex# 25 Put id# [6:1:1:0:0:1024:1] totalSize# 12638228 blobValueIndex# 15 Put id# [67:1:1:0:0:1024:1] totalSize# 12639252 blobValueIndex# 18 Put id# [72:1:1:0:0:1024:1] totalSize# 12640276 blobValueIndex# 14 Change MinHugeBlobSize# 8192 Put id# [37:1:1:0:0:1572864:1] totalSize# 12641300 blobValueIndex# 53 Put id# [31:1:1:0:0:40960:1] totalSize# 14214164 blobValueIndex# 25 Put id# [29:1:1:0:0:1572864:1] totalSize# 14255124 blobValueIndex# 54 Change MinHugeBlobSize# 12288 Put id# [91:1:2:0:0:40960:1] totalSize# 15827988 blobValueIndex# 23 Put id# [32:1:1:0:0:10:1] totalSize# 15868948 blobValueIndex# 2 Put id# [19:1:2:0:0:1572864:1] totalSize# 15868958 blobValueIndex# 54 Restart Put id# [65:1:1:0:0:1048576:1] totalSize# 17441822 blobValueIndex# 42 Put id# [23:1:1:0:0:589824:1] totalSize# 18490398 blobValueIndex# 32 Put id# [93:1:1:0:0:589824:1] totalSize# 19080222 blobValueIndex# 30 Put id# [19:1:3:0:0:1024:1] totalSize# 19670046 blobValueIndex# 18 Put id# [19:1:4:0:0:1572864:1] totalSize# 19671070 blobValueIndex# 54 Put id# [92:1:2:0:0:40960:1] totalSize# 21243934 blobValueIndex# 23 Trim Put id# [24:1:1:0:0:1048576:1] totalSize# 21284894 blobValueIndex# 44 Put id# [29:1:2:0:0:1572864:1] totalSize# 22333470 blobValueIndex# 58 Trim Put id# [87:1:2:0:0:1572864:1] totalSize# 23906334 blobValueIndex# 59 Put id# [90:1:1:0:0:40960:1] totalSize# 25479198 blobValueIndex# 27 Change MinHugeBlobSize# 61440 Put id# [18:1:1:0:0:1024:1] totalSize# 25520158 blobValueIndex# 17 Trim Put id# [34:1:1:0:0:1024:1] totalSize# 25521182 blobValueIndex# 16 Change MinHugeBlobSize# 12288 Put id# [62:1:1:0:0:589824:1] totalSize# 25522206 blobValueIndex# 33 Put id# [87:1:3:0:0:40960:1] totalSize# 26112030 blobValueIndex# 24 Put id# [15:1:1:0:0:1572864:1] totalSize# 26152990 blobValueIndex# 56 Put id# [62:1:2:0:0:1048576:1] totalSize# 27725854 blobValueIndex# 43 Restart Put id# [69:1:1:0:0:1048576:1] totalSize# 28774430 blobValueIndex# 47 Trim Put id# [82:1:1:0:0:1024:1] totalSize# 29823006 blobValueIndex# 14 Put id# [33:1:1:0:0:1048576:1] totalSize# 29824030 blobValueIndex# 43 Put id# [70:1:2:0:0:1024:1] totalSize# 30872606 blobValueIndex# 14 Change MinHugeBlobSize# 8192 Put id# [61:1:2:0:0:1024:1] totalSize# 30873630 blobValueIndex# 13 Change MinHugeBlobSize# 65536 Put id# [1:1:1:0:0:1024:1] totalSize# 30874654 blobValueIndex# 14 Put id# [21:1:3:0:0:40960:1] totalSize# 30875678 blobValueIndex# 21 Put id# [86:1:1:0:0:1048576:1] totalSize# 30916638 blobValueIndex# 46 Trim Put id# [30:1:1:0:0:589824:1] totalSize# 31965214 blobValueIndex# 32 Put id# [37:1:2:0:0:1048576:1] totalSize# 32555038 blobValueIndex# 41 Put id# [48:1:1:0:0:589824:1] totalSize# 33603614 blobValueIndex# 35 Put id# [56:1:2:0:0:40960:1] totalSize# 34193438 blobValueIndex# 26 Put id# [5:1:1:0:0:10:1] totalSize# 34234398 blobValueIndex# 1 Put id# [86:1:2:0:0:1048576:1] totalSize# 34234408 blobValueIndex# 46 Put id# [98:1:1:0:0:40960:1] totalSize# 35282984 blobValueIndex# 29 Put id# [22:1:1:0:0:1572864:1] totalSize# 35323944 blobValueIndex# 51 Put id# [70:1:3:0:0:1024:1] totalSize# 36896808 blobValueIndex# 16 Put id# [95:1:1:0:0:1572864:1] totalSize# 36897832 blobValueIndex# 53 Put id# [85:1:1:0:0:1572864:1] totalSize# 38470696 
blobValueIndex# 50 Put id# [46:1:1:0:0:1572864:1] totalSize# 40043560 blobValueIndex# 50 Put id# [9:1:1:0:0:1572864:1] totalSize# 41616424 blobValueIndex# 58 Put id# [42:1:1:0:0:589824:1] totalSize# 43189288 blobValueIndex# 33 Put id# [50:1:1:0:0:10:1] totalSize# 43779112 blobValueIndex# 8 Put id# [7:1:2:0:0:1572864:1] totalSize# 43779122 blobValueIndex# 58 Put id# [45:1:1:0:0:10:1] totalSize# 45351986 blobValueIndex# 0 Trim Put id# [1:1:2:0:0:40960:1] totalSize# 45351996 blobValueIndex# 29 Put id# [70:1:4:0:0:40960:1] totalSize# 45392956 blobValueIndex# 24 Change MinHugeBlobSize# 61440 Put id# [13:1:1:0:0:589824:1] totalSize# 45433916 blobValueIndex# 35 Restart Put id# [32:1:2:0:0:10:1] totalSize# 46023740 blobValueIndex# 9 Put id# [84:1:1:0:0:1024:1] totalSize# 46023750 blobValueIndex# 14 Put id# [58:1:1:0:0:1048576:1] totalSize# 46024774 blobValueIndex# 43 Trim Put id# [14:1:1:0:0:40960:1] totalSize# 47073350 blobValueIndex# 26 Put id# [65:1:2:0:0:1048576:1] totalSize# 47114310 blobValueIndex# 44 Trim Put id# [43:1:1:0:0:1572864:1] totalSize# 48162886 blobValueIndex# 51 Change MinHugeBlobSize# 8192 Put id# [18:1:2:0:0:10:1] totalSize# 49735750 blobValueIndex# 3 Put id# [6:1:2:0:0:1572864:1] totalSize# 49735760 blobValueIndex# 56 Change MinHugeBlobSize# 12288 Put id# [50:1:2:0:0:1024:1] totalSize# 51308624 blobValueIndex# 14 Put id# [15:1:2:0:0:589824:1] totalSize# 51309648 blobValueIndex# 38 Put id# [76:1:1:0:0:589824:1] totalSize# 51899472 blobValueIndex# 31 Put id# [49:1:1:0:0:10:1] totalSize# 52489296 blobValueIndex# 4 Put id# [51:1:1:0:0:589824:1] totalSize# 52489306 blobValueIndex# 34 Put id# [4:1:1:0:0:10:1] totalSize# 53079130 blobValueIndex# 9 Put id# [81:1:2:0:0:40960:1] totalSize# 53079140 blobValueIndex# 22 Put id# [77:1:1:0:0:1572864:1] totalSize# 53120100 blobValueIndex# 50 Put id# [16:1:1:0:0:1024:1] totalSize# 54692964 blobValueIndex# 15 Change MinHugeBlobSize# 8192 Put id# [63:1:2:0:0:1572864:1] totalSize# 54693988 blobValueIndex# 56 Put id# [8:1:1:0:0:1048576:1] totalSize# 56266852 blobValueIndex# 42 Put id# [65:1:3:0:0:1024:1] totalSize# 57315428 blobValueIndex# 15 Put id# [63:1:3:0:0:1024:1] totalSize# 57316452 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Put id# [62:1:3:0:0:1572864:1] totalSize# 57317476 blobValueIndex# 55 Trim Put id# [28:1:2:0:0:10:1] totalSize# 58890340 blobValueIndex# 3 Put id# [92:1:3:0:0:589824:1] totalSize# 58890350 blobValueIndex# 38 Put id# [95:1:2:0:0:1048576:1] totalSize# 59480174 blobValueIndex# 49 Put id# [26:1:1:0:0:589824:1] totalSize# 60528750 blobValueIndex# 32 Trim Put id# [43:1:2:0:0:10:1] totalSize# 61118574 blobValueIndex# 2 Trim Put id# [94:1:1:0:0:1572864:1] totalSize# 61118584 blobValueIndex# 56 Trim Put id# [55:1:2:0:0:589824:1] totalSize# 62691448 blobValueIndex# 38 Trim Put id# [67:1:2:0:0:10:1] totalSize# 63281272 blobValueIndex# 0 Put id# [27:1:1:0:0:1572864:1] totalSize# 63281282 blobValueIndex# 55 Put id# [52:1:1:0:0:1024:1] totalSize# 64854146 blobValueIndex# 12 Put id# [32:1:3:0:0:40960:1] totalSize# 64855170 blobValueIndex# 20 Put id# [3:1:1:0:0:589824:1] totalSize# 64896130 blobValueIndex# 33 Put id# [27:1:2:0:0:40960:1] totalSize# 65485954 blobValueIndex# 24 Trim Put id# [17:1:1:0:0:1572864:1] totalSize# 65526914 blobValueIndex# 51 Put id# [99:1:1:0:0:1024:1] totalSize# 67099778 blobValueIndex# 11 Put id# [22:1:2:0:0:10:1] totalSize# 67100802 blobValueIndex# 2 Put id# [29:1:3:0:0:40960:1] totalSize# 67100812 blobValueIndex# 28 Put id# [11:1:1:0:0:10:1] totalSize# 67141772 blobValueIndex# 2 Put id# 
[83:1:1:0:0:1024:1] totalSize# 67141782 blobValueIndex# 11 Change MinHugeBlobSize# 8192 Put id# [48:1:2:0:0:1024:1] totalSize# 67142806 blobValueIndex# 10 Trim Put id# [64:1:1:0:0:40960:1] totalSize# 67143830 blobValueIndex# 28 Put id# [80:1:2:0:0:10:1] totalSize# 67184790 blobValueIndex# 4 Put id# [83:1:2:0:0:589824:1] totalSize# 67184800 blobValueIndex# 37 Put id# [52:1:2:0:0:1572864:1] totalSize# 67774624 blobValueIndex# 50 Put id# [28:1:3:0:0:1572864:1] totalSize# 69347488 blobValueIndex# 56 Put id# [63:1:4:0:0:10:1] totalSize# 70920352 blobValueIndex# 8 Put id# [31:1:2:0:0:1048576:1] totalSize# 70920362 blobValueIndex# 47 Put id# [92:1:4:0:0:1572864:1] totalSize# 71968938 blobValueIndex# 56 Change MinHugeBlobSize# 65536 Put id# [11:1:2:0:0:1024:1] totalSize# 73541802 blobValueIndex# 17 Put id# [88:1:1:0:0:1048576:1] totalSize# 73542826 blobValueIndex# 41 Change MinHugeBlobSize# 12288 Trim Put id# [100:1:1:0:0:10:1] totalSize# 74591402 blobValueIndex# 3 Put id# [71:1:1:0:0:10:1] totalSize# 74591412 blobValueIndex# 1 Put id# [68:1:1:0:0:40960:1] totalSize# 74591422 blobValueIndex# 27 Trim Put id# [36:1:2:0:0:10:1] totalSize# 74632382 blobValueIndex# 9 Trim Restart Put id# [87:1:4:0:0:1048576:1] totalSize# 74632392 blobValueIndex# 41 Put id# [33:1:2:0:0:1572864:1] totalSize# 75680968 blobValueIndex# 57 Put id# [45:1:2:0:0:10:1] totalSize# 77253832 blobValueIndex# 6 Change MinHugeBlobSize# 524288 Put id# [42:1:2:0:0:1024:1] totalSize# 77253842 blobValueIndex# 17 Put id# [41:1:1:0:0:1024:1] totalSize# 77254866 blobValueIndex# 15 Restart Put id# [4:1:2:0:0:1048576:1] totalSize# 77255890 blobValueIndex# 49 Put id# [83:1:3:0:0:1024:1] totalSize# 78304466 blobValueIndex# 10 Put id# [39:1:1:0:0:40960:1] totalSize# 78305490 blobValueIndex# 28 Put id# [11:1:3:0:0:10:1] totalSize# 78346450 blobValueIndex# 9 Put id# [16:1:2:0:0:40960:1] totalSize# 78346460 blobValueIndex# 26 Put id# [81:1:3:0:0:1572864:1] totalSize# 78387420 blobValueIndex# 58 Put id# [85:1:2:0:0:40960:1] totalSize# 79960284 blobValueIndex# 23 Trim Put id# [67:1:3:0:0:1024:1] totalSize# 80001244 blobValueIndex# 17 Trim Put id# [87:1:5:0:0:10:1] totalSize# 80002268 blobValueIndex# 8 Put id# [52:1:3:0:0:589824:1] totalSize# 80002278 blobValueIndex# 31 Trim Put id# [59:1:2:0:0:10:1] totalSize# 80592102 blobValueIndex# 0 Put id# [1:1:3:0:0:1024:1] totalSize# 80592112 blobValueIndex# 12 Put id# [18:1:3:0:0:10:1] totalSize# 80593136 blobValueIndex# 6 Put id# [94:1:2:0:0:40960:1] totalSize# 80593146 blobValueIndex# 21 Put id# [17:1:2:0:0:1024:1] totalSize# 80634106 blobValueIndex# 16 Restart Put id# [5:1:2:0:0:1048576:1] totalSize# 80635130 blobValueIndex# 48 Put id# [72:1:2:0:0:40960:1] totalSize# 81683706 blobValueIndex# 26 Put id# [11:1:4:0:0:1024:1] totalSize# 81724666 blobValueIndex# 16 Put id# [89:1:3:0:0:10:1] totalSize# 81725690 blobValueIndex# 8 Restart Put id# [2:1:1:0:0:1572864:1] totalSize# 81725700 blobValueIndex# 59 Put id# [55:1:3:0:0:1572864:1] totalSize# 83298564 blobValueIndex# 56 Put id# [44:1:1:0:0:1024:1] totalSize# 84871428 blobValueIndex# 13 Put id# [33:1:3:0:0:1024:1] totalSize# 84872452 blobValueIndex# 15 Change MinHugeBlobSize# 65536 Put id# [70:1:5:0:0:40960:1] totalSize# ... 
ize# 3656520306 blobValueIndex# 51 Change MinHugeBlobSize# 65536 Put id# [81:1:74:0:0:1024:1] totalSize# 3658093170 blobValueIndex# 11 Put id# [57:1:82:0:0:10:1] totalSize# 3658094194 blobValueIndex# 6 Put id# [91:1:74:0:0:589824:1] totalSize# 3658094204 blobValueIndex# 30 Change MinHugeBlobSize# 61440 Put id# [5:1:95:0:0:1024:1] totalSize# 3658684028 blobValueIndex# 12 Change MinHugeBlobSize# 12288 Put id# [40:1:102:0:0:589824:1] totalSize# 3658685052 blobValueIndex# 31 Put id# [26:1:100:0:0:10:1] totalSize# 3659274876 blobValueIndex# 7 Put id# [2:1:86:0:0:1024:1] totalSize# 3659274886 blobValueIndex# 18 Put id# [55:1:80:0:0:40960:1] totalSize# 3659275910 blobValueIndex# 27 Put id# [17:1:85:0:0:1572864:1] totalSize# 3659316870 blobValueIndex# 52 Put id# [1:1:94:0:0:10:1] totalSize# 3660889734 blobValueIndex# 4 Put id# [25:1:81:0:0:10:1] totalSize# 3660889744 blobValueIndex# 7 Put id# [25:1:82:0:0:589824:1] totalSize# 3660889754 blobValueIndex# 34 Put id# [17:1:86:0:0:1024:1] totalSize# 3661479578 blobValueIndex# 17 Put id# [30:1:103:0:0:589824:1] totalSize# 3661480602 blobValueIndex# 30 Put id# [33:1:96:0:0:1572864:1] totalSize# 3662070426 blobValueIndex# 57 Put id# [11:1:88:0:0:40960:1] totalSize# 3663643290 blobValueIndex# 27 Put id# [53:1:99:0:0:1024:1] totalSize# 3663684250 blobValueIndex# 19 Trim Put id# [85:1:101:0:0:1048576:1] totalSize# 3663685274 blobValueIndex# 47 Put id# [49:1:89:0:0:1024:1] totalSize# 3664733850 blobValueIndex# 13 Put id# [71:1:93:0:0:589824:1] totalSize# 3664734874 blobValueIndex# 34 Put id# [87:1:114:0:0:10:1] totalSize# 3665324698 blobValueIndex# 6 Put id# [28:1:88:0:0:1572864:1] totalSize# 3665324708 blobValueIndex# 56 Put id# [53:1:100:0:0:1048576:1] totalSize# 3666897572 blobValueIndex# 45 Restart Put id# [68:1:112:0:0:1048576:1] totalSize# 3667946148 blobValueIndex# 48 Trim Put id# [99:1:89:0:0:589824:1] totalSize# 3668994724 blobValueIndex# 36 Put id# [38:1:84:0:0:10:1] totalSize# 3669584548 blobValueIndex# 5 Put id# [82:1:80:0:0:1024:1] totalSize# 3669584558 blobValueIndex# 13 Put id# [78:1:88:0:0:1572864:1] totalSize# 3669585582 blobValueIndex# 57 Put id# [72:1:104:0:0:1048576:1] totalSize# 3671158446 blobValueIndex# 41 Put id# [58:1:96:0:0:589824:1] totalSize# 3672207022 blobValueIndex# 38 Put id# [70:1:99:0:0:1048576:1] totalSize# 3672796846 blobValueIndex# 44 Put id# [45:1:88:0:0:40960:1] totalSize# 3673845422 blobValueIndex# 25 Put id# [6:1:80:0:0:1024:1] totalSize# 3673886382 blobValueIndex# 17 Put id# [42:1:91:0:0:1024:1] totalSize# 3673887406 blobValueIndex# 18 Trim Put id# [69:1:86:0:0:10:1] totalSize# 3673888430 blobValueIndex# 0 Put id# [77:1:92:0:0:10:1] totalSize# 3673888440 blobValueIndex# 5 Trim Put id# [91:1:75:0:0:589824:1] totalSize# 3673888450 blobValueIndex# 37 Put id# [93:1:88:0:0:589824:1] totalSize# 3674478274 blobValueIndex# 39 Restart Put id# [29:1:98:0:0:1572864:1] totalSize# 3675068098 blobValueIndex# 59 Put id# [18:1:81:0:0:10:1] totalSize# 3676640962 blobValueIndex# 3 Put id# [81:1:75:0:0:40960:1] totalSize# 3676640972 blobValueIndex# 25 Put id# [99:1:90:0:0:40960:1] totalSize# 3676681932 blobValueIndex# 28 Put id# [20:1:86:0:0:40960:1] totalSize# 3676722892 blobValueIndex# 26 Trim Put id# [82:1:81:0:0:10:1] totalSize# 3676763852 blobValueIndex# 3 Put id# [49:1:90:0:0:1572864:1] totalSize# 3676763862 blobValueIndex# 55 Change MinHugeBlobSize# 65536 Put id# [13:1:86:0:0:40960:1] totalSize# 3678336726 blobValueIndex# 22 Put id# [14:1:83:0:0:589824:1] totalSize# 3678377686 blobValueIndex# 31 Put id# [34:1:95:0:0:1048576:1] 
totalSize# 3678967510 blobValueIndex# 47 Change MinHugeBlobSize# 12288 Put id# [69:1:87:0:0:1024:1] totalSize# 3680016086 blobValueIndex# 12 Put id# [81:1:76:0:0:40960:1] totalSize# 3680017110 blobValueIndex# 22 Put id# [20:1:87:0:0:40960:1] totalSize# 3680058070 blobValueIndex# 28 Put id# [88:1:93:0:0:589824:1] totalSize# 3680099030 blobValueIndex# 32 Put id# [85:1:102:0:0:40960:1] totalSize# 3680688854 blobValueIndex# 23 Trim Restart Put id# [92:1:99:0:0:40960:1] totalSize# 3680729814 blobValueIndex# 23 Put id# [23:1:92:0:0:589824:1] totalSize# 3680770774 blobValueIndex# 33 Put id# [28:1:89:0:0:1048576:1] totalSize# 3681360598 blobValueIndex# 46 Trim Put id# [76:1:89:0:0:1048576:1] totalSize# 3682409174 blobValueIndex# 43 Put id# [72:1:105:0:0:10:1] totalSize# 3683457750 blobValueIndex# 6 Put id# [62:1:108:0:0:589824:1] totalSize# 3683457760 blobValueIndex# 36 Put id# [43:1:78:0:0:1048576:1] totalSize# 3684047584 blobValueIndex# 42 Put id# [63:1:96:0:0:1048576:1] totalSize# 3685096160 blobValueIndex# 48 Put id# [93:1:89:0:0:1024:1] totalSize# 3686144736 blobValueIndex# 17 Put id# [22:1:87:0:0:589824:1] totalSize# 3686145760 blobValueIndex# 33 Change MinHugeBlobSize# 65536 Put id# [38:1:85:0:0:1572864:1] totalSize# 3686735584 blobValueIndex# 57 Put id# [92:1:100:0:0:40960:1] totalSize# 3688308448 blobValueIndex# 25 Put id# [65:1:87:0:0:1024:1] totalSize# 3688349408 blobValueIndex# 16 Put id# [4:1:93:0:0:10:1] totalSize# 3688350432 blobValueIndex# 3 Put id# [13:1:87:0:0:1572864:1] totalSize# 3688350442 blobValueIndex# 56 Put id# [1:1:95:0:0:40960:1] totalSize# 3689923306 blobValueIndex# 25 Put id# [42:1:92:0:0:1048576:1] totalSize# 3689964266 blobValueIndex# 43 Put id# [43:1:79:0:0:589824:1] totalSize# 3691012842 blobValueIndex# 30 Put id# [47:1:69:0:0:40960:1] totalSize# 3691602666 blobValueIndex# 26 Put id# [98:1:97:0:0:1048576:1] totalSize# 3691643626 blobValueIndex# 42 Restart Put id# [5:1:96:0:0:1572864:1] totalSize# 3692692202 blobValueIndex# 59 Put id# [55:1:81:0:0:10:1] totalSize# 3694265066 blobValueIndex# 8 Put id# [19:1:98:0:0:1048576:1] totalSize# 3694265076 blobValueIndex# 44 Trim Put id# [22:1:88:0:0:40960:1] totalSize# 3695313652 blobValueIndex# 24 Put id# [46:1:81:0:0:1048576:1] totalSize# 3695354612 blobValueIndex# 46 Put id# [75:1:90:0:0:589824:1] totalSize# 3696403188 blobValueIndex# 34 Put id# [20:1:88:0:0:10:1] totalSize# 3696993012 blobValueIndex# 4 Change MinHugeBlobSize# 524288 Trim Put id# [61:1:87:0:0:1048576:1] totalSize# 3696993022 blobValueIndex# 49 Put id# [33:1:97:0:0:1572864:1] totalSize# 3698041598 blobValueIndex# 57 Put id# [51:1:103:0:0:10:1] totalSize# 3699614462 blobValueIndex# 3 Put id# [3:1:78:0:0:10:1] totalSize# 3699614472 blobValueIndex# 4 Put id# [34:1:96:0:0:1024:1] totalSize# 3699614482 blobValueIndex# 10 Trim Put id# [3:1:79:0:0:589824:1] totalSize# 3699615506 blobValueIndex# 33 Put id# [85:1:103:0:0:40960:1] totalSize# 3700205330 blobValueIndex# 28 Put id# [83:1:93:0:0:1572864:1] totalSize# 3700246290 blobValueIndex# 57 Change MinHugeBlobSize# 61440 Trim Put id# [14:1:84:0:0:1048576:1] totalSize# 3701819154 blobValueIndex# 42 Put id# [61:1:88:0:0:40960:1] totalSize# 3702867730 blobValueIndex# 20 Restart Put id# [62:1:109:0:0:589824:1] totalSize# 3702908690 blobValueIndex# 35 Trim Put id# [31:1:105:0:0:40960:1] totalSize# 3703498514 blobValueIndex# 27 Put id# [63:1:97:0:0:589824:1] totalSize# 3703539474 blobValueIndex# 37 Put id# [80:1:79:0:0:10:1] totalSize# 3704129298 blobValueIndex# 4 Put id# [99:1:91:0:0:1572864:1] totalSize# 3704129308 
blobValueIndex# 59 Put id# [16:1:96:0:0:1024:1] totalSize# 3705702172 blobValueIndex# 18 Put id# [42:1:93:0:0:40960:1] totalSize# 3705703196 blobValueIndex# 28 Put id# [79:1:91:0:0:589824:1] totalSize# 3705744156 blobValueIndex# 39 Trim Put id# [93:1:90:0:0:1048576:1] totalSize# 3706333980 blobValueIndex# 45 Put id# [53:1:101:0:0:589824:1] totalSize# 3707382556 blobValueIndex# 36 Put id# [53:1:102:0:0:1572864:1] totalSize# 3707972380 blobValueIndex# 55 Put id# [85:1:104:0:0:10:1] totalSize# 3709545244 blobValueIndex# 0 Change MinHugeBlobSize# 524288 Put id# [68:1:113:0:0:1024:1] totalSize# 3709545254 blobValueIndex# 11 Put id# [67:1:82:0:0:40960:1] totalSize# 3709546278 blobValueIndex# 20 Put id# [75:1:91:0:0:1572864:1] totalSize# 3709587238 blobValueIndex# 55 Put id# [30:1:104:0:0:1024:1] totalSize# 3711160102 blobValueIndex# 15 Change MinHugeBlobSize# 12288 Put id# [6:1:81:0:0:1572864:1] totalSize# 3711161126 blobValueIndex# 50 Put id# [64:1:102:0:0:1572864:1] totalSize# 3712733990 blobValueIndex# 55 Put id# [74:1:87:0:0:1024:1] totalSize# 3714306854 blobValueIndex# 19 Put id# [59:1:99:0:0:1572864:1] totalSize# 3714307878 blobValueIndex# 58 Put id# [4:1:94:0:0:1024:1] totalSize# 3715880742 blobValueIndex# 17 Put id# [58:1:97:0:0:10:1] totalSize# 3715881766 blobValueIndex# 8 Put id# [7:1:82:0:0:10:1] totalSize# 3715881776 blobValueIndex# 2 Trim Restart Put id# [94:1:93:0:0:1024:1] totalSize# 3715881786 blobValueIndex# 18 Put id# [2:1:87:0:0:1024:1] totalSize# 3715882810 blobValueIndex# 14 Put id# [23:1:93:0:0:40960:1] totalSize# 3715883834 blobValueIndex# 29 Put id# [31:1:106:0:0:1024:1] totalSize# 3715924794 blobValueIndex# 19 Put id# [64:1:103:0:0:1572864:1] totalSize# 3715925818 blobValueIndex# 59 Put id# [73:1:93:0:0:589824:1] totalSize# 3717498682 blobValueIndex# 39 Put id# [90:1:73:0:0:40960:1] totalSize# 3718088506 blobValueIndex# 24 Put id# [78:1:89:0:0:589824:1] totalSize# 3718129466 blobValueIndex# 36 Put id# [82:1:82:0:0:1024:1] totalSize# 3718719290 blobValueIndex# 13 Put id# [1:1:96:0:0:10:1] totalSize# 3718720314 blobValueIndex# 4 Trim Put id# [83:1:94:0:0:1024:1] totalSize# 3718720324 blobValueIndex# 18 Restart Put id# [58:1:98:0:0:10:1] totalSize# 3718721348 blobValueIndex# 2 Put id# [13:1:88:0:0:1572864:1] totalSize# 3718721358 blobValueIndex# 52 Put id# [10:1:77:0:0:1024:1] totalSize# 3720294222 blobValueIndex# 13 Put id# [16:1:97:0:0:1572864:1] totalSize# 3720295246 blobValueIndex# 55 Put id# [70:1:100:0:0:589824:1] totalSize# 3721868110 blobValueIndex# 32 Put id# [14:1:85:0:0:1024:1] totalSize# 3722457934 blobValueIndex# 10 Put id# [15:1:87:0:0:10:1] totalSize# 3722458958 blobValueIndex# 8 Put id# [85:1:105:0:0:40960:1] totalSize# 3722458968 blobValueIndex# 27 Put id# [38:1:86:0:0:1572864:1] totalSize# 3722499928 blobValueIndex# 55 Put id# [27:1:89:0:0:1024:1] totalSize# 3724072792 blobValueIndex# 10 Put id# [68:1:114:0:0:1572864:1] totalSize# 3724073816 blobValueIndex# 59 Put id# [58:1:99:0:0:589824:1] totalSize# 3725646680 blobValueIndex# 35 Put id# [17:1:87:0:0:1572864:1] totalSize# 3726236504 blobValueIndex# 53 Restart Put id# [31:1:107:0:0:1024:1] totalSize# 3727809368 blobValueIndex# 10 Put id# [71:1:94:0:0:1048576:1] totalSize# 3727810392 blobValueIndex# 43 Put id# [52:1:79:0:0:1572864:1] totalSize# 3728858968 blobValueIndex# 52 Trim Put id# [60:1:80:0:0:589824:1] totalSize# 3730431832 blobValueIndex# 35 Put id# [72:1:106:0:0:40960:1] totalSize# 3731021656 blobValueIndex# 22 Put id# [82:1:83:0:0:589824:1] totalSize# 3731062616 blobValueIndex# 34 Put id# 
[57:1:83:0:0:40960:1] totalSize# 3731652440 blobValueIndex# 29 Put id# [79:1:92:0:0:1572864:1] totalSize# 3731693400 blobValueIndex# 57 Put id# [20:1:89:0:0:1572864:1] totalSize# 3733266264 blobValueIndex# 51 Trim Put id# [50:1:70:0:0:10:1] totalSize# 3734839128 blobValueIndex# 3 Put id# [52:1:80:0:0:40960:1] totalSize# 3734839138 blobValueIndex# 29 Put id# [27:1:90:0:0:1024:1] totalSize# 3734880098 blobValueIndex# 15 Put id# [5:1:97:0:0:589824:1] totalSize# 3734881122 blobValueIndex# 37 Put id# [3:1:80:0:0:589824:1] totalSize# 3735470946 blobValueIndex# 32 Trim Put id# [34:1:97:0:0:589824:1] totalSize# 3736060770 blobValueIndex# 30 Put id# [46:1:82:0:0:1024:1] totalSize# 3736650594 blobValueIndex# 13 Trim Put id# [51:1:104:0:0:1048576:1] totalSize# 3736651618 blobValueIndex# 40 Change MinHugeBlobSize# 65536 Put id# [19:1:99:0:0:10:1] totalSize# 3737700194 blobValueIndex# 3 Trim Put id# [95:1:83:0:0:40960:1] totalSize# 3737700204 blobValueIndex# 26 Change MinHugeBlobSize# 61440 Put id# [28:1:90:0:0:40960:1] totalSize# 3737741164 blobValueIndex# 28 Put id# [34:1:98:0:0:589824:1] totalSize# 3737782124 blobValueIndex# 32 Put id# [44:1:76:0:0:10:1] totalSize# 3738371948 blobValueIndex# 9 Put id# [65:1:88:0:0:1048576:1] totalSize# 3738371958 blobValueIndex# 40 Put id# [67:1:83:0:0:10:1] totalSize# 3739420534 blobValueIndex# 0 Restart
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes-EnabledSpilling [GOOD]
Test command err: cwd: /home/runner/.ya/build/build_root/u93c/002926/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk8 Trying to start YDB, gRPC: 10087, MsgBus: 24665 2025-06-03T10:31:04.499890Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668727190636907:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:04.499968Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002926/r3tmp/tmpRQs0Mp/pdisk_1.dat 2025-06-03T10:31:04.585772Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668727190636721:2079] 1748946664495753 != 1748946664495756 2025-06-03T10:31:04.589482Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10087, node 1 2025-06-03T10:31:04.621377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:04.621395Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:04.621398Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:04.621453Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24665 2025-06-03T10:31:04.653255Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:04.653289Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:04.657769Z node 1 :HIVE WARN: node_info.cpp:25:
HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24665 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:31:04.766450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.770038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.786267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.868364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.942641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.986316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.094048Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668731485605663:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.094086Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.162182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.176952Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.195794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.222413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.238290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.253246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.283951Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.318191Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668731485606316:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.318226Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.318389Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668731485606321:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.320011Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:05.324237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:05.324281Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668731485606323:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:05.398098Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668731485606374:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.505408Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668727190636907:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:09.505461Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:31:19.589397Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:31:19.589414Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"222898ff-8e6d1207-35cd6761-d00304b8") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1)) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"6ec1782-f59ea981-ff7ba4ed-96345fb0") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"fb12d6f0-ded124fc-a891515d-9b830dc5")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) )
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-NoAuth-BuiltinUser-DropUser-72 [GOOD]
Test command err: Starting YDB, grpc: 23174, msgbus: 64443 2025-06-03T10:31:18.917977Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668786049128285:2084];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:18.918006Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path
existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001455/r3tmp/tmpcYRZRD/pdisk_1.dat 2025-06-03T10:31:19.012512Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:19.024614Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:19.024653Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:19.026569Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23174, node 1 2025-06-03T10:31:19.049565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:19.049579Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:19.049581Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:19.049636Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64443 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:31:19.073267Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511668786049128484:2137] Handle TEvNavigate describe path dc-1 2025-06-03T10:31:19.075479Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7511668786049128484:2137] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:31:19.075533Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511668790344096242:2433] HANDLE EvNavigateScheme dc-1 2025-06-03T10:31:19.075968Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511668790344096242:2433] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.078685Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:31:19.090472Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511668790344096242:2433] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-03T10:31:19.093189Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511668790344096242:2433] Handle TEvDescribeSchemeResult Forward to# [1:7511668790344096237:2428] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 
72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 2025-06-03T10:31:19.099126Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668786049128484:2137] Handle TEvProposeTransaction 2025-06-03T10:31:19.099147Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668786049128484:2137] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:31:19.099180Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668786049128484:2137] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7511668790344096255:2439] 2025-06-03T10:31:19.114102Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668790344096255:2439] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:19.114165Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668790344096255:2439] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:31:19.114172Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668790344096255:2439] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:19.114196Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668790344096255:2439] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:19.114353Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668790344096255:2439] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.114414Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668790344096255:2439] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-03T10:31:19.114431Z node 1 
:TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668790344096255:2439] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:31:19.114484Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668790344096255:2439] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:31:19.114772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:19.117011Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668790344096255:2439] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:31:19.117039Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668790344096255:2439] txid# 281474976715657 SEND to# [1:7511668790344096254:2438] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:31:19.124278Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668786049128484:2137] Handle TEvProposeTransaction 2025-06-03T10:31:19.124319Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668786049128484:2137] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:31:19.124333Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668786049128484:2137] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511668790344096295:2475] 2025-06-03T10:31:19.125358Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668790344096295:2475] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:19.125394Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668790344096295:2475] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:31:19.125399Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668790344096295:2475] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:19.125422Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668790344096295:2475] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:19.125560Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668790344096295:2475] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:19.125591Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668790344096295:2475] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:19.125610Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668790344096295:2475] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 
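The DiffACL payloads in these ESchemeOpModifyACL records are protobuf messages printed with C-style octal escapes, which is why they read as line noise. A generic wire-format walk is enough to make them legible; the semantics suggested in the comments (an access-rights mask and a SID) are inferred from the decoded values, not taken from the YDB schema:

```python
def read_varint(buf, i):
    """Decode a base-128 varint at buf[i]; return (value, next index)."""
    value = shift = 0
    while True:
        byte = buf[i]; i += 1
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value, i
        shift += 7

def walk(buf, depth=0):
    """Dump (field, value) pairs; recurse into length-delimited fields that
    parse as nested messages, otherwise print them as raw bytes."""
    i = 0
    while i < len(buf):
        tag, i = read_varint(buf, i)
        field, wire_type = tag >> 3, tag & 7
        if wire_type == 0:                      # varint
            value, i = read_varint(buf, i)
            print("  " * depth + f"field {field}: varint {value}")
        elif wire_type == 2:                    # length-delimited
            length, i = read_varint(buf, i)
            if i + length > len(buf):
                raise ValueError("length overruns buffer")
            chunk, i = buf[i:i + length], i + length
            print("  " * depth + f"field {field}: {length} bytes")
            try:
                walk(chunk, depth + 1)
            except Exception:                   # not a nested message
                print("  " * (depth + 1) + repr(chunk))
        else:
            raise ValueError(f"wire type {wire_type} not handled here")

# DiffACL from the ESchemeOpModifyACL record above, with the octal escapes
# rewritten as Python byte escapes:
walk(b"\n\x1a\x08\x00\x12\x16\x08\x01\x10\xff\xff\x03\x1a\x0croot@builtin \x03")
```

Run against this DiffACL it prints one nested entry containing varint 1, varint 65535 (0xFFFF, plausibly a full access mask), the 12-byte string root@builtin, and varint 3.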
2025-06-03T10:31:19.125663Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668790344096295:2475] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:31:19.125810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:19.126858Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668790344096295:2475] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-06-03T10:31:19.126878Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668790344096295:2475] txid# 281474976715658 SEND t ... e 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668927082887981:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.303070Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668927082887981:2113] TxId# 281474976715661 ProcessProposeTransaction 2025-06-03T10:31:52.303096Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668927082887981:2113] Cookie# 0 userReqId# "" txid# 281474976715661 SEND to# [59:7511668931377855973:2543] 2025-06-03T10:31:52.303970Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668931377855973:2543] txid# 281474976715661 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1/.metadata/workload_manager/pools" OperationType: ESchemeOpCreateResourcePool ModifyACL { Name: "default" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003\n!\010\000\022\035\010\001\020\201\004\032\024all-users@well-known \003\n\031\010\000\022\025\010\001\020\201\004\032\014root@builtin \003" NewOwner: "metadata@system" } Internal: true CreateResourcePool { Name: "default" Properties { Properties { key: "concurrent_query_limit" value: "-1" } Properties { key: "database_load_cpu_threshold" value: "-1" } Properties { key: "query_cancel_after_seconds" value: "0" } Properties { key: "query_cpu_limit_percent_per_node" value: "-1" } Properties { key: "query_memory_limit_percent_per_node" value: "-1" } Properties { key: "queue_size" value: "-1" } Properties { key: "resource_weight" value: "-1" } Properties { key: "total_cpu_limit_percent_per_node" value: "-1" } } } } } UserToken: "\n\017metadata@system\022\000" DatabaseName: "/dc-1" 2025-06-03T10:31:52.303995Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668931377855973:2543] txid# 281474976715661 Bootstrap, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.304001Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668931377855973:2543] txid# 281474976715661 Bootstrap, UserSID: metadata@system IsClusterAdministrator: 0 2025-06-03T10:31:52.304096Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1401: Actor# [59:7511668931377855973:2543] txid# 281474976715661 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-03T10:31:52.304132Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1436: Actor# [59:7511668931377855973:2543] txid# 281474976715661 HandleResolveDatabase, UserSID: metadata@system CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-03T10:31:52.304284Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [59:7511668931377855973:2543] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 
2025-06-03T10:31:52.304309Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668931377855973:2543] txid# 281474976715661 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.304361Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668931377855973:2543] txid# 281474976715661 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.304405Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668931377855973:2543] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:52.304423Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668931377855973:2543] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-03T10:31:52.304491Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668931377855973:2543] txid# 281474976715661 HANDLE EvClientConnected 2025-06-03T10:31:52.305702Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668931377855973:2543] txid# 281474976715661 Status StatusAlreadyExists HANDLE {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715661 Reason# Check failed: path: '/dc-1/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92} 2025-06-03T10:31:52.305757Z node 59 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [59:7511668931377855973:2543] txid# 281474976715661, issues: { message: "Check failed: path: \'/dc-1/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:52.305768Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668931377855973:2543] txid# 281474976715661 SEND to# [59:7511668931377855903:2338] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-03T10:31:52.308946Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668927082887981:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.308965Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668927082887981:2113] TxId# 281474976715662 ProcessProposeTransaction 2025-06-03T10:31:52.308986Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668927082887981:2113] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7511668931377855997:2555] 2025-06-03T10:31:52.309881Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668931377855997:2555] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:33782" 
2025-06-03T10:31:52.309934Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668931377855997:2555] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.309939Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668931377855997:2555] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:52.309957Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668931377855997:2555] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.310121Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668931377855997:2555] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.310173Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668931377855997:2555] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:52.310209Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668931377855997:2555] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-03T10:31:52.310284Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668931377855997:2555] txid# 281474976715662 HANDLE EvClientConnected 2025-06-03T10:31:52.314176Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668931377855997:2555] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-03T10:31:52.314202Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668931377855997:2555] txid# 281474976715662 SEND to# [59:7511668931377855996:2331] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-03T10:31:52.321843Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668927082887981:2113] Handle TEvProposeTransaction 2025-06-03T10:31:52.321868Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668927082887981:2113] TxId# 281474976715663 ProcessProposeTransaction 2025-06-03T10:31:52.321894Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668927082887981:2113] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7511668931377856029:2569] 2025-06-03T10:31:52.322825Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668931377856029:2569] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\024ordinaryuser@builtin\022\030\022\026\n\024all-users@well-known\032\024ordinaryuser@builtin\"\007Builtin*\027ordi****ltin (32520BBF)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:33782" 2025-06-03T10:31:52.322860Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668931377856029:2569] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:52.322866Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668931377856029:2569] txid# 281474976715663 Bootstrap, UserSID: ordinaryuser@builtin IsClusterAdministrator: 0 
2025-06-03T10:31:52.322946Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1401: Actor# [59:7511668931377856029:2569] txid# 281474976715663 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-03T10:31:52.322966Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1436: Actor# [59:7511668931377856029:2569] txid# 281474976715663 HandleResolveDatabase, UserSID: ordinaryuser@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-03T10:31:52.322982Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668931377856029:2569] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:52.323068Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668931377856029:2569] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:52.323080Z node 59 :TX_PROXY ERROR: schemereq.cpp:1079: Actor# [59:7511668931377856029:2569] txid# 281474976715663, Access denied for ordinaryuser@builtin, attempt to manage user 2025-06-03T10:31:52.323109Z node 59 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [59:7511668931377856029:2569] txid# 281474976715663, issues: { message: "Access denied for ordinaryuser@builtin" issue_code: 200000 severity: 1 } 2025-06-03T10:31:52.323121Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668931377856029:2569] txid# 281474976715663 SEND to# [59:7511668931377856028:2347] Source {TEvProposeTransactionStatus Status# 5} 2025-06-03T10:31:52.323240Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=59&id=ZGMzMDI3MjEtNjNlMWFjNTUtZDRlZDY3NmYtNjAzNDEyYTI=, ActorId: [59:7511668931377856014:2347], ActorState: ExecuteState, TraceId: 01jwtnhxqy0498pwgf89jqjga9, Create QueryResponse for error on request, msg: 2025-06-03T10:31:52.323358Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7511668927082887981:2113] Handle TEvExecuteKqpTransaction 2025-06-03T10:31:52.323367Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7511668927082887981:2113] TxId# 281474976715664 ProcessProposeKqpTransaction >> KqpScanLogs::WideCombine-EnabledLogs [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::MultiShardTableOneUniqIndexDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 7472, MsgBus: 32485 2025-06-03T10:31:04.143293Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668725631941296:2138];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:04.144353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c6a/r3tmp/tmpoEeRog/pdisk_1.dat 2025-06-03T10:31:04.225626Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:04.225741Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668725631941196:2079] 1748946664142639 != 1748946664142642 TServer::EnableGrpc on GrpcPort 7472, node 1 2025-06-03T10:31:04.242357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:04.242374Z node 1 :NET_CLASSIFIER WARN: 
net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:04.242377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:04.242452Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32485 2025-06-03T10:31:04.289963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:04.290007Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:04.290562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:32485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:04.338691Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.341727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.350103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.427726Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.462856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:04.494841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:04.702498Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668725631942834:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:04.702564Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:04.761486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.780201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.851557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.878810Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.894757Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.923582Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.998540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.027272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729926910788:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.027321Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.027336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729926910793:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.029720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:05.033931Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668729926910795:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:31:05.134721Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668729926910846:3404] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:05.511518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.729723Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710677. Ctx: { TraceId: 01jwtngg7x5s5965grpys5t6f8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDY0YzJlNmUtZWU4MWY3NDktYTU4YjBlYjUtMTFkODdmNTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.733558Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jwtngg7x2xcrmncey1venwwe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRhYjllZWEtNmZkNDEyYzYtNTdiNDA3YmEtZGZiZTU4ZTY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.733810Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710674. Ctx: { TraceId: 01jwtngg7x56857w2xf3dcnw72, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTIzM2Y5NWUtOWRiY2JkYTEtYWRlMmU0YmYtZjdlNjAxZDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.733851Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710678. Ctx: { TraceId: 01jwtngg7yakzt6d6vvkt55tzc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2U2YzhmMjYtOWEwMDg2OTAtZmFlZjhhMzMtYWRkZTRiMGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.734163Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710675. Ctx: { TraceId: 01jwtngg7xd87qcwmrjrf0gn3k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjU3NDdkOTAtNDA4MThjODEtNmY3YzZlZWMtY2U0Yjk3ZDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.734377Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710676. Ctx: { TraceId: 01jwtngg7x9ydryjm8raehgr18, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjE2Njg0YWItMzViOTE3MzEtYTg0MjdmZjQtYWQ1OTMyYTI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.737779Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710679. Ctx: { TraceId: 01jwtngg7ydqzs9zsdmrwp18gs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDllMzNiNjgtNDFkOWVhNGUtNjU0Y2VjZGMtODc4YmNhMmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:05.738664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710680. Ctx: { TraceId: 01jwtngg7yahtzd2q80nh5pj02, Database: , DatabaseId: /R ... 
zI5MDEtNmY4NTFjODktNWZlZjlkM2QtZjM5YTg2YmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.304405Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727070. Ctx: { TraceId: 01jwtnhypm978q0nxdmp27t93s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzAwMWMzYTctNjI2YmVjOTgtNDc1MzlmNDQtNTIxZTQ2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.304572Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727071. Ctx: { TraceId: 01jwtnhypjafcyvptbh3h8syw9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTk0YzY1MjItNTE0ZDIyMDAtNDAxZjNhZi0zYTRhMjE4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.304961Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727072. Ctx: { TraceId: 01jwtnhypp7gqvjn8q617ge52k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTQ0ZjUyZTktNWMxZjk0MWUtODJhMGJkMGEtNTU1MDJlYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.305013Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727073. Ctx: { TraceId: 01jwtnhypj6qwzvs17df1r18bn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY0NmUyMTktZDFjOGMwZmQtNDNjZWUzZjctOGQ0Yjg3ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.306212Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727074. Ctx: { TraceId: 01jwtnhypjafcyvptbh3h8syw9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTk0YzY1MjItNTE0ZDIyMDAtNDAxZjNhZi0zYTRhMjE4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.307011Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727075. Ctx: { TraceId: 01jwtnhypm978q0nxdmp27t93s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzAwMWMzYTctNjI2YmVjOTgtNDc1MzlmNDQtNTIxZTQ2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.307566Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727076. Ctx: { TraceId: 01jwtnhypp7gqvjn8q617ge52k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTQ0ZjUyZTktNWMxZjk0MWUtODJhMGJkMGEtNTU1MDJlYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.308894Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727077. Ctx: { TraceId: 01jwtnhypp7gqvjn8q617ge52k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTQ0ZjUyZTktNWMxZjk0MWUtODJhMGJkMGEtNTU1MDJlYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.308909Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727078. Ctx: { TraceId: 01jwtnhyptdbs5bkggvzbcth23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTc0MGY0ZjMtNmI3Y2MwZjAtY2JhMTkyMmUtM2FmNjhmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.311006Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727079. 
Ctx: { TraceId: 01jwtnhyptdbs5bkggvzbcth23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTc0MGY0ZjMtNmI3Y2MwZjAtY2JhMTkyMmUtM2FmNjhmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.311688Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727081. Ctx: { TraceId: 01jwtnhypx85nn9gasae63y1xq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWQ0NTNlZWYtYWQ2Y2JmNS0xOTMzMDg3MC1iYzBhNmQwYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.311963Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727080. Ctx: { TraceId: 01jwtnhyptdbs5bkggvzbcth23, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTc0MGY0ZjMtNmI3Y2MwZjAtY2JhMTkyMmUtM2FmNjhmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.313853Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727082. Ctx: { TraceId: 01jwtnhypx85nn9gasae63y1xq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWQ0NTNlZWYtYWQ2Y2JmNS0xOTMzMDg3MC1iYzBhNmQwYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.314312Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727083. Ctx: { TraceId: 01jwtnhypz3nvebrzq34qt423y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY0NmUyMTktZDFjOGMwZmQtNDNjZWUzZjctOGQ0Yjg3ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.314677Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727084. Ctx: { TraceId: 01jwtnhyq0e0dc8q4ajh460dj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.316478Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727085. Ctx: { TraceId: 01jwtnhypx85nn9gasae63y1xq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YWQ0NTNlZWYtYWQ2Y2JmNS0xOTMzMDg3MC1iYzBhNmQwYg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.316563Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727087. Ctx: { TraceId: 01jwtnhypz3nvebrzq34qt423y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY0NmUyMTktZDFjOGMwZmQtNDNjZWUzZjctOGQ0Yjg3ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.316701Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727086. Ctx: { TraceId: 01jwtnhyq0e0dc8q4ajh460dj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.318042Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727088. Ctx: { TraceId: 01jwtnhypz3nvebrzq34qt423y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YzY0NmUyMTktZDFjOGMwZmQtNDNjZWUzZjctOGQ0Yjg3ODc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.318162Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727089. 
Ctx: { TraceId: 01jwtnhyq0e0dc8q4ajh460dj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.319603Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727090. Ctx: { TraceId: 01jwtnhyq3fe7dp024xvd7rn5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTk0YzY1MjItNTE0ZDIyMDAtNDAxZjNhZi0zYTRhMjE4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.320066Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727091. Ctx: { TraceId: 01jwtnhyq0e0dc8q4ajh460dj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.321744Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727092. Ctx: { TraceId: 01jwtnhyq3fe7dp024xvd7rn5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTk0YzY1MjItNTE0ZDIyMDAtNDAxZjNhZi0zYTRhMjE4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.322315Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727093. Ctx: { TraceId: 01jwtnhyq68vshhec794jwqzx4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTQ0ZjUyZTktNWMxZjk0MWUtODJhMGJkMGEtNTU1MDJlYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.322458Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727094. Ctx: { TraceId: 01jwtnhyq3fe7dp024xvd7rn5x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZTk0YzY1MjItNTE0ZDIyMDAtNDAxZjNhZi0zYTRhMjE4YQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.322535Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727095. Ctx: { TraceId: 01jwtnhyq97qznscwzptmhkwdd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NTc0MGY0ZjMtNmI3Y2MwZjAtY2JhMTkyMmUtM2FmNjhmMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-03T10:31:53.324002Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727096. Ctx: { TraceId: 01jwtnhyq68vshhec794jwqzx4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTQ0ZjUyZTktNWMxZjk0MWUtODJhMGJkMGEtNTU1MDJlYmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-03T10:31:53.326059Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727097. Ctx: { TraceId: 01jwtnhyqc21mkcda0xyye7wsr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzAwMWMzYTctNjI2YmVjOTgtNDc1MzlmNDQtNTIxZTQ2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS 2025-06-03T10:31:53.326529Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727098. Ctx: { TraceId: 01jwtnhyqcfsy14prhehme77s0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:53.327304Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727099. Ctx: { TraceId: 01jwtnhyqc21mkcda0xyye7wsr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzAwMWMzYTctNjI2YmVjOTgtNDc1MzlmNDQtNTIxZTQ2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.327424Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727100. Ctx: { TraceId: 01jwtnhyqcfsy14prhehme77s0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.328194Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727101. Ctx: { TraceId: 01jwtnhyqc21mkcda0xyye7wsr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NzAwMWMzYTctNjI2YmVjOTgtNDc1MzlmNDQtNTIxZTQ2ODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:53.328262Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976727102. Ctx: { TraceId: 01jwtnhyqcfsy14prhehme77s0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTgyN2I2Zi03NzRhYjIyNS0yZjgwZmExOC00OTUwYzk1Zg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS finished with status: SUCCESS ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_proxy/ut_schemereq/unittest >> SchemeReqAccess::AlterLoginProtect-RootDB-Auth-LocalUser-DropUser-72 [GOOD] Test command err: Starting YDB, grpc: 10652, msgbus: 20647 2025-06-03T10:31:20.135364Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668793111934724:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:20.135449Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//dc-1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001430/r3tmp/tmpaiSPDH/pdisk_1.dat 2025-06-03T10:31:20.241681Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:20.246291Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:20.246323Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:20.249275Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 10652, node 1 2025-06-03T10:31:20.271583Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:20.271597Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:20.271600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:20.271656Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:20.299301Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:7511668793111934800:2139] HANDLE 
TEvClientConnected success connect from tablet# 72057594046447617 TClient is connected to server localhost:20647 WaitRootIsUp 'dc-1'... TClient::Ls request: dc-1 2025-06-03T10:31:20.303719Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:31:20.306946Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:7511668793111934800:2139] Handle TEvNavigate describe path dc-1 2025-06-03T10:31:20.310257Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:7511668793111935226:2413] HANDLE EvNavigateScheme dc-1 2025-06-03T10:31:20.311623Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:7511668793111935226:2413] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:20.339994Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:7511668793111935226:2413] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "dc-1" Options { ReturnBoundaries: true ShowPrivateTable: true ReturnRangeKey: true } 2025-06-03T10:31:20.342358Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:7511668793111935226:2413] Handle TEvDescribeSchemeResult Forward to# [1:7511668793111935223:2411] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 2 Record# Status: StatusSuccess Path: "dc-1" PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/dc-1" } } } PathId: 1 PathOwnerId: 72057594046644480 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "dc-1" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'dc-1' success. 
2025-06-03T10:31:20.348725Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668793111934800:2139] Handle TEvProposeTransaction 2025-06-03T10:31:20.348738Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668793111934800:2139] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:31:20.348781Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668793111934800:2139] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:7511668793111935274:2444] 2025-06-03T10:31:20.362357Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668793111935274:2444] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "dc-1" StoragePools { Name: "" Kind: "tenant-db" } StoragePools { Name: "/dc-1:test" Kind: "test" } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:20.362434Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668793111935274:2444] txid# 281474976715657 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:20.362439Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668793111935274:2444] txid# 281474976715657 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:20.362458Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668793111935274:2444] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:20.362588Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668793111935274:2444] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:20.362635Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668793111935274:2444] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# false 2025-06-03T10:31:20.362650Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668793111935274:2444] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:31:20.362704Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668793111935274:2444] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:31:20.362948Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:20.364124Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668793111935274:2444] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:31:20.364142Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668793111935274:2444] txid# 281474976715657 SEND to# [1:7511668793111935273:2443] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} waiting... 
2025-06-03T10:31:20.368831Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:7511668793111934800:2139] Handle TEvProposeTransaction 2025-06-03T10:31:20.368843Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:7511668793111934800:2139] TxId# 281474976715658 ProcessProposeTransaction 2025-06-03T10:31:20.368854Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:7511668793111934800:2139] Cookie# 0 userReqId# "" txid# 281474976715658 SEND to# [1:7511668793111935312:2478] 2025-06-03T10:31:20.369669Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:7511668793111935312:2478] txid# 281474976715658 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\032\010\000\022\026\010\001\020\377\377\003\032\014root@builtin \003" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "" 2025-06-03T10:31:20.369693Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:7511668793111935312:2478] txid# 281474976715658 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:20.369699Z node 1 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [1:7511668793111935312:2478] txid# 281474976715658 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:20.369721Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:7511668793111935312:2478] txid# 281474976715658 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:20.369826Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:7511668793111935312:2478] txid# 281474976715658 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:20.369852Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:7511668793111935312:2478] HANDLE EvNavigateKeySetResult, txid# 281474976715658 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:20.369867Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:7511668793111935312:2478] txid# 281474976715658 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715658 TabletId# 72057594046644480} 2025-06-03T10:31:20.369907Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:7511668793111935312:2478] txid# 281474976715658 HANDLE EvClientConnected 2025-06-03T10:31:20.370040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:20.371028Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:7511668793111935312:2478] txid# 281474976715658 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715658} 2025-06-03T10:31:20.371047Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:7511668793111935312:2478] txid# 281474976715658 SEND t ... 
:31:55.721047Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668943404238819:2548] HANDLE EvNavigateKeySetResult, txid# 281474976715661 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:55.721061Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668943404238819:2548] txid# 281474976715661 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715661 TabletId# 72057594046644480} 2025-06-03T10:31:55.721116Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668943404238819:2548] txid# 281474976715661 HANDLE EvClientConnected 2025-06-03T10:31:55.724483Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668943404238819:2548] txid# 281474976715661 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715661} 2025-06-03T10:31:55.724503Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668943404238819:2548] txid# 281474976715661 SEND to# [59:7511668943404238818:2330] Source {TEvProposeTransactionStatus txid# 281474976715661 Status# 48} 2025-06-03T10:31:55.878491Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668943404238112:2113] Handle TEvProposeTransaction 2025-06-03T10:31:55.878509Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668943404238112:2113] TxId# 281474976715662 ProcessProposeTransaction 2025-06-03T10:31:55.878527Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668943404238112:2113] Cookie# 0 userReqId# "" txid# 281474976715662 SEND to# [59:7511668943404238840:2563] 2025-06-03T10:31:55.879440Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668943404238840:2563] txid# 281474976715662 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "" OperationType: ESchemeOpModifyACL ModifyACL { Name: "dc-1" DiffACL: "\n\022\010\001\022\016\032\014ordinaryuser\n\032\010\000\022\026\010\001\020\200\200\002\032\014ordinaryuser \000" } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48574" 2025-06-03T10:31:55.879462Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668943404238840:2563] txid# 281474976715662 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:55.879466Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668943404238840:2563] txid# 281474976715662 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:55.879485Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668943404238840:2563] txid# 281474976715662 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:55.879601Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668943404238840:2563] txid# 281474976715662 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:55.879637Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668943404238840:2563] HANDLE EvNavigateKeySetResult, txid# 281474976715662 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 
RedirectRequired# true 2025-06-03T10:31:55.879649Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668943404238840:2563] txid# 281474976715662 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715662 TabletId# 72057594046644480} 2025-06-03T10:31:55.879694Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668943404238840:2563] txid# 281474976715662 HANDLE EvClientConnected 2025-06-03T10:31:55.879832Z node 59 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:55.880636Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668943404238840:2563] txid# 281474976715662 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715662} 2025-06-03T10:31:55.880651Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668943404238840:2563] txid# 281474976715662 SEND to# [59:7511668943404238839:2343] Source {TEvProposeTransactionStatus txid# 281474976715662 Status# 48} 2025-06-03T10:31:55.885966Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668943404238112:2113] Handle TEvProposeTransaction 2025-06-03T10:31:55.885986Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668943404238112:2113] TxId# 281474976715663 ProcessProposeTransaction 2025-06-03T10:31:55.886006Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668943404238112:2113] Cookie# 0 userReqId# "" txid# 281474976715663 SEND to# [59:7511668943404238873:2582] 2025-06-03T10:31:55.886956Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668943404238873:2582] txid# 281474976715663 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { CreateUser { User: "targetuser" Password: "passwd" CanLogin: true IsHashedPassword: false } } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48574" 2025-06-03T10:31:55.886993Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668943404238873:2582] txid# 281474976715663 Bootstrap, UserSID: root@builtin CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:55.886998Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668943404238873:2582] txid# 281474976715663 Bootstrap, UserSID: root@builtin IsClusterAdministrator: 1 2025-06-03T10:31:55.887012Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668943404238873:2582] txid# 281474976715663 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:55.887132Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668943404238873:2582] txid# 281474976715663 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:55.887166Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [59:7511668943404238873:2582] HANDLE EvNavigateKeySetResult, txid# 281474976715663 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:31:55.887186Z node 59 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [59:7511668943404238873:2582] txid# 281474976715663 SEND to# 
72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715663 TabletId# 72057594046644480} 2025-06-03T10:31:55.887247Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [59:7511668943404238873:2582] txid# 281474976715663 HANDLE EvClientConnected 2025-06-03T10:31:55.890778Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [59:7511668943404238873:2582] txid# 281474976715663 Status StatusSuccess HANDLE {TEvModifySchemeTransactionResult Status# StatusSuccess txid# 281474976715663} 2025-06-03T10:31:55.890800Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668943404238873:2582] txid# 281474976715663 SEND to# [59:7511668943404238872:2345] Source {TEvProposeTransactionStatus txid# 281474976715663 Status# 48} 2025-06-03T10:31:55.897584Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [59:7511668943404238112:2113] Handle TEvProposeTransaction 2025-06-03T10:31:55.897605Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [59:7511668943404238112:2113] TxId# 281474976715664 ProcessProposeTransaction 2025-06-03T10:31:55.897627Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [59:7511668943404238112:2113] Cookie# 0 userReqId# "" txid# 281474976715664 SEND to# [59:7511668943404238900:2594] 2025-06-03T10:31:55.898708Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [59:7511668943404238900:2594] txid# 281474976715664 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/dc-1" OperationType: ESchemeOpAlterLogin AlterLogin { RemoveUser { User: "targetuser" MissingOk: false } } } } UserToken: "\n\014ordinaryuser\022\030\022\026\n\024all-users@well-known\032\334\003eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTkxNSwiaWF0IjoxNzQ4OTQ2NzE1LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.jf7uOuPGJNn-Yd3rErQvmOKY4EMYqcOWfNjdKV5swqzwFk-JPQal503J0sEtig_cPjPh9ZWvWYQidLo4BBtRtgRwG53z0O_OVfbpNL9C2qDjf9Mq9SwIl1HRa9ttDWnh-JBZHNKV4f4_oqROgDv0GETVx9EQIOFcakbjGUAqDSrh5LdRBBFK_czcj3eQPkvHq2lQgacx9HG2uq1R_FmMZdGuIiJt4vatV_NC73eXZydd7V7M1tLgvRR7QqmOjOn2ckjhkTIwBNgt8t_47LswhIkkA1xwpGDCI7Uq3HNHV37wil-9-yZSz6rTdubQjbzMOkLXkE-5QwAPhcF5C9fNFQ\"\005Login*\210\001eyJhbGciOiJQUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOlsiXC9kYy0xIl0sImV4cCI6MTc0ODk4OTkxNSwiaWF0IjoxNzQ4OTQ2NzE1LCJzdWIiOiJvcmRpbmFyeXVzZXIifQ.**" DatabaseName: "/dc-1" RequestType: "" PeerName: "ipv6:[::1]:48574" 2025-06-03T10:31:55.898741Z node 59 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [59:7511668943404238900:2594] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 2025-06-03T10:31:55.898747Z node 59 :TX_PROXY DEBUG: schemereq.cpp:578: Actor# [59:7511668943404238900:2594] txid# 281474976715664 Bootstrap, UserSID: ordinaryuser IsClusterAdministrator: 0 2025-06-03T10:31:55.898854Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1401: Actor# [59:7511668943404238900:2594] txid# 281474976715664 HandleResolveDatabase, ResultSet size: 1 ResultSet error count: 0 2025-06-03T10:31:55.898875Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1436: Actor# [59:7511668943404238900:2594] txid# 281474976715664 HandleResolveDatabase, UserSID: ordinaryuser CheckAdministrator: 1 CheckDatabaseAdministrator: 1 IsClusterAdministrator: 0 IsDatabaseAdministrator: 0 DatabaseOwner: root@builtin 2025-06-03T10:31:55.898891Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [59:7511668943404238900:2594] txid# 281474976715664 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:31:55.898980Z node 59 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [59:7511668943404238900:2594] txid# 
281474976715664 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:31:55.898988Z node 59 :TX_PROXY ERROR: schemereq.cpp:1079: Actor# [59:7511668943404238900:2594] txid# 281474976715664, Access denied for ordinaryuser, attempt to manage user 2025-06-03T10:31:55.899014Z node 59 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [59:7511668943404238900:2594] txid# 281474976715664, issues: { message: "Access denied for ordinaryuser" issue_code: 200000 severity: 1 } 2025-06-03T10:31:55.899022Z node 59 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [59:7511668943404238900:2594] txid# 281474976715664 SEND to# [59:7511668943404238899:2356] Source {TEvProposeTransactionStatus Status# 5} 2025-06-03T10:31:55.899104Z node 59 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=59&id=ZDQxNzgwMzUtMjE5NzM1YTItZTM4NDIxZmYtNDA4NjU5MTc=, ActorId: [59:7511668943404238890:2356], ActorState: ExecuteState, TraceId: 01jwtnj17q1za7m2r9bvk3vvwx, Create QueryResponse for error on request, msg: 2025-06-03T10:31:55.899207Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [59:7511668943404238112:2113] Handle TEvExecuteKqpTransaction 2025-06-03T10:31:55.899221Z node 59 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [59:7511668943404238112:2113] TxId# 281474976715665 ProcessProposeKqpTransaction ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/service/unittest >> KqpQueryService::DdlExecuteScript [GOOD] Test command err: Trying to start YDB, gRPC: 20090, MsgBus: 3234 2025-06-03T10:30:12.600866Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668503740469580:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:30:12.601024Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d8a/r3tmp/tmpvYLIUB/pdisk_1.dat 2025-06-03T10:30:12.719201Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:30:12.720665Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668503740469423:2079] 1748946612598945 != 1748946612598948 TServer::EnableGrpc on GrpcPort 20090, node 1 2025-06-03T10:30:12.744538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:30:12.744552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:30:12.744554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:30:12.744603Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:30:12.754895Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:30:12.754923Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:30:12.755600Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3234 TClient is connected to server localhost:3234 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:30:12.869987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:30:12.876208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:12.924815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.007033Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.064486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:30:13.394336Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508035438358:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.394371Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.538356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.569387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.590658Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.612396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.624003Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.635960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.649177Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:30:13.669765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508035439019:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.669795Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.669911Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668508035439024:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:30:13.671025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:30:13.673611Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668508035439026:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:30:13.760781Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668508035439077:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:30:13.970640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:30:14.034283Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668512330406773:3637] txid# 281474976715675, issues: { message: "Check failed: path: \'/Root/TestDdl_0\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484" severity: 1 } 2025-06-03T10:30:14.034447Z node 1 :KQP_GATEWAY ERROR: scheme.h:178: Unexpected error on scheme request, TxId: 281474976715675, ProxyStatus: ExecComplete, SchemeShardReason: Check failed: path: '/Root/TestDdl_0', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484 2025-06-03T10:30:14.034497Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=1&id=Y2MyMWRmOGItMzBhMzg1NzktYmQ3YmU3OWUtZWI2YTdmYmU=, ActorId: [1:7511668512330406761:2532], ActorState: ExecuteState, TraceId: 01jwtnexrbarejbmk99t0jm89g, Create QueryResponse for error on request, msg: 2025-06-03T10:30:14.052897Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668512330406796:3648] txid# 281474976715677, issues: { message: "Check failed: path: \'/Root/TestDdl_0\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 17], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484" severity: 1 } 2025-06-03T10:30:14.071240Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037922 not found 2025-06-03T10:30:14.074293Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668512330406877:2557], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:17: Error: At function: KiReadTable!
:2:17: Error: Cannot find table 'db.[/Root/TestDdl_0]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:30:14.074411Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YmQ5Njc2NGMtOGY2MzVkNzAtOGY1ZTgzMmQtMWViMzIxNzg=, ActorId: [1:7511668512330406873:2556], ActorState: ExecuteState, TraceId: 01jwtnexsp193938tz6xgzbvg1, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:30:14.078645Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668512330406885:2561], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:2 ... e_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511668953319083320:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.143961Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.144719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:57.147412Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511668953319083322:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:57.202803Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511668953319083373:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:57.325890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.345861Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037922 not found 2025-06-03T10:31:57.350388Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 22867, MsgBus: 19691 2025-06-03T10:31:57.560072Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511668952850908063:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:57.560105Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d8a/r3tmp/tmp9ihQ9K/pdisk_1.dat 2025-06-03T10:31:57.575596Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22867, node 4 2025-06-03T10:31:57.586596Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:57.586609Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:57.586611Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:57.586656Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19691 TClient is connected to server localhost:19691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:57.660647Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:57.660672Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:57.661856Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:57.663077Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.669023Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.678623Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.695881Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.706813Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.887243Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668952850909650:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.887290Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.894458Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.903653Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.911869Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.925698Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.939903Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.953760Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.967673Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:57.983929Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668952850910303:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.983945Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511668952850910308:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.983953Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:57.984637Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:57.987041Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511668952850910310:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:58.080189Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511668957145877657:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:58.385382Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:31:58.385776Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:31:58.386203Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:31:58.530850Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715676:0, at schemeshard: 72057594046644480 >> KqpReturning::ReturningWorksIndexedDelete+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedDelete-QueryService >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReads [GOOD] Test command err: 2025-06-03T10:31:05.122299Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668732775894264:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:05.122403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00228d/r3tmp/tmp46j650/pdisk_1.dat 2025-06-03T10:31:05.222845Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:05.226440Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:05.226468Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:05.237031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20531, node 1 2025-06-03T10:31:05.252204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:05.252232Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:05.252243Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:05.252304Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:19116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:05.329023Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.337882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 TClient is connected to server localhost:19116 2025-06-03T10:31:05.669616Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895067:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.669677Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.723169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.785108Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895228:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.785146Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.787058Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665802 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo 2025-06-03T10:31:05.814398Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895329:2379], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814435Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814565Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895354:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814569Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895355:2397], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814579Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895356:2398], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814588Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895358:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814596Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895357:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814666Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895352:2394], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.814675Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895353:2395], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.815861Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895433:2413], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.815873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668732775895440:2415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.815880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.815925Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.815983Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.815989Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715664:1, at schemeshard: 72057594046644480 2025-06-03T10:31:05.816006Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.816010Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715664:2, at schemeshard: 72057594046644480 2025-06-03T10:31:05.816026Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.816036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715664:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-03T10:31:05.816089Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715664:3 1 -> 128 2025-06-03T10:31:05.816160Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:4, propose status:StatusAccepted, reason: , at schemeshard ... 0. Ctx: { TraceId: 01jwtnhqdn3kannjdm09cveqr9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjVhNzkzMWEtYzBjYjgyMmEtOTkzOTNmZjAtOGRlZmEwZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.850968Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776261. Ctx: { TraceId: 01jwtnhqdtf4ee0wzq2zweqmdb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjIwOWZiYTEtNTA4YTUwN2QtYjgwOGI1NDYtZTk1NTlkODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.852593Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776262. Ctx: { TraceId: 01jwtnhqdwd1cd52h4gg3fkn7a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQ2NzI2YTgtZGQ3NDE0MDUtYjZhY2I3ODYtMzdkMjBjNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858181Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776263. Ctx: { TraceId: 01jwtnhqdx64gt9dxt6rye6bs4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTI3MGJiNGEtNDg4ODJiZDUtYjI0MzM3NTktYjliZThkODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858379Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776267. 
Ctx: { TraceId: 01jwtnhqdxbvcn9e04yabwdknh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFmNTNkMDAtMjIzYmE2YjAtZTRhNzFlOWEtNDRhYmExZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858418Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776265. Ctx: { TraceId: 01jwtnhqdxfd0mrrzf2zfasbf9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjkzZjA4MzAtZjZlODIxNTctZGE5MzBhYjktZjUxYTg2M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858557Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776264. Ctx: { TraceId: 01jwtnhqdx7n3r305n4h1qga02, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2MzZTBjZGMtMWM2OTljMjUtMWQ5MmEyOTktNjhlYTdjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858575Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776266. Ctx: { TraceId: 01jwtnhqdx5fpqe1krba6rjsqq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2NmJlZDMtYjM5NDA2NGYtZjFkZTE0YTItZWU5NDRhODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858681Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776268. Ctx: { TraceId: 01jwtnhqdyfxvapsr4r6jnkfj5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwOGI1NDEtZjdjZjlhMWYtNzYxNGU2ZGMtNjIzMDM5NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858797Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776270. Ctx: { TraceId: 01jwtnhqdycr3asp3xamxfkrqv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjIwOWZiYTEtNTA4YTUwN2QtYjgwOGI1NDYtZTk1NTlkODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858892Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776269. Ctx: { TraceId: 01jwtnhqdy64bta6z1pbpkdj4f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjVhNzkzMWEtYzBjYjgyMmEtOTkzOTNmZjAtOGRlZmEwZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.858909Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776271. Ctx: { TraceId: 01jwtnhqdza3b5g8fejyc23hqf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQ2NzI2YTgtZGQ3NDE0MDUtYjZhY2I3ODYtMzdkMjBjNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.859012Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776272. Ctx: { TraceId: 01jwtnhqdycqyhaz54r85jx4mx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGExYTJmMjktODFmODEwZDItMWI4NzFjNi1lMmQ5NmZhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.864054Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776275. Ctx: { TraceId: 01jwtnhqe7frn11axkz97vwzvb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwOGI1NDEtZjdjZjlhMWYtNzYxNGU2ZGMtNjIzMDM5NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.864268Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776273. 
Ctx: { TraceId: 01jwtnhqe7fe6wa6rd8wn04gm2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTI3MGJiNGEtNDg4ODJiZDUtYjI0MzM3NTktYjliZThkODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.864378Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776274. Ctx: { TraceId: 01jwtnhqe71xg3wbjbr06we8h9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2NmJlZDMtYjM5NDA2NGYtZjFkZTE0YTItZWU5NDRhODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.866388Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776276. Ctx: { TraceId: 01jwtnhqe97j5wavzx7g1f0s02, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjkzZjA4MzAtZjZlODIxNTctZGE5MzBhYjktZjUxYTg2M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.868170Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776277. Ctx: { TraceId: 01jwtnhqeb8myxcxzcagbtbkmb, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQ2NzI2YTgtZGQ3NDE0MDUtYjZhY2I3ODYtMzdkMjBjNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-03T10:31:45.876466Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776278. Ctx: { TraceId: 01jwtnhqej62x98zr30x9a52zq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2MzZTBjZGMtMWM2OTljMjUtMWQ5MmEyOTktNjhlYTdjNWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.876715Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776279. Ctx: { TraceId: 01jwtnhqej2ac98w2bmbcz0xpg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjIwOWZiYTEtNTA4YTUwN2QtYjgwOGI1NDYtZTk1NTlkODk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.876885Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776280. Ctx: { TraceId: 01jwtnhqejcbwcwq8q88swkkfs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTFmNTNkMDAtMjIzYmE2YjAtZTRhNzFlOWEtNDRhYmExZWI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877034Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776281. Ctx: { TraceId: 01jwtnhqej7d5734jneqe0ytyn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDkwOGI1NDEtZjdjZjlhMWYtNzYxNGU2ZGMtNjIzMDM5NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877159Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776282. Ctx: { TraceId: 01jwtnhqej56b65mvc881zbkct, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NjVhNzkzMWEtYzBjYjgyMmEtOTkzOTNmZjAtOGRlZmEwZTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877266Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776283. Ctx: { TraceId: 01jwtnhqej35skg96cgmvxrf4m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NGExYTJmMjktODFmODEwZDItMWI4NzFjNi1lMmQ5NmZhMw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:45.877402Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776284. Ctx: { TraceId: 01jwtnhqej7gv9gfkahpjxf0g8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2NmJlZDMtYjM5NDA2NGYtZjFkZTE0YTItZWU5NDRhODQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877496Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776285. Ctx: { TraceId: 01jwtnhqej9f3w95spk5t4hbf4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTI3MGJiNGEtNDg4ODJiZDUtYjI0MzM3NTktYjliZThkODY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877601Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776286. Ctx: { TraceId: 01jwtnhqejcf8722141esqn4s1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjkzZjA4MzAtZjZlODIxNTctZGE5MzBhYjktZjUxYTg2M2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:45.877718Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976776287. Ctx: { TraceId: 01jwtnhqejbxj5vktz35b44xzg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQ2NzI2YTgtZGQ3NDE0MDUtYjZhY2I3ODYtMzdkMjBjNDU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665802 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946665802 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) Table has 2 shards ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::JoinSameKey [GOOD] Test command err: Trying to start YDB, gRPC: 8761, MsgBus: 21114 2025-06-03T10:31:46.601688Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668907437540355:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:46.601722Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d9/r3tmp/tmpVyfWro/pdisk_1.dat 2025-06-03T10:31:46.666174Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:46.666720Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668907437540324:2079] 1748946706601549 != 1748946706601552 TServer::EnableGrpc on GrpcPort 8761, node 1 2025-06-03T10:31:46.688875Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:46.688889Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:46.688892Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:46.688928Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21114 2025-06-03T10:31:46.738690Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:46.738718Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:21114 2025-06-03T10:31:46.739859Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:46.753921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:46.762339Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.780683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.842590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.855784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:46.985235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668907437541963:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:46.985281Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:47.036595Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.045922Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.055256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.069749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.083992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.098276Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.156986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:47.171714Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668911732509912:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:47.171766Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:47.171928Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668911732509917:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:47.173128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:47.179909Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668911732509919:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:31:47.268981Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668911732509970:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 1993, MsgBus: 5935 2025-06-03T10:31:47.721717Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668909788507347:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:47.721755Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d9/r3tmp/tmp97YYjw/pdisk_1.dat 2025-06-03T10:31:47.737764Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1993, node 2 2025-06-03T10:31:47.745316Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:47.745330Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:47.745332Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:47.745379Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5935 TClient is connected to server localhost:5935 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:47.826259Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:47.826298Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:47.826695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:47.827229Z node 2 :HIV ... 
e: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.465441Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.480021Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.493951Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.508101Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:52.524697Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668932938688925:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.524704Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668932938688930:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.524719Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:52.525490Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:52.534837Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511668932938688932:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:52.625548Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511668932938688983:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 12266, MsgBus: 25732 2025-06-03T10:31:53.077852Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511668936089806830:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:53.078262Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d9/r3tmp/tmpqdTZ0y/pdisk_1.dat 2025-06-03T10:31:53.102063Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:53.102467Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7511668936089806810:2079] 1748946713077720 != 1748946713077723 TServer::EnableGrpc on GrpcPort 12266, node 7 2025-06-03T10:31:53.118423Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:53.118437Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:53.118439Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:53.118506Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25732 TClient is connected to server localhost:25732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:53.185359Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:53.185391Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:53.185878Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:53.186416Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:53.192494Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:53.206785Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:53.268335Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:53.284126Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:53.413232Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668936089808443:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:53.413260Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:53.423197Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.431392Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.445463Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.459913Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.473929Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.487864Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.502674Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:53.519356Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668936089809096:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:53.519401Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668936089809101:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:53.519400Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:53.520155Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:53.528680Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511668936089809103:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:53.597698Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668936089809154:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithDeletes [GOOD] Test command err: 2025-06-03T10:31:10.812302Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668750739687218:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:10.812426Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002247/r3tmp/tmpK6XOV2/pdisk_1.dat 2025-06-03T10:31:10.947359Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:10.947404Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:10.955817Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:10.957497Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 23647, node 1 2025-06-03T10:31:10.997626Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:10.997641Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:10.997643Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:10.997700Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20996 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:11.070389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:20996 2025-06-03T10:31:11.731710Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655328:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.731757Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.738081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:11.837658Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655508:2348], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.837691Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.841254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946671857 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo 2025-06-03T10:31:11.876480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655607:2381], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876542Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876671Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655623:2390], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876682Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655626:2393], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } TClient::Ls response: 2025-06-03T10:31:11.876691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655631:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876700Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655635:2399], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876708Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655637:2400], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876737Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655624:2391], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:11.876862Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668755034655625:2392], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946671857 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:11.878184Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:11.878247Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:11.878251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715664:1, at schemeshard: 72057594046644480 2025-06-03T10:31:11.878268Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:11.878273Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715664:2, at schemeshard: 72057594046644480 2025-06-03T10:31:11.878286Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715664:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:11.878296Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715664:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-03T10:31:11.878352Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715664:3 1 -> 128 2025-06-03 ... 1474976748251. Ctx: { TraceId: 01jwtnhrfc80k49a25kevtejhs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQxYjYyM2UtZTM3ODA3Mi04MjlmODExNS1jMTQ5ZWMzMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.926634Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748252. Ctx: { TraceId: 01jwtnhrfce9xfjfxw2rtd829g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGUxNGM1Mi1iMjlkZWY1MS1iMTQ1OWQ1Zi02N2FkZWI1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.927512Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748247. Ctx: { TraceId: 01jwtnhrfc3xrhwq6keewkqym3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWMwYTk5ZWQtYzA1MDY0M2UtOGNiZGNhYzgtZDUyNDQ4NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.927616Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748254. Ctx: { TraceId: 01jwtnhrfc6n26658fb8fxbm2q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWE5YjY3OWUtMWUwODkzYjItODM0MzNjNGYtYWQ5MzQ1ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.927871Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748248. Ctx: { TraceId: 01jwtnhrfcbkf23c22h8ts2r35, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1OGYxMC1hOTY5YWE1Mi1hZDczOGYzMC1iMmVjZDJhOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.928213Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748249. Ctx: { TraceId: 01jwtnhrfc48zc3gqxnfee4rn5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ0M2FjOC1hYTNiNTI4Yi00ZWY1NzdhLTQwY2RiNDY5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.929324Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748256. Ctx: { TraceId: 01jwtnhrfgcy1e5hvp3cpeqre7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2ODJhODgtYWVkZmY3ZjgtZTQ2MGM1MDAtM2M2ZjcyYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.929345Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748255. Ctx: { TraceId: 01jwtnhrfg5zktd1zqawkvn5m7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGMzZjhhYmItMWVjZWVlZTgtZmY4ZGE1NDUtYTQyZDUzNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.930664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748257. Ctx: { TraceId: 01jwtnhrfh8d3df4zqdq2bnec8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQxYjYyM2UtZTM3ODA3Mi04MjlmODExNS1jMTQ5ZWMzMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.930698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748258. Ctx: { TraceId: 01jwtnhrfh09523087vhdgvem4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc4ODI4NDYtZGFiODFlNjktZTRhNTgwNWYtMWQ4MTkzMDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.930962Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748259. Ctx: { TraceId: 01jwtnhrfh8srwm7arma16w08k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY1MjZiNy1lZDdjOWYyMy0xMjlhNzg5MS1kNTc0MGY2ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.930977Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748260. Ctx: { TraceId: 01jwtnhrfhepfsvf7jqjadfkt1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGUxNGM1Mi1iMjlkZWY1MS1iMTQ1OWQ1Zi02N2FkZWI1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.932018Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748261. Ctx: { TraceId: 01jwtnhrfkfm8cq0cm88jzxqgd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWMwYTk5ZWQtYzA1MDY0M2UtOGNiZGNhYzgtZDUyNDQ4NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.932050Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748262. Ctx: { TraceId: 01jwtnhrfk4ph18j26djef2wjv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1OGYxMC1hOTY5YWE1Mi1hZDczOGYzMC1iMmVjZDJhOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.932566Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748263. Ctx: { TraceId: 01jwtnhrfk09tbbfr30hgghf5q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWE5YjY3OWUtMWUwODkzYjItODM0MzNjNGYtYWQ5MzQ1ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.932638Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748264. Ctx: { TraceId: 01jwtnhrfm4dzt32f1anmj4mbn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ0M2FjOC1hYTNiNTI4Yi00ZWY1NzdhLTQwY2RiNDY5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.933632Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748265. Ctx: { TraceId: 01jwtnhrfnb5z6fk601ty7hsf2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2ODJhODgtYWVkZmY3ZjgtZTQ2MGM1MDAtM2M2ZjcyYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.933664Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748266. Ctx: { TraceId: 01jwtnhrfn889jw7026sqbzsht, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGMzZjhhYmItMWVjZWVlZTgtZmY4ZGE1NDUtYTQyZDUzNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-03T10:31:46.935989Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748269. Ctx: { TraceId: 01jwtnhrfp3z3frfda3h60wj2h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTY1MjZiNy1lZDdjOWYyMy0xMjlhNzg5MS1kNTc0MGY2ZA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.936018Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748268. Ctx: { TraceId: 01jwtnhrfp6mjgdqrqsyps8p96, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTQxYjYyM2UtZTM3ODA3Mi04MjlmODExNS1jMTQ5ZWMzMA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.936289Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748270. Ctx: { TraceId: 01jwtnhrfp2v4tpa52g3a7h8xa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGUxNGM1Mi1iMjlkZWY1MS1iMTQ1OWQ1Zi02N2FkZWI1ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.936405Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748267. Ctx: { TraceId: 01jwtnhrfp0btbdnkw9pma0rjg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODc4ODI4NDYtZGFiODFlNjktZTRhNTgwNWYtMWQ4MTkzMDc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.936625Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748274. 
Ctx: { TraceId: 01jwtnhrfq1rjsqcmptqbswa2d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWE5YjY3OWUtMWUwODkzYjItODM0MzNjNGYtYWQ5MzQ1ZmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: 2025-06-03T10:31:46.936834Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748271. Ctx: { TraceId: 01jwtnhrfq47n4mb882etq132h, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MWMwYTk5ZWQtYzA1MDY0M2UtOGNiZGNhYzgtZDUyNDQ4NmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.936873Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748272. Ctx: { TraceId: 01jwtnhrfq6r43bat6tx2t4e44, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzQ0M2FjOC1hYTNiNTI4Yi00ZWY1NzdhLTQwY2RiNDY5, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.937030Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748273. Ctx: { TraceId: 01jwtnhrfqb4qd5f1mpgkf2h0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MjI1OGYxMC1hOTY5YWE1Mi1hZDczOGYzMC1iMmVjZDJhOA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946671857 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:46.937423Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748276. Ctx: { TraceId: 01jwtnhrfr1yke1yfmkw804fnf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWM2ODJhODgtYWVkZmY3ZjgtZTQ2MGM1MDAtM2M2ZjcyYTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.937425Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976748275. Ctx: { TraceId: 01jwtnhrfr4w0xxg2xmqqd3r9q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGMzZjhhYmItMWVjZWVlZTgtZmY4ZGE1NDUtYTQyZDUzNzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946671857 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithNonEmptyRangeReads [GOOD] Test command err: 2025-06-03T10:31:09.019182Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668749830954228:2216];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:09.019245Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00224e/r3tmp/tmpv6qFWX/pdisk_1.dat 2025-06-03T10:31:09.139798Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:09.139828Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:09.143192Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:09.149440Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19878, node 1 2025-06-03T10:31:09.201571Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:09.201587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:09.201590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:09.201654Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30842 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:09.270190Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:30842 2025-06-03T10:31:09.605915Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668749830955023:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.605946Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.653610Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:09.782587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668749830955193:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.782614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.782725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668749830955198:2350], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:09.783784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:09.825512Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668749830955200:2351], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:09.913880Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668749830955269:2777] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:09.937314Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngm6p8jqsg5betxc3s3sn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.953776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtngmc0am2ghq9tpr5ygks6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.957861Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtngmc53fcf8kc64ceswy8z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.961914Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtngmc9bb47mmx154mybyf0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.965006Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtngmcc2mdch04h6cvvp72s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.968222Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtngmcf4ag4wr8sqmrefyxy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.975574Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtngmcpaxghe7sf97ephdnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.981989Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. Ctx: { TraceId: 01jwtngmcxdby4f26eqb9q4bda, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.987183Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. 
Ctx: { TraceId: 01jwtngmd22kjxrstfy51t773f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.990770Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jwtngmd62cr6zpgr0ypcngb6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.995257Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtngmdafjzw7psjjsbrghvs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:09.998589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jwtngmdeekep661h96hymebc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.007469Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtngmdjfg15wnq2nxkjv8y7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.025453Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jwtngme58jjk3mgtcqw3bf3x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.042236Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtngmes4j0ne542qmddmx60, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.055430Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jwtngmf6d99j19fzen2gx501, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.078131Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jwtngmfvepm3d03tapt20s75, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTczM2IwZTItMjM4ZTRkMTAtNzg4MWM3NTgtM2ZkN2FmYjI=, CurrentExecutionId: , CustomerS ... 2. Ctx: { TraceId: 01jwtnhqq5c7edve1tr6fs68p0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlNzM1NmEtYmU0MzQ5MzQtN2YzNDFhMGYtY2FiMTQ1NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.151856Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742125. 
Ctx: { TraceId: 01jwtnhqq516qh6x3mjqfhexmz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQ0MjQwZWItMjQ5YzFiNDUtMTgyN2Q0MjgtZDEyYmQyNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.151864Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742123. Ctx: { TraceId: 01jwtnhqq5dv9rdb5czhb4cs6y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY5ZmY3MmItZWNkZWJhYTMtNjliM2EwYTctZDNjYzc5MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.152012Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742124. Ctx: { TraceId: 01jwtnhqq54yc3c5pw7pdfgaz5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWFiYzA0MTAtODJjM2VhZGMtNGUzNjI3ZTUtN2JhM2NhOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.154816Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742126. Ctx: { TraceId: 01jwtnhqq6ema3wfkyhmmbjzhz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlOGI3ZDktYTMxODhlYzYtMjAwZTU3ZGEtMzc3MjNjZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.156544Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742127. Ctx: { TraceId: 01jwtnhqq94pwp9k412xv5nn6x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjk3ZjliNDAtMTRiZTZjYWQtN2IxNGI0NjktODkwZmM1MGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.156547Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742128. Ctx: { TraceId: 01jwtnhqq9bf150gpya4s0gy6j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzhhNmVjZGEtNGIzMDFlMDYtZWY4YjBmOWEtMzM1ZTlmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.156977Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742129. Ctx: { TraceId: 01jwtnhqqb1wysdv4yaz6a7574, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc2NWYxZDQtMjRmOTAzYmEtNzhjODBkYjYtNTE2Mzg2ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.158016Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742130. Ctx: { TraceId: 01jwtnhqqb5jz3mqk4ejwff9jg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQ0MjQwZWItMjQ5YzFiNDUtMTgyN2Q0MjgtZDEyYmQyNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.158627Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742134. Ctx: { TraceId: 01jwtnhqqc0pvrpfy7zshpvcg4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWFiYzA0MTAtODJjM2VhZGMtNGUzNjI3ZTUtN2JhM2NhOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.158705Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742131. Ctx: { TraceId: 01jwtnhqqb8rvmncscg4ppjqyw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmExOGIyOGItMWRiYTIwMzQtZjk4MzI2MjgtZGFhNzE1M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.158880Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742132. 
Ctx: { TraceId: 01jwtnhqqb60rescmrpypkq27d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY5ZmY3MmItZWNkZWJhYTMtNjliM2EwYTctZDNjYzc5MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.158981Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742133. Ctx: { TraceId: 01jwtnhqqc6yjfv82recw1hmab, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlNzM1NmEtYmU0MzQ5MzQtN2YzNDFhMGYtY2FiMTQ1NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.160584Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742137. Ctx: { TraceId: 01jwtnhqqedjeqnafeknhcxgry, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzhhNmVjZGEtNGIzMDFlMDYtZWY4YjBmOWEtMzM1ZTlmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-03T10:31:46.160773Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742138. Ctx: { TraceId: 01jwtnhqqe2rfk0exv1rpveq0f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjk3ZjliNDAtMTRiZTZjYWQtN2IxNGI0NjktODkwZmM1MGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.160897Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742135. Ctx: { TraceId: 01jwtnhqqcdyg6e7zq1yc607sr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlOGI3ZDktYTMxODhlYzYtMjAwZTU3ZGEtMzc3MjNjZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.160918Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742136. Ctx: { TraceId: 01jwtnhqqc1xzvf44f1b3qs3c5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk5ZWU2NDctZDM0N2JkMzMtNjBkNjRhMmYtZmY0NjIzMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.162369Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742139. Ctx: { TraceId: 01jwtnhqqeab2sbtk9ap4t5jn5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc2NWYxZDQtMjRmOTAzYmEtNzhjODBkYjYtNTE2Mzg2ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.164485Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742142. Ctx: { TraceId: 01jwtnhqqhf07nt37451e1kkxe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWFiYzA0MTAtODJjM2VhZGMtNGUzNjI3ZTUtN2JhM2NhOTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.164895Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742140. Ctx: { TraceId: 01jwtnhqqh6mm977zkjc2y2pdc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmExOGIyOGItMWRiYTIwMzQtZjk4MzI2MjgtZGFhNzE1M2U=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.164983Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742141. Ctx: { TraceId: 01jwtnhqqh6xwtn60204gqkb9m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmQ0MjQwZWItMjQ5YzFiNDUtMTgyN2Q0MjgtZDEyYmQyNjc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:31:46.165093Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742143. Ctx: { TraceId: 01jwtnhqqhd76ac71mqftvn35m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjY5ZmY3MmItZWNkZWJhYTMtNjliM2EwYTctZDNjYzc5MTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946669771 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:46.166849Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742147. Ctx: { TraceId: 01jwtnhqqnb4dzawcvq9y84jyj, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzhhNmVjZGEtNGIzMDFlMDYtZWY4YjBmOWEtMzM1ZTlmMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.166881Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742148. Ctx: { TraceId: 01jwtnhqqndd9knwbx1xpx17cy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlNzM1NmEtYmU0MzQ5MzQtN2YzNDFhMGYtY2FiMTQ1NmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.166922Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742149. Ctx: { TraceId: 01jwtnhqqn7ambtp4xfrwypv8j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFlOGI3ZDktYTMxODhlYzYtMjAwZTU3ZGEtMzc3MjNjZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.167067Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742144. Ctx: { TraceId: 01jwtnhqqp46s3mhnbs71b1s3q, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDc2NWYxZDQtMjRmOTAzYmEtNzhjODBkYjYtNTE2Mzg2ZTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.167118Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742145. Ctx: { TraceId: 01jwtnhqqnfmhqhk3mgthdsqb9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Zjk3ZjliNDAtMTRiZTZjYWQtN2IxNGI0NjktODkwZmM1MGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:46.167182Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976742146. Ctx: { TraceId: 01jwtnhqqn3e6vnsgnmd4ddqsv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTk5ZWU2NDctZDM0N2JkMzMtNjBkNjRhMmYtZmY0NjIzMTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946669771 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 2 shards ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet_flat/ut/unittest >> Self::Literals [GOOD] Test command err: + BTreeIndex{PageId: 0 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385, 13 rev 1, 683b} | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | > {0, a, false, 0} | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | > {1, b, true, 10} | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | > {2, c, false, 20} | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | > {3, d, true, 30} | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | > {4, e, false, 40} | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | > {5, f, true, 50} | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | > {6, g, false, 60} | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | > {7, h, true, 70} | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | > {9, j, true, 90} | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 + BTreeIndex{PageId: 9 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 116b} | + BTreeIndex{PageId: 5 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | + BTreeIndex{PageId: 0 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93, 13 rev 1, 179b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, a, false, 0} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, b, true, 10} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | > {2, c, false, 20} | | + BTreeIndex{PageId: 1 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195, 13 rev 1, 179b} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, d, true, 30} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, e, false, 40} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | > {5, f, true, 50} | | + BTreeIndex{PageId: 2 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306, 13 rev 1, 179b} | | | PageId: 
10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, g, false, 60} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 16028 ErasedRowCount: 268 | | | > {7, h, true, 70} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | > {8, i, false, 80} | + BTreeIndex{PageId: 8 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 242b} | | + BTreeIndex{PageId: 3 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 179b} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, j, true, 90} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, k, false, 100} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, l, true, 110} | | + BTreeIndex{PageId: 4 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555, 13 rev 1, 179b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, m, false, 120} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, n, true, 130} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | > {14, o, false, 140} | | + BTreeIndex{PageId: 6 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693, 13 rev 1, 179b} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, p, true, 150} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, q, false, 160} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | > {17, r, true, 170} | | + BTreeIndex{PageId: 7 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840, 13 rev 1, 179b} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, s, false, 180} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, t, true, 190} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 + BTreeIndex{PageId: 15 RowCount: 15150 DataSize: 106050 GroupDataSize: 207050 ErasedRowCount: 8080, 13 rev 1, 174b} | + BTreeIndex{PageId: 12 RowCount: 9078 DataSize: 70278 GroupDataSize: 138278 ErasedRowCount: 4318, 13 rev 1, 690b} | | + BTreeIndex{PageId: 0 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426, 13 rev 1, 702b} | | | PageId: 10000 RowCount: 100 DataSize: 1000 GroupDataSize: 2000 ErasedRowCount: 30 | | | > {0, x, NULL, NULL} | | | PageId: 10001 RowCount: 201 DataSize: 2001 GroupDataSize: 4001 ErasedRowCount: 61 | | | > {1, xx, NULL, NULL} | | | PageId: 10002 RowCount: 303 DataSize: 3003 GroupDataSize: 6003 ErasedRowCount: 93 | | | > {2, xxx, NULL, NULL} | | | PageId: 10003 RowCount: 406 DataSize: 4006 GroupDataSize: 8006 ErasedRowCount: 126 | | | > {3, xxxx, NULL, NULL} | | | PageId: 10004 RowCount: 510 DataSize: 5010 GroupDataSize: 10010 ErasedRowCount: 160 | | | > {4, xxxxx, NULL, NULL} | | | PageId: 10005 RowCount: 615 DataSize: 6015 GroupDataSize: 12015 ErasedRowCount: 195 | | | > {5, xxxxxx, NULL, NULL} | | | PageId: 10006 RowCount: 721 DataSize: 7021 GroupDataSize: 14021 ErasedRowCount: 231 | | | > {6, xxxxxxx, NULL, NULL} | | | PageId: 10007 RowCount: 828 DataSize: 8028 GroupDataSize: 
16028 ErasedRowCount: 268 | | | > {7, xxxxxxxx, NULL, NULL} | | | PageId: 10008 RowCount: 936 DataSize: 9036 GroupDataSize: 18036 ErasedRowCount: 306 | | | > {8, xxxxxxxxx, NULL, NULL} | | | PageId: 10009 RowCount: 1045 DataSize: 10045 GroupDataSize: 20045 ErasedRowCount: 345 | | | > {9, xxxxxxxxxx, NULL, NULL} | | | PageId: 10010 RowCount: 1155 DataSize: 11055 GroupDataSize: 22055 ErasedRowCount: 385 | | | > {10, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10011 RowCount: 1266 DataSize: 12066 GroupDataSize: 24066 ErasedRowCount: 426 | | > {11, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 1 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891, 13 rev 1, 683b} | | | PageId: 10012 RowCount: 1378 DataSize: 13078 GroupDataSize: 26078 ErasedRowCount: 468 | | | > {12, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10013 RowCount: 1491 DataSize: 14091 GroupDataSize: 28091 ErasedRowCount: 511 | | | > {13, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10014 RowCount: 1605 DataSize: 15105 GroupDataSize: 30105 ErasedRowCount: 555 | | | > {14, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10015 RowCount: 1720 DataSize: 16120 GroupDataSize: 32120 ErasedRowCount: 600 | | | > {15, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10016 RowCount: 1836 DataSize: 17136 GroupDataSize: 34136 ErasedRowCount: 646 | | | > {16, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10017 RowCount: 1953 DataSize: 18153 GroupDataSize: 36153 ErasedRowCount: 693 | | | > {17, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10018 RowCount: 2071 DataSize: 19171 GroupDataSize: 38171 ErasedRowCount: 741 | | | > {18, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10019 RowCount: 2190 DataSize: 20190 GroupDataSize: 40190 ErasedRowCount: 790 | | | > {19, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10020 RowCount: 2310 DataSize: 21210 GroupDataSize: 42210 ErasedRowCount: 840 | | | > {20, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10021 RowCount: 2431 DataSize: 22231 GroupDataSize: 44231 ErasedRowCount: 891 | | > {21, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 2 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395, 13 rev 1, 689b} | | | PageId: 10022 RowCount: 2553 DataSize: 23253 GroupDataSize: 46253 ErasedRowCount: 943 | | | > {22, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10023 RowCount: 2676 DataSize: 24276 GroupDataSize: 48276 ErasedRowCount: 996 | | | > {23, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10024 RowCount: 2800 DataSize: 25300 GroupDataSize: 50300 ErasedRowCount: 1050 | | | > {24, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10025 RowCount: 2925 DataSize: 26325 GroupDataSize: 52325 ErasedRowCount: 1105 | | | > {25, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10026 RowCount: 3051 DataSize: 27351 GroupDataSize: 54351 ErasedRowCount: 1161 | | | > {26, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10027 RowCount: 3178 DataSize: 28378 GroupDataSize: 56378 ErasedRowCount: 1218 | | | > {27, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10028 RowCount: 3306 DataSize: 29406 GroupDataSize: 58406 ErasedRowCount: 1276 | | | > {28, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10029 RowCount: 3435 DataSize: 30435 GroupDataSize: 60435 ErasedRowCount: 1335 | | | > {29, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10030 RowCount: 3565 DataSize: 31465 GroupDataSize: 62465 ErasedRowCount: 1395 | | > {30, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 3 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911, 13 rev 1, 669b} | | | PageId: 10031 RowCount: 3696 DataSize: 32496 GroupDataSize: 64496 ErasedRowCount: 1456 | | | > {31, xxxxxxxxxx.., NULL, NULL} | | | 
PageId: 10032 RowCount: 3828 DataSize: 33528 GroupDataSize: 66528 ErasedRowCount: 1518 | | | > {32, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10033 RowCount: 3961 DataSize: 34561 GroupDataSize: 68561 ErasedRowCount: 1581 | | | > {33, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10034 RowCount: 4095 DataSize: 35595 GroupDataSize: 70595 ErasedRowCount: 1645 | | | > {34, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10035 RowCount: 4230 DataSize: 36630 GroupDataSize: 72630 ErasedRowCount: 1710 | | | > {35, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10036 RowCount: 4366 DataSize: 37666 GroupDataSize: 74666 ErasedRowCount: 1776 | | | > {36, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10037 RowCount: 4503 DataSize: 38703 GroupDataSize: 76703 ErasedRowCount: 1843 | | | > {37, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10038 RowCount: 4641 DataSize: 39741 GroupDataSize: 78741 ErasedRowCount: 1911 | | > {38, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 4 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491, 13 rev 1, 725b} | | | PageId: 10039 RowCount: 4780 DataSize: 40780 GroupDataSize: 80780 ErasedRowCount: 1980 | | | > {39, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10040 RowCount: 4920 DataSize: 41820 GroupDataSize: 82820 ErasedRowCount: 2050 | | | > {40, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10041 RowCount: 5061 DataSize: 42861 GroupDataSize: 84861 ErasedRowCount: 2121 | | | > {41, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10042 RowCount: 5203 DataSize: 43903 GroupDataSize: 86903 ErasedRowCount: 2193 | | | > {42, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10043 RowCount: 5346 DataSize: 44946 GroupDataSize: 88946 ErasedRowCount: 2266 | | | > {43, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10044 RowCount: 5490 DataSize: 45990 GroupDataSize: 90990 ErasedRowCount: 2340 | | | > {44, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10045 RowCount: 5635 DataSize: 47035 GroupDataSize: 93035 ErasedRowCount: 2415 | | | > {45, xxxxxxxxxx.., NULL, NULL} | | | PageId: 10046 RowCount: 5781 DataSize: 48081 GroupDataSize: 95081 ErasedRowCount: 2491 | | > {46, xxxxxxxxxx.., NULL, NULL} | | + BTreeIndex{PageId: 5 RowCount: 6831 DataSize: 55431 GroupDataSize: 109431 ErasedRowCount: 3051, 13 ... 
7 RowCount: 34 DataSize: 1202 GroupDataSize: 7632 ErasedRowCount: 0 | | | > {4, 10} | | | PageId: 70 RowCount: 36 DataSize: 1284 GroupDataSize: 8163 ErasedRowCount: 0 | | | > {5, 3} | | | PageId: 82 RowCount: 38 DataSize: 1350 GroupDataSize: 8602 ErasedRowCount: 0 | | | > {5, 6} | | | PageId: 87 RowCount: 40 DataSize: 1416 GroupDataSize: 9358 ErasedRowCount: 0 + Rows{0} Label{04 rev 1, 66b}, [0, +2)row | ERowOp 1: {0, 1} | ERowOp 1: {0, 3} + Rows{2} Label{24 rev 1, 66b}, [2, +2)row | ERowOp 1: {0, 4} | ERowOp 1: {0, 6} + Rows{4} Label{44 rev 1, 82b}, [4, +2)row | ERowOp 1: {0, 7} | ERowOp 1: {0, 8} + Rows{8} Label{84 rev 1, 66b}, [6, +2)row | ERowOp 1: {0, 10} | ERowOp 1: {1, 1} + Rows{11} Label{114 rev 1, 66b}, [8, +2)row | ERowOp 1: {1, 3} | ERowOp 1: {1, 4} + Rows{14} Label{144 rev 1, 82b}, [10, +2)row | ERowOp 1: {1, 6} | ERowOp 1: {1, 7} + Rows{20} Label{204 rev 1, 66b}, [12, +2)row | ERowOp 1: {1, 8} | ERowOp 1: {1, 10} + Rows{23} Label{234 rev 1, 66b}, [14, +2)row | ERowOp 1: {2, 1} | ERowOp 1: {2, 3} + Rows{26} Label{264 rev 1, 82b}, [16, +2)row | ERowOp 1: {2, 4} | ERowOp 1: {2, 6} + Rows{36} Label{364 rev 1, 66b}, [18, +2)row | ERowOp 1: {2, 7} | ERowOp 1: {2, 8} + Rows{39} Label{394 rev 1, 66b}, [20, +2)row | ERowOp 1: {2, 10} | ERowOp 1: {3, 1} + Rows{42} Label{424 rev 1, 82b}, [22, +2)row | ERowOp 1: {3, 3} | ERowOp 1: {3, 4} + Rows{48} Label{484 rev 1, 66b}, [24, +2)row | ERowOp 1: {3, 6} | ERowOp 1: {3, 7} + Rows{53} Label{534 rev 1, 66b}, [26, +2)row | ERowOp 1: {3, 8} | ERowOp 1: {3, 10} + Rows{58} Label{584 rev 1, 82b}, [28, +2)row | ERowOp 1: {4, 1} | ERowOp 1: {4, 3} + Rows{64} Label{644 rev 1, 66b}, [30, +2)row | ERowOp 1: {4, 4} | ERowOp 1: {4, 6} + Rows{67} Label{674 rev 1, 66b}, [32, +2)row | ERowOp 1: {4, 7} | ERowOp 1: {4, 8} + Rows{70} Label{704 rev 1, 82b}, [34, +2)row | ERowOp 1: {4, 10} | ERowOp 1: {5, 1} + Rows{82} Label{824 rev 1, 66b}, [36, +2)row | ERowOp 1: {5, 3} | ERowOp 1: {5, 4} + Rows{87} Label{874 rev 1, 66b}, [38, +2)row | ERowOp 1: {5, 6} | ERowOp 1: {5, 7} Slices{ [0, 39] } 0.29448 Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | 
ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + 
Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row | ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} Part{[1:2:3:0:0:0:0] eph 0, 1000b 40r} data 1479b + FlatIndex{20} Label{3 rev 3, 453b} 21 rec | Page Row Bytes (Uint32) | 0 0 50b {0} | 1 2 50b {2} | 2 4 50b {4} | 3 6 50b {6} | 4 8 50b {8} | 5 10 50b {10} | 6 12 50b {12} | 7 14 50b {14} | 8 16 50b {16} | 9 18 50b {18} | 10 20 50b {20} | 11 22 50b {22} | 12 24 50b {24} | 13 26 50b {26} | 14 28 50b {28} | 15 30 50b {30} | 16 32 50b {32} | 17 34 50b {34} | 18 36 50b {36} | 19 38 50b {38} | 19 39 50b {39} + Rows{0} Label{04 rev 1, 50b}, [0, +2)row | ERowOp 1: {0} {Set 1 Uint32 : 0} | ERowOp 1: {1} {Set 1 Uint32 : 100} + Rows{1} Label{14 rev 1, 50b}, [2, +2)row | ERowOp 1: {2} {Set 1 Uint32 : 200} | ERowOp 1: {3} {Set 1 Uint32 : 300} + Rows{2} Label{24 rev 1, 50b}, [4, +2)row | ERowOp 1: {4} {Set 1 Uint32 : 400} | ERowOp 1: {5} {Set 1 Uint32 : 500} + Rows{3} Label{34 rev 1, 50b}, [6, +2)row | ERowOp 1: {6} {Set 1 Uint32 : 600} | ERowOp 1: {7} {Set 1 Uint32 : 700} + Rows{4} Label{44 rev 1, 50b}, [8, +2)row | ERowOp 1: {8} {Set 1 Uint32 : 800} | ERowOp 1: {9} {Set 1 Uint32 : 900} + Rows{5} Label{54 rev 1, 50b}, [10, +2)row | ERowOp 1: {10} {Set 1 Uint32 : 1000} | ERowOp 1: {11} {Set 1 Uint32 : 1100} + Rows{6} Label{64 rev 1, 50b}, [12, +2)row | ERowOp 1: {12} {Set 1 Uint32 : 1200} | ERowOp 1: {13} {Set 1 Uint32 : 1300} + Rows{7} Label{74 rev 1, 50b}, [14, +2)row | ERowOp 1: {14} {Set 1 Uint32 : 1400} | ERowOp 1: {15} {Set 1 Uint32 : 1500} + Rows{8} Label{84 rev 1, 50b}, [16, +2)row | ERowOp 1: {16} {Set 1 Uint32 : 1600} | ERowOp 1: {17} {Set 1 Uint32 : 1700} + Rows{9} Label{94 rev 1, 50b}, [18, +2)row | ERowOp 1: {18} {Set 1 Uint32 : 1800} | ERowOp 1: {19} {Set 1 Uint32 : 1900} + Rows{10} Label{104 rev 1, 50b}, [20, +2)row | ERowOp 1: {20} {Set 1 Uint32 : 2000} | ERowOp 1: {21} {Set 1 Uint32 : 2100} + Rows{11} Label{114 rev 1, 50b}, [22, +2)row | ERowOp 1: {22} {Set 1 Uint32 : 2200} | ERowOp 1: {23} {Set 1 Uint32 : 2300} + Rows{12} Label{124 rev 1, 50b}, [24, +2)row | ERowOp 1: {24} {Set 1 Uint32 : 2400} | ERowOp 1: {25} {Set 1 Uint32 : 2500} + Rows{13} Label{134 rev 1, 50b}, [26, +2)row | ERowOp 1: {26} {Set 1 Uint32 : 2600} | ERowOp 1: {27} {Set 1 Uint32 : 2700} + Rows{14} Label{144 rev 1, 50b}, [28, +2)row | ERowOp 1: {28} {Set 1 Uint32 : 2800} | ERowOp 1: {29} {Set 1 Uint32 : 2900} + Rows{15} Label{154 rev 1, 50b}, [30, +2)row | ERowOp 1: {30} {Set 1 Uint32 : 3000} | ERowOp 1: {31} {Set 1 Uint32 : 3100} + Rows{16} Label{164 rev 1, 50b}, [32, +2)row | ERowOp 1: {32} {Set 1 Uint32 : 3200} | ERowOp 1: {33} {Set 1 Uint32 : 3300} + Rows{17} Label{174 rev 1, 50b}, [34, +2)row | ERowOp 1: {34} {Set 1 Uint32 : 3400} | ERowOp 1: {35} {Set 1 Uint32 : 3500} + Rows{18} Label{184 rev 1, 50b}, [36, +2)row | ERowOp 1: {36} {Set 1 Uint32 : 3600} | ERowOp 1: {37} {Set 1 Uint32 : 3700} + Rows{19} Label{194 rev 1, 50b}, [38, +2)row 
| ERowOp 1: {38} {Set 1 Uint32 : 3800} | ERowOp 1: {39} {Set 1 Uint32 : 3900} >> KqpNewEngine::Nondeterministic [GOOD] >> KqpNewEngine::OrderedScalarContext ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine-EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/002915/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk3 Trying to start YDB, gRPC: 17767, MsgBus: 3574 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002915/r3tmp/tmp1zMKh1/pdisk_1.dat TServer::EnableGrpc on GrpcPort 17767, node 1 TClient is connected to server localhost:3574 TClient is connected to server localhost:3574 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"22c7a060-b6c90dfa-4726fd6d-ab1923dc") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0))) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"81e49c45-907344a6-5164b75d-6d098784")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"6a9851f0-a8e9a7c4-92c6389d-28127e14")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |68.6%| [LD] {BAZEL_UPLOAD, SKIPPED} 
$(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk |68.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_read_only_vdisk/ut_blobstorage-ut_read_only_vdisk >> KqpRanges::WhereInSubquery >> KqpNotNullColumns::ReplaceNotNull |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |68.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |68.6%| [LD] {RESULT} $(B)/ydb/core/tx/balance_coverage/ut/ydb-core-tx-balance_coverage-ut |68.6%| [TA] $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log} |68.6%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_vdisk2/test-results/unittest/{meta.json ... results_accumulator.log} |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> TxUsage::WriteToTopic_Demo_14_Table [GOOD] >> KqpNewEngine::KeyColumnOrder |68.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut |68.6%| [LD] {RESULT} $(B)/ydb/core/tx/tx_allocator/ut/ydb-core-tx-tx_allocator-ut >> KqpReturning::ReturningTwice >> KqpSort::TopSortParameter >> KqpSqlIn::KeySuffix_OnlyTail >> KqpNamedExpressions::NamedExpressionChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink >> KqpNewEngine::StreamLookupWithView |68.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut >> TProxyActorTest::TestDisconnectWhileAttaching >> TProxyActorTest::TestCreateSemaphore |68.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |68.6%| [LD] {RESULT} $(B)/ydb/services/persqueue_cluster_discovery/ut/ydb-services-persqueue_cluster_discovery-ut |68.7%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} |68.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots >> TxUsage::WriteToTopic_Demo_31_Table [GOOD] >> TxUsage::WriteToTopic_Demo_14_Query >> KqpNewEngine::OrderedScalarContext [GOOD] >> KqpReturning::ReturningWorksIndexedDelete-QueryService [GOOD] >> KqpNewEngine::PagingNoPredicateExtract >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService >> TProxyActorTest::TestCreateSemaphore [GOOD] |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |68.6%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_cdc_stream_reboots/ydb-core-tx-schemeshard-ut_cdc_stream_reboots |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |68.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |68.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |68.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |68.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_minstep/ydb-core-tx-datashard-ut_minstep |68.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_locks/ydb-core-tx-datashard-ut_locks |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |68.7%| [LD] {RESULT} $(B)/ydb/services/fq/ut_integration/ydb-services-fq-ut_integration |68.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |68.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/batch_operations/ydb-core-kqp-ut-batch_operations |68.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut >> KqpNotNullColumns::ReplaceNotNull [GOOD] |68.7%| [LD] {RESULT} $(B)/ydb/core/tx/tiering/ut/ydb-core-tx-tiering-ut |68.8%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/ydb-core-kqp-ut-federated_query-generic_ut |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup >> KqpNewEngine::KeyColumnOrder [GOOD] >> KqpNewEngine::KeyColumnOrder2 >> KqpRanges::WhereInSubquery [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged+UseSink [GOOD] >> KqpScanLogs::GraceJoin+EnabledLogs [GOOD] >> 
KqpRanges::UpdateWhereInNoFullScan+UseSink >> TProxyActorTest::TestDisconnectWhileAttaching [GOOD] >> KqpScanLogs::GraceJoin-EnabledLogs >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphore [GOOD] >> KqpReturning::ReturningTwice [GOOD] >> KqpNewEngine::PagingNoPredicateExtract [GOOD] >> KqpSort::TopSortParameter [GOOD] >> KqpReturning::ReplaceSerial >> KqpSort::TopSortExpr |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> KqpSqlIn::KeySuffix_OnlyTail [GOOD] >> TProxyActorTest::TestCreateSemaphoreInterrupted >> TProxyActorTest::TestAttachSession |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> KqpReturning::ReturningWorksIndexedDeleteV2+QueryService [GOOD] >> KqpNewEngine::StreamLookupWithView [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Int |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestDisconnectWhileAttaching [GOOD] Test command err: ... waiting for blocked registrations ... blocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR cookie 0 ... waiting for blocked registrations (done) 2025-06-03T10:32:04.018477Z node 1 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [72057594037927937] NodeDisconnected NodeId# 2 ... unblocking NKikimr::NKesus::TEvKesus::TEvRegisterProxy from KESUS_PROXY_ACTOR to KESUS_TABLET_ACTOR >> KqpNewEngine::Truncated >> KqpNotNullColumns::JoinLeftTableWithNotNullPk+StreamLookup [GOOD] >> KqpNewEngine::KeyColumnOrder2 [GOOD] >> KqpRanges::UpdateWhereInNoFullScan+UseSink [GOOD] >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup >> KqpNewEngine::LocksMultiShard >> KqpRanges::UpdateWhereInNoFullScan-UseSink |68.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.8%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::PagingNoPredicateExtract [GOOD] Test command err: Trying to start YDB, gRPC: 26924, MsgBus: 25631 2025-06-03T10:31:57.455856Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668952958186529:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:57.455884Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d6/r3tmp/tmptGuege/pdisk_1.dat 2025-06-03T10:31:57.517804Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26924, node 1 2025-06-03T10:31:57.534333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:57.534343Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:57.534345Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:57.534382Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25631 2025-06-03T10:31:57.557286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:57.557330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:57.558373Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25631 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:57.589362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.597874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:31:57.616686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.634721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.645749Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:57.879766Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668952958188122:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:31:57.879810Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:31:57.935470Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480
2025-06-03T10:31:57.944040Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480
2025-06-03T10:31:57.953854Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480
2025-06-03T10:31:57.967648Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480
2025-06-03T10:31:58.024506Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480
2025-06-03T10:31:58.037937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480
2025-06-03T10:31:58.051613Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480
2025-06-03T10:31:58.067207Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668957253156072:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:31:58.067227Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:31:58.067235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668957253156077:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:31:58.067882Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480
2025-06-03T10:31:58.070995Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668957253156079:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:31:58.130404Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668957253156130:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 23308, MsgBus: 3368 2025-06-03T10:31:58.589135Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668957975492206:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:58.589180Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d6/r3tmp/tmpmA9o1U/pdisk_1.dat 2025-06-03T10:31:58.606250Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23308, node 2 2025-06-03T10:31:58.616894Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:58.616907Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:58.616910Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:58.616968Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3368 TClient is connected to server localhost:3368 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:58.689431Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:58.689458Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:58.690447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:58.695457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:58.702620Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178 ... hemeshard: 72057594046644480 2025-06-03T10:32:03.373841Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.388121Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.402872Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.419883Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.479191Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.503933Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668981907106955:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:03.503970Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:03.504274Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511668981907106960:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:03.505882Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480
2025-06-03T10:32:03.511206Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511668981907106962:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:03.581720Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511668981907107013:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 17610, MsgBus: 14317 2025-06-03T10:32:04.158985Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511668985374216298:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:04.159004Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d6/r3tmp/tmpxLO5wE/pdisk_1.dat 2025-06-03T10:32:04.173848Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 17610, node 7 2025-06-03T10:32:04.183450Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:04.183475Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:04.183479Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:04.183529Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14317 TClient is connected to server localhost:14317 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:04.259400Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:04.259436Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:04.260538Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:04.262291Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.274944Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.286737Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.311016Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.324399Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.520453Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668985374217884:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:04.520482Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:04.531595Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.540474Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.548193Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.561539Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.576133Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.590068Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.604142Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480
2025-06-03T10:32:04.620832Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668985374218536:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions }
2025-06-03T10:32:04.620853Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.620875Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511668985374218541:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.621782Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.631062Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511668985374218543:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:04.702044Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511668985374218594:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] >> TProxyActorTest::TestAttachSession [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] >> KqpReturning::ReplaceSerial [GOOD] >> KqpReturning::ReturningSerial >> KqpSort::TopSortExpr [GOOD] >> KqpSort::TopSortExprPk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> VectorIndexBuildTestReboots::BaseCase[PipeResets] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:18.276416Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:18.276452Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:18.276459Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:18.276467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:18.276475Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:18.276480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:18.276491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:18.276509Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:18.276647Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:18.276750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:18.297336Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:18.297378Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:18.297496Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:18.300711Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:18.300895Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:18.300945Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:18.302808Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:18.302876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:18.303025Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:18.303125Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:29:18.303750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:18.303808Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:18.304165Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:18.304179Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:18.304197Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:18.304207Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:18.304215Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:18.304270Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 
72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:18.306161Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:18.333549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:18.333678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.333771Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:18.333827Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:18.333841Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.334950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:18.334994Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:18.335091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.335107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:18.335117Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:18.335127Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:18.335911Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.335933Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:18.335943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:18.336511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 
1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.336527Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:18.336537Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:18.336548Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:18.337739Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:18.338320Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:18.338420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:18.338688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:18.338731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:18.338741Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:18.338835Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Ch ... 
ToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559035Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true 
ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559054Z node 236 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable0build" took 19us result status StatusPathDoesNotExist 2025-06-03T10:31:55.559067Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable0build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559105Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559114Z node 236 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable1build" took 9us result status StatusPathDoesNotExist 2025-06-03T10:31:55.559123Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable1build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559179Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:31:55.559195Z node 236 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path 
"/MyRoot/dir/Table/index1/indexImplPostingTable" took 17us result status StatusSuccess 2025-06-03T10:31:55.559302Z node 236 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 
IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ... posting table contains 400 rows |68.9%| [TA] $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestCreateSemaphoreInterrupted [GOOD] |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> TProxyActorTest::TestAttachSession [GOOD] >> TTxLocatorTest::TestImposibleSize >> TPQCDTest::TestUnavailableWithoutBoth >> KqpNewEngine::LocksMultiShard [GOOD] >> KqpNewEngine::LocksEffects >> KqpNewEngine::Truncated [GOOD] >> KqpNewEngine::Update+UseSink |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kesus/proxy/ut/unittest >> KqpSqlIn::KeyTypeMissmatch_Int [GOOD] >> KqpSqlIn::KeyTypeMissmatch_Str >> TTxLocatorTest::TestImposibleSize [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanSpilling::SpillingInRuntimeNodes+EnabledSpilling [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/0028fb/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk7 Trying to start YDB, gRPC: 22215, MsgBus: 13147 2025-06-03T10:31:16.178667Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668778596662480:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:16.178848Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028fb/r3tmp/tmpCx95nN/pdisk_1.dat 2025-06-03T10:31:16.375102Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22215, node 1 2025-06-03T10:31:16.404747Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: 
distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:16.404761Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:16.404764Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:16.404814Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13147 TClient is connected to server localhost:13147 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:31:16.520250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:16.520284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:16.522447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:16.524038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.528343Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:31:16.534131Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.606764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:16.695317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:31:16.738165Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.796623Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668778596663921:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.796667Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:16.851375Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.863655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.930950Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:16.990758Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:17.005779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:17.068542Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:17.134803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:17.158278Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668782891631879:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:17.158312Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:17.158411Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668782891631884:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:17.162590Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:17.165237Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668782891631886:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:17.245439Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668782891631946:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:21.174293Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668778596662480:2225];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:21.174350Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:31:31.372149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:31:31.372168Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded ( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '787) '('"_id" '"8b85283b-46b84800-f28f1f63-2b803549") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1)) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '685) '('"_id" '"f99740e3-2e3eb4af-18c8cfd2-9bdbfbab") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '697) '('"_id" '"4e03a539-b2fa7d65-75980718-adabad4e")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] >> KqpReturning::ReturningWorksIndexedDeleteV2-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert+QueryService >> KqpRanges::UpdateWhereInNoFullScan-UseSink [GOOD] >> KqpRanges::UpdateWhereInWithNull ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tx_allocator/ut/unittest >> TTxLocatorTest::TestImposibleSize [GOOD] Test command err: 2025-06-03T10:32:06.488238Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1925: Tablet: 72057594046447617 LockedInitializationPath Marker# TSYS32 2025-06-03T10:32:06.488368Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:911: 
Tablet: 72057594046447617 HandleFindLatestLogEntry, NODATA Promote Marker# TSYS19 2025-06-03T10:32:06.488499Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:225: Tablet: 72057594046447617 TTablet::WriteZeroEntry. logid# [72057594046447617:2:0:0:0:0:0] Marker# TSYS01 2025-06-03T10:32:06.489006Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:0:0:0:20:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.489110Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:17: tablet# 72057594046447617 OnActivateExecutor 2025-06-03T10:32:06.491868Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:1:28672:35:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.491903Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:1:0:0:42:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.491924Z node 1 :TABLET_MAIN DEBUG: tablet_sys.cpp:1396: Tablet: 72057594046447617 GcCollect 0 channel, tablet:gen:step => 2:0 Marker# TSYS28 2025-06-03T10:32:06.491956Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:1:8192:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.491976Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:2:0:0:71:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.491994Z node 1 :TX_ALLOCATOR DEBUG: txallocator__scheme.cpp:22: tablet# 72057594046447617 TTxSchema Complete 2025-06-03T10:32:06.492017Z node 1 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72057594046447617 Active! 
Generation: 2, Type: TxAllocator started in 0msec Marker# TSYS24 2025-06-03T10:32:06.492198Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:71:2105] requested range size#281474976710656 2025-06-03T10:32:06.492246Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 0 Reserved to# 0 2025-06-03T10:32:06.493053Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:71:2105] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-03T10:32:06.493127Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:75:2108] requested range size#123456 2025-06-03T10:32:06.493203Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:1:24576:70:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.493215Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:3:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.493230Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 0 Reserved to# 123456 2025-06-03T10:32:06.493235Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:75:2108] TEvAllocateResult from# 0 to# 123456 expected SUCCESS 2025-06-03T10:32:06.493282Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:79:2112] requested range size#281474976587200 2025-06-03T10:32:06.493336Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 123456 Reserved to# 0 2025-06-03T10:32:06.493343Z node 1 :TX_ALLOCATOR ERROR: txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:79:2112] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE 2025-06-03T10:32:06.493394Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:82:2115] requested range size#246912 2025-06-03T10:32:06.493437Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:1:24576:76:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.493447Z node 1 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594046447617:2:4:0:0:69:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0} 2025-06-03T10:32:06.493460Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 1 Reserved from# 123456 Reserved to# 370368 2025-06-03T10:32:06.493465Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:70: tablet# 72057594046447617 Send to Sender# [1:82:2115] TEvAllocateResult from# 123456 to# 370368 expected SUCCESS 2025-06-03T10:32:06.493515Z node 1 :TX_ALLOCATOR DEBUG: txallocator_impl.cpp:60: tablet# 72057594046447617 HANDLE TEvAllocate Sender# [1:86:2119] requested range size#281474976340288 2025-06-03T10:32:06.493524Z node 1 :TX_ALLOCATOR DEBUG: txallocator__reserve.cpp:56: tablet# 72057594046447617 TTxReserve Complete Successed# 0 Reserved from# 370368 Reserved to# 0 2025-06-03T10:32:06.493529Z node 1 :TX_ALLOCATOR ERROR: 
txallocator_impl.cpp:84: tablet# 72057594046447617 Send to Sender# [1:86:2119] TEvAllocateResult status# IMPOSIBLE expected IMPOSIBLE >> KqpNotNullColumns::JoinLeftTableWithNotNullPk-StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup >> GenericFederatedQuery::ClickHouseManagedSelectAll >> KqpNamedExpressions::NamedExpressionRandomChanged2+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_cluster_discovery/ut/unittest >> KqpSort::TopSortExprPk [GOOD] >> KqpSort::TopSortTableExpr ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::WideCombine+EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/0028fa/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk2 Trying to start YDB, gRPC: 4073, MsgBus: 16367 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0028fa/r3tmp/tmpCGIg0T/pdisk_1.dat TServer::EnableGrpc on GrpcPort 4073, node 1 TClient is connected to server localhost:16367 TClient is connected to server localhost:16367 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (DataType 'Uint64)) (let $4 '('('"_logical_id" '505) '('"_id" '"ecce4715-74682c84-3737e45f-f8e3235c") '('"_wide_channels" (StructType '('"Value" (OptionalType (DataType 'String))) '('_yql_agg_0 $3))))) (let $5 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($12) (block '( (let $13 (lambda '($15) (Member $15 '"Key") (Member $15 '"Value"))) (let $14 (lambda '($25 $26) $25 $26)) (return (FromFlow (WideCombiner (ExpandMap (ToFlow $12) $13) '-1073741824 (lambda '($16 $17) $17) (lambda '($18 $19 $20) (AggrCountInit $19)) (lambda '($21 $22 $23 $24) (AggrCountUpdate $22 $24)) $14))) ))) $4)) (let $6 (DqCnHashShuffle (TDqOutput $5 '0) '('0))) (let $7 (DqPhyStage '($6) (lambda '($27) (block '( (let $28 (WideCombiner (ToFlow $27) '"" (lambda '($29 $30) $29) (lambda '($31 $32 $33) $33) (lambda '($34 $35 $36 $37) (AggrAdd $36 $37)) (lambda '($38 $39) $39))) (return (FromFlow (NarrowMap $28 (lambda '($40) (AsStruct '('"column0" $40)))))) ))) '('('"_logical_id" '1265) '('"_id" '"1678f09d-9dafc14e-181673f0-85aecb3a")))) (let $8 (DqCnUnionAll (TDqOutput $7 '0))) (let $9 (DqPhyStage '($8) (lambda '($41) $41) '('('"_logical_id" '1533) '('"_id" '"17600abc-e0e4357d-357fd6e2-e75b578f")))) (let $10 '($5 $7 $9)) (let $11 (DqCnResult (TDqOutput $9 '0) '('"column0"))) (return (KqpPhysicalQuery '((KqpPhysicalTx $10 '($11) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType (StructType '('"column0" $3))) '0 '0)) '('('"type" '"query")))) ) >> GenericFederatedQuery::IcebergHadoopTokenSelectAll >> GenericFederatedQuery::PostgreSQLOnPremSelectAll >> GenericFederatedQuery::IcebergHiveBasicSelectAll >> KqpReturning::ReturningSerial [GOOD] >> KqpReturning::ReturningWorks+QueryService >> GenericFederatedQuery::YdbFilterPushdown >> GenericFederatedQuery::YdbManagedSelectAll >> KqpNewEngine::Update+UseSink [GOOD] >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers >> GenericFederatedQuery::IcebergHiveSaSelectAll >> KqpNewEngine::LocksEffects [GOOD] >> KqpNewEngine::LeftSemiJoin >> KqpSqlIn::KeyTypeMissmatch_Str [GOOD] >> KqpSqlIn::SecondaryIndex_PgKey >> GenericFederatedQuery::IcebergHadoopBasicSelectAll >> GenericFederatedQuery::IcebergHadoopSaSelectAll >> KqpNotNullColumns::JoinRightTableWithNotNullColumns+StreamLookup [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup >> KqpNamedExpressions::NamedExpressionRandomChanged2-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink >> KqpReturning::ReturningWorksIndexedInsert+QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedInsert-QueryService >> KqpSort::TopSortTableExpr [GOOD] >> KqpSort::TopSortTableExprOffset >> KqpRanges::UpdateWhereInWithNull [GOOD] >> KqpRanges::ValidatePredicates |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> KqpNewEngine::LeftSemiJoin [GOOD] >> KqpReturning::ReturningWorks+QueryService [GOOD] >> KqpReturning::ReturningWorks-QueryService >> KqpNewEngine::LocksInRoTx >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Table [GOOD] >> KqpNotNullColumns::JoinRightTableWithNotNullColumns-StreamLookup [GOOD] >> KqpNotNullColumns::OptionalParametersDataQuery >> KqpSqlIn::SecondaryIndex_PgKey [GOOD] >> KqpSqlIn::SecondaryIndex_SimpleKey >> 
KqpSort::TopSortTableExprOffset [GOOD] >> KqpSort::TopSortResults >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query >> TxUsage::Sinks_Oltp_WriteToTopic_2_Query [GOOD] >> S3SettingsConversion::FoldersStyleDeduction [GOOD] >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] >> GenericFederatedQuery::YdbFilterPushdown [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 >> ColumnShardTiers::DSConfigsWithQueryServiceDdl >> GenericFederatedQuery::IcebergHiveTokenSelectAll >> KqpReturning::ReturningWorksIndexedInsert-QueryService [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService >> TxUsage::WriteToTopic_Demo_14_Query [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectAll [GOOD] >> GenericFederatedQuery::ClickHouseManagedSelectConstant >> TxUsage::WriteToTopic_Demo_15_Table >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/persqueue_cluster_discovery/ut/unittest >> TPQCDTest::TestUnavailableWithoutBoth [GOOD] Test command err: 2025-06-03T10:32:06.551769Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668994348152540:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:06.551959Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c49/r3tmp/tmpS62XUk/pdisk_1.dat 2025-06-03T10:32:06.612784Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668994348152515:2079] 1748946726551460 != 1748946726551463 2025-06-03T10:32:06.615238Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6840, node 1 2025-06-03T10:32:06.633620Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:06.633639Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:06.633642Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:06.633661Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:06.654989Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:06.655025Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:06.656123Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:06.969711Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668994348153142:2330], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:06.969774Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:06.970428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668994348153169:2333], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:06.972296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:06.976530Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668994348153171:2334], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:32:07.046451Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668998643120524:2314] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:07.101801Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668998643120545:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:07.101930Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NWVlNzFkZTEtODMwMmJhZDYtZmE3NDRjYjAtM2FhZjY3NTE=, ActorId: [1:7511668994348153140:2329], ActorState: ExecuteState, TraceId: 01jwtnjc1s1xcdccftzz1wxq0x, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:07.104611Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:08.110215Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669002938087922:2369], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:08.110345Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NThkNjVkYTktYmZjZDZkMzYtYWY3OGNjOS1jZmNkMTU1Ng==, ActorId: [1:7511669002938087915:2365], ActorState: ExecuteState, TraceId: 01jwtnjd5974ayfp4vkaa3xy9s, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:08.110516Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:09.116933Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669007233055272:2395], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:09.117421Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NTI1NWM5NmItYzk1MGU3MmItNDNlOTBkNzQtY2NmM2JiMDk=, ActorId: [1:7511669007233055270:2394], ActorState: ExecuteState, TraceId: 01jwtnje4q4z8a6fjzf0k5v8hr, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:09.117619Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } >> KqpNewEngine::LocksInRoTx [GOOD] >> KqpNewEngine::LiteralKeys |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> S3SettingsConversion::FoldersStyleDeduction [GOOD] |68.9%| [TA] $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/test-results/unittest/{meta.json ... results_accumulator.log} >> GenericFederatedQuery::IcebergHiveBasicSelectAll [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant >> GenericFederatedQuery::IcebergHadoopTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant >> TxUsage::WriteToTopic_Demo_19_RestartNo_Table [GOOD] >> KqpReturning::ReturningWorks-QueryService [GOOD] >> KqpReturning::ReturningColumnsOrder >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId1 [GOOD] >> GenericFederatedQuery::YdbManagedSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectAll [GOOD] >> GenericFederatedQuery::PostgreSQLOnPremSelectAll [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 >> GenericFederatedQuery::YdbManagedSelectConstant >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant >> KqpNotNullColumns::OptionalParametersDataQuery [GOOD] >> GenericFederatedQuery::IcebergHiveSaSelectAll [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query >> GenericFederatedQuery::IcebergHadoopSaSelectAll [GOOD] >> KqpNotNullColumns::OptionalParametersScanQuery >> GenericFederatedQuery::IcebergHiveSaSelectConstant >> GenericFederatedQuery::IcebergHadoopSaSelectConstant ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::SplitByLoadWithReadsMultipleSplitsWithData [GOOD] Test command err: 2025-06-03T10:31:09.385061Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668747629962767:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:09.385159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002249/r3tmp/tmpCojrfJ/pdisk_1.dat 
2025-06-03T10:31:09.497549Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:09.499320Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:09.499360Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:09.506321Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 3012, node 1 2025-06-03T10:31:09.538957Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:09.538973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:09.538975Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:09.539037Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29331 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:09.602513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:29331 2025-06-03T10:31:10.037894Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668751924930882:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:10.037936Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:10.095705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:10.246364Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668751924931070:2365], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:10.246399Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:10.246524Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668751924931075:2368], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:10.247499Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:3, at schemeshard: 72057594046644480 2025-06-03T10:31:10.274346Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668751924931077:2369], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:31:10.322629Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtngmnsbr9cx9j7n5ncw2gm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJjNDEyYjctM2I3MTYxNjUtYTZjOTcxOGEtMmY3OTJkNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.324254Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtngmn91s88nkcvs1bjvnjn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzYyODNiY2MtMjhhNGJkZjAtNGUxZmMxYzItNzdiYjQyYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.324576Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtngmndfky48bg4sxknnnr4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjgxYmM2OTktNDMyZWIwNzEtNmUzYTBkNjQtNjc5ZmE0OGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.324807Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtngmnda64w4a52xnfne5v0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFiOTk2MC05ZGVmN2Y1Zi0zNzk5ZWViYS0yOTBmZjNiZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.324971Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtngmn91h4k7n2tc1tzhk0a, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThjODZlYWItMzFmMGMyZmEtMzBlNzAzZWUtZDY4Mjg4Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.325141Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. Ctx: { TraceId: 01jwtngmn9ah3zgptc53zsagsa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE4MzNkYzktNTEzYTBjMGYtZmM0MTRmMzAtMzdlNWY5NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.325331Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtngmn8c00ve7sxn0hzf4dc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2MyZTIwN2EtODkyNTFiNTUtMWE5NmFlMTctMmRjMjliMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.325571Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtngmn99hmgyt3bn6kjy7nc, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y2NzcwNi1hZTE2OTlkNC1iYTRhMGU5ZC00Y2U4YWQwOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.334034Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtngmn90xrb4jcs6b6bbsfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWU0YzFlNjktODNhY2ZiZGMtNDRmMmNlOGMtNjUyY2UyNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.357494Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. 
Ctx: { TraceId: 01jwtngmr9eq6awrdfdhpnw0cm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjJjNDEyYjctM2I3MTYxNjUtYTZjOTcxOGEtMmY3OTJkNTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.365631Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jwtngmr9ba3z55j0p2xg2y60, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2MyZTIwN2EtODkyNTFiNTUtMWE5NmFlMTctMmRjMjliMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.366597Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtngmr95t52tez6hvmwt9qm, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThjODZlYWItMzFmMGMyZmEtMzBlNzAzZWUtZDY4Mjg4Yjk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.366698Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. Ctx: { TraceId: 01jwtngmr9fmrqen48p5d1jmbv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjgxYmM2OTktNDMyZWIwNzEtNmUzYTBkNjQtNjc5ZmE0OGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.366872Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jwtngmr91h3p584rmha0zxdd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NTFiOTk2MC05ZGVmN2Y1Zi0zNzk5ZWViYS0yOTBmZjNiZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.366974Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtngmr9ay6z7sbmg9qsww86, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTE4MzNkYzktNTEzYTBjMGYtZmM0MTRmMzAtMzdlNWY5NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.367326Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtngmrd8x01dzdjvp6qk6b1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzYyODNiY2MtMjhhNGJkZjAtNGUxZmMxYzItNzdiYjQyYzA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.367596Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jwtngmrd7cz5r7y10nn8n12g, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=M2Y2NzcwNi1hZTE2OTlkNC1iYTRhMGU5ZC00Y2U4YWQwOQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.368110Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jwtngmrn24b1ym5t2z3wmn8f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWU0YzFlNjktODNhY2ZiZGMtNDRmMmNlOGMtNjUyY2UyNDk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:10.369902Z node 1 :TX_PROXY ERROR: schemereq.c ... 2. Ctx: { TraceId: 01jwtnjcfn1cy4a9sw15q5603x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE5OTRkMjYtYThkNTZhMDItNTY0MWRmOTItZGE5MTcyMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.414776Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822315. 
Ctx: { TraceId: 01jwtnjcfn0j1ywk1ewrtgqps7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTgxNzAyMWMtZmVlYjA3YzYtNmVkOTk0ZDYtZGNhYmQzNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.415434Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822309. Ctx: { TraceId: 01jwtnjcfnes8hwzcbktkdmvdh, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGRkY2UwZmItOGQwZjk4NTgtZmM5ZDNlOGUtZDdiMTU1NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.415608Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822310. Ctx: { TraceId: 01jwtnjcfn27v8rqem5fr457cz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThmYWU3NGQtZjkwNWYzMGQtNTU1NWFmM2ItMzhjOTI0OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.415811Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822317. Ctx: { TraceId: 01jwtnjcfn5rasqt0s8nf0hg2k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTVjODY2MjktMWFjM2NlYjMtYzA4NWE5OTItNmI5NTMxOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.415931Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822313. Ctx: { TraceId: 01jwtnjcfn8ftdb73bb5pvkrzg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTYwYmYzZDUtNTI3NGUyZTYtMTYxNzNhZi02ZDcwYmI5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.415961Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822316. Ctx: { TraceId: 01jwtnjcfn4hd3vf84bhe493qe, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyMGJlNzgtNzdhN2ExNzEtNjk5MzZmYjgtYzRiMmIxZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.416168Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822314. Ctx: { TraceId: 01jwtnjcfn6wn1t3mt9zbszqhf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTEwZDk1NGQtMTExZDQzNjMtYTU5N2IxNjYtNWI2Zjc2Nzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.416689Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822318. Ctx: { TraceId: 01jwtnjcfn6x02jbe135heeprq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmNiZmYzNWQtZmUyMTExYzktNzFmNDY2NS1jMDA2ZWY1Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.418333Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822319. Ctx: { TraceId: 01jwtnjcfs0h2h13tykxxa9ta8, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE5OTRkMjYtYThkNTZhMDItNTY0MWRmOTItZGE5MTcyMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.418334Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822320. Ctx: { TraceId: 01jwtnjcfsdjkq8bfgz0nb22w4, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTgxNzAyMWMtZmVlYjA3YzYtNmVkOTk0ZDYtZGNhYmQzNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.419446Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822322. 
Ctx: { TraceId: 01jwtnjcfte4bgze6a273p9b1d, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyMGJlNzgtNzdhN2ExNzEtNjk5MzZmYjgtYzRiMmIxZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.419449Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822321. Ctx: { TraceId: 01jwtnjcft2sha8f50m81vcg4s, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGY1NjQ3YjItYjY5NzczNjQtMjBiODFiNTUtYzllMTc5M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.419589Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822324. Ctx: { TraceId: 01jwtnjcft29mht7grffbzfb97, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTEwZDk1NGQtMTExZDQzNjMtYTU5N2IxNjYtNWI2Zjc2Nzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.419673Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822325. Ctx: { TraceId: 01jwtnjcft6k0x55g6chvgzmqs, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThmYWU3NGQtZjkwNWYzMGQtNTU1NWFmM2ItMzhjOTI0OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TClient::Ls request: /Root/Foo 2025-06-03T10:32:07.420406Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822326. Ctx: { TraceId: 01jwtnjcft9xj838fcqtf91sr3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTYwYmYzZDUtNTI3NGUyZTYtMTYxNzNhZi02ZDcwYmI5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.420528Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822323. Ctx: { TraceId: 01jwtnjcft2pzh1081zbyaw08x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTVjODY2MjktMWFjM2NlYjMtYzA4NWE5OTItNmI5NTMxOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.420607Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822327. Ctx: { TraceId: 01jwtnjcft8yztckc52gn8rrqz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGRkY2UwZmItOGQwZjk4NTgtZmM5ZDNlOGUtZDdiMTU1NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.423588Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822328. Ctx: { TraceId: 01jwtnjcfvbvjmc6kbqwsqv892, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NmNiZmYzNWQtZmUyMTExYzktNzFmNDY2NS1jMDA2ZWY1Nw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.423814Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822336. Ctx: { TraceId: 01jwtnjcfx2mafpxvqrxpvec2p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGRkY2UwZmItOGQwZjk4NTgtZmM5ZDNlOGUtZDdiMTU1NTg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.423955Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822337. Ctx: { TraceId: 01jwtnjcfydmwem53zysxm5t8p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTYwYmYzZDUtNTI3NGUyZTYtMTYxNzNhZi02ZDcwYmI5ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946670226 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:32:07.424805Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822334. Ctx: { TraceId: 01jwtnjcfx87654fad2b3mdvr2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThmYWU3NGQtZjkwNWYzMGQtNTU1NWFmM2ItMzhjOTI0OWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425035Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822335. Ctx: { TraceId: 01jwtnjcfxdr1w0hjpzyvdnpp5, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTVjODY2MjktMWFjM2NlYjMtYzA4NWE5OTItNmI5NTMxOTE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425046Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822329. Ctx: { TraceId: 01jwtnjcfv9s23qpcxaq2sy9r2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTE5OTRkMjYtYThkNTZhMDItNTY0MWRmOTItZGE5MTcyMjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425216Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822330. Ctx: { TraceId: 01jwtnjcfv69w5wa0hmc881b1x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OTgxNzAyMWMtZmVlYjA3YzYtNmVkOTk0ZDYtZGNhYmQzNTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425377Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822331. Ctx: { TraceId: 01jwtnjcfx8wyrcdcfzztht5mx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YWQyMGJlNzgtNzdhN2ExNzEtNjk5MzZmYjgtYzRiMmIxZGY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425520Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822332. Ctx: { TraceId: 01jwtnjcfx99xeagx7dch9218t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZTEwZDk1NGQtMTExZDQzNjMtYTU5N2IxNjYtNWI2Zjc2Nzg=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:07.425534Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976822333. Ctx: { TraceId: 01jwtnjcfx326enx558ryk929m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZGY1NjQ3YjItYjY5NzczNjQtMjBiODFiNTUtYzllMTc5M2Q=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946670226 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 4 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 4 shards >> KqpSqlIn::SecondaryIndex_SimpleKey [GOOD] >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionOperationId2 [GOOD] >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken >> KqpNewEngine::LiteralKeys [GOOD] >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |68.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> KqpSort::TopSortResults [GOOD] >> KqpSort::UnionAllSortLimit >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] >> Describe::Statistics [GOOD] >> Describe::Location >> KqpReturning::ReturningColumnsOrder [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningWorksIndexedOperationsWithDefault+QueryService [GOOD] Test command err: Trying to start YDB, gRPC: 31007, MsgBus: 14817 2025-06-03T10:32:00.975059Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668966536336640:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:00.975092Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c6/r3tmp/tmpICp6by/pdisk_1.dat 2025-06-03T10:32:01.042213Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31007, node 1 2025-06-03T10:32:01.064181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:01.064194Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:01.064197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:01.064246Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:01.076433Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:01.076459Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:01.077544Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Connecting -> Connected TClient is connected to server localhost:14817 TClient is connected to server localhost:14817 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:01.119187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:01.121429Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:01.125092Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:01.142062Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:01.203321Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:01.217167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:01.393967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668970831305567:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:01.394004Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:01.454679Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.463312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.474927Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.489346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.544988Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.559513Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.573859Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.589991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668970831306221:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:01.590044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668970831306226:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:01.590049Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:01.591153Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:01.600192Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668970831306228:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:01.699349Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668970831306279:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:01.869275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.879708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:01.892100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 22433, MsgBus: 10809 2025-06-03T10:32:02.515402Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668976633984454:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:02.515450Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c6/r3tmp/tmp847gw6/pdisk_1.dat 2025-06-03T10:32:02.530162Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22433, node 2 2025-06-03T10:32:02.541426Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:02.541442Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:02.541445Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:02.541507Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10809 TClient is connected to server localhost:10809 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { S ... hemeshard: 72057594046644480 2025-06-03T10:32:09.071813Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.087202Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669003885500132:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.087239Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.087241Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669003885500137:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.088219Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:09.097132Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669003885500139:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:09.171598Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669003885500190:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:09.348704Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.360705Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.373112Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 27061, MsgBus: 63322 2025-06-03T10:32:10.025661Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669011609412659:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:10.026210Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c6/r3tmp/tmp4oEkFt/pdisk_1.dat 2025-06-03T10:32:10.042952Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27061, node 7 2025-06-03T10:32:10.057670Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.057688Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.057690Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.057747Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:63322 TClient is connected to server localhost:63322 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:10.124574Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.124634Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.125673Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:10.130633Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.141727Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.153205Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.177920Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.202236Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.438725Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669011609414098:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.438757Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.449469Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.458782Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.470792Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.527897Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.542203Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.555195Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.568740Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.585613Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669011609414751:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.585641Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.585774Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669011609414756:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.587043Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:10.595253Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669011609414758:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:10.672212Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669011609414809:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:10.870663Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 |69.0%| [TA] {RESULT} $(B)/ydb/core/tablet_flat/ut/test-results/unittest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::LiteralKeys [GOOD] Test command err: Trying to start YDB, gRPC: 19343, MsgBus: 9973 2025-06-03T10:32:03.636325Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668980381950062:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.636346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a6/r3tmp/tmpgr6EZi/pdisk_1.dat TServer::EnableGrpc on GrpcPort 19343, node 1 2025-06-03T10:32:03.739763Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.739792Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.740821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:03.758523Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:03.764546Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668980381950036:2079] 1748946723636163 != 1748946723636166 2025-06-03T10:32:03.764713Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.764722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.764724Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.764785Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9973 TClient is connected to server localhost:9973 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:03.855483Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.861778Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:03.874552Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.910545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:32:03.945721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.961210Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.087925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668984676918975:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.087959Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.148275Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.203786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.212200Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.225963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.239283Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.253830Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.267793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.284171Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668984676919630:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.284217Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.284309Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668984676919635:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.285147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.288114Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668984676919637:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:04.374047Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668984676919688:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:04.508805Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 3427, MsgBus: 29721 2025-06-03T10:32:04.724978Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668982847200205:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:04.725159Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a6/r3tmp/tmpKqoMk3/pdisk_1.dat 2025-06-03T10:32:04.739345Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3427, node 2 2025-06-03T10:32:04.748664Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:04.748678Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:04.748680Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:04.748718Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29721 TClient is connected to server localhost:29721 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:04.829383Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T ... 
schemeshard: 72057594046644480 2025-06-03T10:32:09.593908Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.603096Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.616683Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.631813Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.690117Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.713509Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669005718652164:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.713585Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.713902Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669005718652169:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:09.715111Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:09.718836Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669005718652171:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:09.790870Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669005718652222:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 4692, MsgBus: 21839 2025-06-03T10:32:10.300163Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669010001289413:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:10.300191Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a6/r3tmp/tmp7Tqh5F/pdisk_1.dat 2025-06-03T10:32:10.316740Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4692, node 7 2025-06-03T10:32:10.337181Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.337197Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.337199Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.337253Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21839 TClient is connected to server localhost:21839 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:10.401398Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.401433Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.402000Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.402432Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:10.413431Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.424815Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.449399Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.461411Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.754296Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669010001290995:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.754326Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.761337Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.772635Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.786016Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.798958Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.808431Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.822306Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.833667Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.853847Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669010001291647:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.853874Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.854026Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669010001291652:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.854892Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:10.865240Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669010001291654:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:10.922125Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669010001291705:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> KqpReturning::ReturningTypes >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] >> KqpNamedExpressions::NamedExpressionRandom+UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandom-UseSink >> LocalPartition::DiscoveryServiceBadNodeId [GOOD] >> LocalPartition::DiscoveryHang >> GenericFederatedQuery::ClickHouseManagedSelectConstant [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> KqpSqlIn::SecondaryIndex_ComplexKey_In_And_In [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount >> GenericFederatedQuery::IcebergHiveTokenSelectAll [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectConstant [GOOD] >> KqpSort::UnionAllSortLimit [GOOD] >> GenericFederatedQuery::PostgreSQLOnPremSelectConstant [GOOD] >> GenericFederatedQuery::YdbManagedSelectConstant [GOOD] >> GenericFederatedQuery::YdbSelectCount >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] >> KqpReturning::ReturningTypes [GOOD] >> KqpSqlIn::PhasesCount >> GenericFederatedQuery::IcebergHiveSaSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount >> TxUsage::WriteToTopic_Demo_31_Query >> GenericFederatedQuery::PostgreSQLSelectCount >> GenericFederatedQuery::IcebergHadoopBasicSelectCount >> GenericFederatedQuery::IcebergHiveTokenSelectConstant >> GenericFederatedQuery::IcebergHadoopSaSelectCount >> GenericFederatedQuery::IcebergHiveSaSelectCount >> TTablesWithReboots::DropCopyWithRebootsAtCommit [GOOD] |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> KqpSqlIn::PhasesCount [GOOD] |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> TxUsage::WriteToTopic_Demo_4_Table [GOOD] >> GenericFederatedQuery::IcebergHadoopSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopBasicSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenSelectConstant [GOOD] >> GenericFederatedQuery::IcebergHadoopTokenSelectCount [GOOD] >> GenericFederatedQuery::PostgreSQLSelectCount [GOOD] >> GenericFederatedQuery::YdbSelectCount [GOOD] >> GenericFederatedQuery::ClickHouseSelectCount [GOOD] >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown >> TxUsage::WriteToTopic_Demo_5_Table >> GenericFederatedQuery::IcebergHiveTokenSelectCount >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown >> GenericFederatedQuery::PostgreSQLFilterPushdown >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown >> GenericFederatedQuery::ClickHouseFilterPushdown >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown |69.0%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_index_build_reboots/test-results/unittest/{meta.json ... results_accumulator.log} |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> TTablesWithReboots::DropCopyWithRebootsAtCommit [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:31:28.960383Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:28.960411Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:28.960417Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:28.960432Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:28.960446Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:28.960451Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:28.960461Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:28.960476Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:28.960606Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:28.960683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:28.977827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig {
AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:28.977850Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:28.977965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:31:28.980724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:28.980837Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:28.980870Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:28.984183Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:28.984263Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:28.984394Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:28.984463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:28.984984Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:28.985039Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:28.985355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:28.985369Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:28.985389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:28.985398Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:28.985406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:28.985447Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:31:28.987134Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:31:29.011391Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { 
WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:29.011500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.011569Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:29.011621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:29.011633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.012502Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:29.012533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:29.012595Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.012608Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:29.012615Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:29.012621Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:29.013385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.013403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:29.013410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:29.013826Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.013838Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:29.013845Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:29.013854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:29.014683Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:29.015235Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:29.015281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:29.015506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:29.015540Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:29.015548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:29.015618Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
78944, LocalPathId: 1] was 3 2025-06-03T10:32:14.159700Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:32:14.159715Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:32:14.159720Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:32:14.159726Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:32:14.159732Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:32:14.159747Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 0/1, is published: true 2025-06-03T10:32:14.160345Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1005:0, at schemeshard: 72057594046678944 2025-06-03T10:32:14.160359Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_drop_table.cpp:414: TDropTable TProposedDeletePart operationId: 1005:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:14.160433Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove table for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 3 2025-06-03T10:32:14.160468Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:32:14.160474Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:32:14.160481Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:32:14.160485Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:32:14.160490Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-03T10:32:14.160495Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:32:14.160501Z node 147 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:32:14.160507Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:32:14.160527Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:32:14.160756Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:32:14.161152Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:32:14.162504Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 435 RawX2: 631360194916 } TabletId: 72075186233409547 State: 4 2025-06-03T10:32:14.162527Z node 147 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:32:14.162903Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:32:14.162992Z node 147 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-03T10:32:14.163600Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:32:14.163663Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:32:14.163825Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:32:14.163833Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:32:14.163849Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:32:14.164631Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:32:14.164648Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:32:14.164698Z node 147 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification wait txId: 1004 2025-06-03T10:32:14.164758Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1004: send EvNotifyTxCompletion 2025-06-03T10:32:14.164767Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1004 TestWaitNotification wait txId: 1005 2025-06-03T10:32:14.164785Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:32:14.164790Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:32:14.164872Z node 147 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1004, at schemeshard: 72057594046678944 2025-06-03T10:32:14.164895Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1004: got EvNotifyTxCompletionResult 2025-06-03T10:32:14.164902Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1004: satisfy waiter [147:679:2639] 2025-06-03T10:32:14.164912Z node 147 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:32:14.164926Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:32:14.164931Z node 147 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [147:679:2639] TestWaitNotification: OK eventTxId 1004 TestWaitNotification: OK eventTxId 1005 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted 2025-06-03T10:32:14.164999Z node 147 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:32:14.165012Z node 147 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:32:14.165021Z node 147 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-03T10:32:14.165029Z node 147 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 2025-06-03T10:32:14.165118Z node 147 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NewTable" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:14.165161Z node 147 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NewTable" took 59us result status StatusPathDoesNotExist 2025-06-03T10:32:14.165205Z node 147 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/NewTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/NewTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:32:14.165261Z node 147 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:14.165274Z node 147 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table" took 16us result status StatusPathDoesNotExist 2025-06-03T10:32:14.165313Z node 147 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::YdbSelectCount [GOOD] Test command err: Trying to start YDB, gRPC: 12285, MsgBus: 22219 2025-06-03T10:32:07.690021Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668998704641925:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.690047Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c25/r3tmp/tmp8td9Gp/pdisk_1.dat TServer::EnableGrpc on GrpcPort 12285, node 1 2025-06-03T10:32:07.783408Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668998704641905:2079] 1748946727689818 != 1748946727689821 2025-06-03T10:32:07.783583Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:07.788531Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.788545Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.788548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.788599Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22219 2025-06-03T10:32:07.838146Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.838179Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.839165Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22219 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.856955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:07.859185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:08.159988Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002999609859:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.160043Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.692307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.754704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002999609990:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.754730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.754734Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002999609995:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.755492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.760545Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669002999609997:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.842968Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669002999610037:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.933781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.000206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:09.138076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.220637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.291278Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.366458Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.382290Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.671390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.684254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.684549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.684852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { items { column { name: "col1" type { type_id: UINT16 } } } } ... 
Ky8fYN/pdisk_1.dat 2025-06-03T10:32:13.331969Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:13.339907Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669023592269353:2079] 1748946733297666 != 1748946733297669 2025-06-03T10:32:13.342440Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31088, node 3 2025-06-03T10:32:13.356910Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:13.356924Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:13.356926Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:13.356981Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25150 2025-06-03T10:32:13.409493Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:13.409526Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:13.409936Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:25150 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:32:13.438435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.441758Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:13.784230Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669023592270010:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.784267Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:14.331447Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:14.395234Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669027887237437:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:14.395267Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:14.395306Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669027887237442:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:14.396287Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:14.402810Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669027887237444:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:14.467954Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669027887237484:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:14.544439Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:14.611694Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:14.713239Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:14.811771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:14.932828Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:15.012423Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:15.029191Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:15.363886Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. 
GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } columns { name: "col2" type { type_id: DOUBLE } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { } from { table: "example_1" } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/idx_test/unittest >> YdbIndexTable::OnlineBuildWithDataColumn [GOOD] Test command err: Trying to start YDB, gRPC: 11538, MsgBus: 3725 2025-06-03T10:31:03.400506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668722537891522:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:03.400609Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001c6c/r3tmp/tmpCGiaEp/pdisk_1.dat 2025-06-03T10:31:03.490557Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:03.493419Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668722537891343:2079] 1748946663393510 != 1748946663393513 2025-06-03T10:31:03.498450Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:03.498492Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 11538, node 1 2025-06-03T10:31:03.499558Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:03.509536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:03.509554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:03.509556Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:03.509602Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3725 TClient is connected to server localhost:3725 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
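[editor's note] Up to this point nearly every WARN/ERROR record is routine bootstrap noise: the "default" resource pool is looked up before the first query creates it (NOT_FOUND, a "doublechecking" retry, then a benign "path exist" race), NET_CLASSIFIER has no distributable config in the test harness, and KQP_EXECUTER logs "Database not set, use /Root" for every transaction issued without an explicit database. Real failures surface as a non-[GOOD] verdict instead. When skimming a log this size, a severity histogram makes the recurring patterns easy to discount; a throwaway stdlib-only sketch, assuming only the "node N :COMPONENT LEVEL:" record shape visible above:

# Rough triage helper for ya/YDB test logs like this one: counts records per
# (severity, component) so repeated startup warnings stand out from one-offs.
import re
import sys
from collections import Counter

PATTERN = re.compile(r"node \d+ :(\w+) (WARN|ERROR):")

counts = Counter()
with open(sys.argv[1], encoding="utf-8", errors="replace") as log:
    for line in log:
        for component, level in PATTERN.findall(line):
            counts[(level, component)] += 1

for (level, component), n in counts.most_common():
    print(f"{level:5} {component:25} {n}")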
2025-06-03T10:31:03.593861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.597738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.610416Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:03.678842Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.709640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.726079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:03.916044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668722537892980:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:03.916093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:03.982148Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.001374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.019296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.079380Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.094295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.109696Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.170999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.192034Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668726832860936:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:04.192066Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:04.192201Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668726832860941:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:04.193708Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:31:04.198788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:31:04.198904Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668726832860943:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:31:04.282271Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668726832860994:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:04.506843Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:31:04.655803Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtngf65cxfjdda0fm5h0e39, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0MWQ1NzQtMWY4ZjY3YzUtZTk2M2U5ZTctZjgwYzQzNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.663419Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715674. Ctx: { TraceId: 01jwtngf65cxfjdda0fm5h0e39, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0MWQ1NzQtMWY4ZjY3YzUtZTk2M2U5ZTctZjgwYzQzNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.684291Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715675. Ctx: { TraceId: 01jwtngf754c93w034erzydqnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGIwNWZlZTctNGM1ZTMzNTYtN2FhNzFjMTMtMjFkYWUxMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.689599Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715676. Ctx: { TraceId: 01jwtngf754c93w034erzydqnv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGIwNWZlZTctNGM1ZTMzNTYtN2FhNzFjMTMtMjFkYWUxMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.708490Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715677. Ctx: { TraceId: 01jwtngf7z38qtj6pry4fx5csd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0MWQ1NzQtMWY4ZjY3YzUtZTk2M2U5ZTctZjgwYzQzNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.718813Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715678. Ctx: { TraceId: 01jwtngf7z38qtj6pry4fx5csd, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQ0MWQ1NzQtMWY4ZjY3YzUtZTk2M2U5ZTctZjgwYzQzNmU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:31:04.735935Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715679. Ctx: { TraceId: 01jwtngf8xe4rge6mcq86gcfvz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OGIwNWZlZTctNGM1ZTMzNTYtN2FhNzFjMTMtMjFkYWUxMzk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database n ... Ctx: { TraceId: 01jwtnjh2w74e5a9pvgk20h7az, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:32:12.139178Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720637. Ctx: { TraceId: 01jwtnjh2w74e5a9pvgk20h7az, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.151779Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720638. Ctx: { TraceId: 01jwtnjh3n7661nmgkmbmzyan7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.155904Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720639. Ctx: { TraceId: 01jwtnjh3n7661nmgkmbmzyan7, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.194598Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720640. Ctx: { TraceId: 01jwtnjh4q2b1gt4mk016cb3ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.197582Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720641. Ctx: { TraceId: 01jwtnjh4q2b1gt4mk016cb3ez, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.214474Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720642. Ctx: { TraceId: 01jwtnjh5h39s44p9664x4crdt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.217502Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720643. Ctx: { TraceId: 01jwtnjh5h39s44p9664x4crdt, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.233158Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720644. Ctx: { TraceId: 01jwtnjh67bwsppkjb88gbq0kx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.235332Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720645. Ctx: { TraceId: 01jwtnjh67bwsppkjb88gbq0kx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.248243Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720646. Ctx: { TraceId: 01jwtnjh6p8k1xhxxe9ztrv6e6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:32:12.250650Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720647. Ctx: { TraceId: 01jwtnjh6p8k1xhxxe9ztrv6e6, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.263395Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720648. Ctx: { TraceId: 01jwtnjh741b6hhdz41kq1r6hn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.269519Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720649. Ctx: { TraceId: 01jwtnjh741b6hhdz41kq1r6hn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.290156Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720650. Ctx: { TraceId: 01jwtnjh7y8rzrn1c74efv7hv2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.293201Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720651. Ctx: { TraceId: 01jwtnjh7y8rzrn1c74efv7hv2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.304749Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720652. Ctx: { TraceId: 01jwtnjh8d2ggt8es6r2agr1pw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.309714Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720653. Ctx: { TraceId: 01jwtnjh8d2ggt8es6r2agr1pw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.327956Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720654. Ctx: { TraceId: 01jwtnjh9118skftwape87f8fr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.334169Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720655. Ctx: { TraceId: 01jwtnjh9118skftwape87f8fr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.347724Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720656. Ctx: { TraceId: 01jwtnjh9s98xk7yb14681zkgy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:32:12.355300Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720657. Ctx: { TraceId: 01jwtnjh9s98xk7yb14681zkgy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.367666Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720658. Ctx: { TraceId: 01jwtnjhadfrgs99kphga6jeft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.371539Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720659. Ctx: { TraceId: 01jwtnjhadfrgs99kphga6jeft, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.381979Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720660. Ctx: { TraceId: 01jwtnjhavdazkk7bvh18mh6q0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.386037Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720661. Ctx: { TraceId: 01jwtnjhavdazkk7bvh18mh6q0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.407069Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720662. Ctx: { TraceId: 01jwtnjhbf86q4qf1wrnj2fk0j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.410170Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720663. Ctx: { TraceId: 01jwtnjhbf86q4qf1wrnj2fk0j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.457539Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720664. Ctx: { TraceId: 01jwtnjhc40nkhdrgxcxn9ezyr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.489626Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720665. Ctx: { TraceId: 01jwtnjhc40nkhdrgxcxn9ezyr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.509958Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720666. Ctx: { TraceId: 01jwtnjher09p2m48r9316wdn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:32:12.519677Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720667. Ctx: { TraceId: 01jwtnjher09p2m48r9316wdn0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MTAwYzZhNDQtNDI5MzBjNGUtYTcwNTc0ZTgtNTQ2N2JmMDI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.566554Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720668. Ctx: { TraceId: 01jwtnjhg23zycw23fj90bp5mg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:12.574138Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720669. Ctx: { TraceId: 01jwtnjhg23zycw23fj90bp5mg, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzA3YTdmNzgtYjk0MDBlYjQtNjhkNWMxMWUtYjRkYzJlZmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root finished with status: SUCCESS ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNotNullColumns::OptionalParametersScanQuery [GOOD] Test command err: Trying to start YDB, gRPC: 11008, MsgBus: 32607 2025-06-03T10:32:03.547243Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668978204488535:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.547262Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c2/r3tmp/tmpNbHo3W/pdisk_1.dat 2025-06-03T10:32:03.630638Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11008, node 1 2025-06-03T10:32:03.648505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.648542Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.649567Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:03.658197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.658213Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.658216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.658281Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32607 TClient is connected to server localhost:32607 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:03.732717Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.055376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982499456453:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.055406Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.102007Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.162483Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982499456555:2337], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.162541Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.162576Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982499456560:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.163684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.168781Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668982499456562:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:32:04.226559Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668982499456613:2384] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:04.256939Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668982499456655:2352], status: BAD_REQUEST, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:14: Error: Missing not null column in input: Value. All not null columns should be initialized, code: 2032 2025-06-03T10:32:04.257059Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZmQwNTYzMmQtM2U0NzU0MWYtNTlhYzgyNGYtYWYzYzhkZDQ=, ActorId: [1:7511668982499456435:2326], ActorState: ExecuteState, TraceId: 01jwtnj9cveaxhage468z5qgak, ReplyQueryCompileError, status BAD_REQUEST remove tx with tx_id: 2025-06-03T10:32:04.262091Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668982499456664:2356], status: GENERIC_ERROR, issues:
: Error: Type annotation, code: 1030
:1:14: Error: At function: KiWriteTable!
:1:47: Error: Failed to convert type: Struct<'Key':Int32,'Value':Null> to Struct<'Key':Uint64?,'Value':String>
:1:47: Error: Failed to convert 'Value': Null to String
:1:47: Error: Failed to convert input columns types to scheme types, code: 2031 2025-06-03T10:32:04.262186Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZmQwNTYzMmQtM2U0NzU0MWYtNTlhYzgyNGYtYWYzYzhkZDQ=, ActorId: [1:7511668982499456435:2326], ActorState: ExecuteState, TraceId: 01jwtnj9d24edsrgzdhksb4znw, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 22971, MsgBus: 29732 2025-06-03T10:32:04.425960Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668985353004017:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:04.425992Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c2/r3tmp/tmpFlQcP0/pdisk_1.dat 2025-06-03T10:32:04.440687Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22971, node 2 2025-06-03T10:32:04.457377Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:04.457390Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:04.457395Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:04.457452Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29732 TClient is connected to server localhost:29732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:04.526389Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:04.526441Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:04.527618Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:04.530355Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
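[editor's note] The two compile failures above are the KqpNotNullColumns cases behaving as intended: code 2032 rejects an upsert that omits a NOT NULL column entirely, and code 2031 rejects one that supplies an explicit Null for it, both at type-annotation time rather than at execution. A hedged reproduction with the public ydb Python SDK follows; the Driver/SessionPool/execute_scheme/transaction().execute calls are real SDK API, while the table name and NOT NULL support on non-key columns are assumptions about the local setup.

# Hedged repro of the "Missing not null column" (2032) and "Null to String"
# (2031) errors logged above, via the public ydb Python SDK. Table/column
# names are illustrative; assumes a YDB version that accepts NOT NULL on
# non-key data columns.
import ydb

driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")
driver.wait(timeout=5)
pool = ydb.SessionPool(driver)

def repro(session):
    session.execute_scheme(
        "CREATE TABLE demo (Key Uint64, Value String NOT NULL, PRIMARY KEY (Key));"
    )
    # Either commented statement fails compilation, mirroring the log:
    #   UPSERT INTO demo (Key) VALUES (1);               -- 2032: column missing
    #   UPSERT INTO demo (Key, Value) VALUES (1, NULL);  -- 2031: Null -> String
    session.transaction().execute(
        'UPSERT INTO demo (Key, Value) VALUES (1, "ok");', commit_tx=True
    )

pool.retry_operation_sync(repro)
driver.stop()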
2025-06-03T10:32:04.537477Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.549724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.573070Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.587682Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 202 ... operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.143201Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669010524119249:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.143238Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669010524119254:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.143237Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.144050Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:10.146555Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669010524119256:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:10.201428Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669010524119307:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:10.377436Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 10970, MsgBus: 61802 2025-06-03T10:32:10.933183Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669011546265299:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:10.933529Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027c2/r3tmp/tmpcqdH93/pdisk_1.dat 2025-06-03T10:32:10.956210Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10970, node 7 2025-06-03T10:32:10.968779Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.968792Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.968795Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.968852Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61802 2025-06-03T10:32:11.025856Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:11.025891Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:11.026927Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61802 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:11.037395Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.044638Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:32:11.055490Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.078250Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.093257Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.338123Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669015841234133:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.338162Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.344699Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.353244Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.365938Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.380409Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.394537Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.408868Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.422953Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.438713Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669015841234784:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.438738Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.438753Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669015841234789:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.439584Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:11.449148Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669015841234791:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:11.522235Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669015841234842:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:11.696070Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.770698Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946731812, txId: 281474976715674] shutting down 2025-06-03T10:32:11.816646Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946731861, txId: 281474976715676] shutting down 2025-06-03T10:32:11.846246Z node 7 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946731889, txId: 281474976715678] shutting down |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::ReturningTypes [GOOD] Test command err: Trying to start YDB, gRPC: 26206, MsgBus: 1493 2025-06-03T10:32:03.750293Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668981046224139:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.750315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a0/r3tmp/tmp3rBKF7/pdisk_1.dat 2025-06-03T10:32:03.841251Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:03.852244Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.852277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.853257Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 26206, node 1 2025-06-03T10:32:03.874197Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.874212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.874215Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.874268Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1493 TClient is connected to server localhost:1493 
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:03.955780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.964435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.027901Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.088555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.102661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.209770Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985341193067:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.209809Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.271435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.280208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.288301Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.343402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.351032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.365909Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.380028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.395875Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985341193723:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.395916Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.395967Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985341193728:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.396888Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.399327Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668985341193730:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:32:04.473934Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668985341193781:3401] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:04.661935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.670572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.680635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710674:0, at schemeshard: 72057594046644480 access count 1 Trying to start YDB, gRPC: 2968, MsgBus: 24579 2025-06-03T10:32:05.074845Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668990289444666:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:05.074876Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a0/r3tmp/tmpWFvCBR/pdisk_1.dat 2025-06-03T10:32:05.088606Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2968, node 2 2025-06-03T10:32:05.098561Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:05.098576Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:05.098578Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:05.098628Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24579 TClient is connected to server localhost:24579 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:0 ... itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.213217Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.226139Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.285724Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669015856584163:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.285768Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669015856584168:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.285775Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.286698Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:11.295472Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669015856584170:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:11.374773Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669015856584221:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:11.532459Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 [[[2];["321"]];[["111"];[2]]] Trying to start YDB, gRPC: 23339, MsgBus: 22064 2025-06-03T10:32:11.887915Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669015645152779:2139];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:11.894138Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a0/r3tmp/tmp6x9qux/pdisk_1.dat 2025-06-03T10:32:11.912783Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:11.913006Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7511669015645152670:2079] 1748946731887307 != 1748946731887310 TServer::EnableGrpc on GrpcPort 23339, node 7 2025-06-03T10:32:11.921331Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:11.921348Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:11.921351Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:11.921413Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22064 2025-06-03T10:32:11.997897Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:11.997935Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:12.001358Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22064 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:12.029951Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.034880Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:12.041259Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.055688Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.083747Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.100088Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.343353Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019940121597:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.343386Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.348643Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.364785Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.376915Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.391960Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.414664Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.429948Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.441583Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.506266Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019940122255:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.506301Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.506370Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019940122260:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.507296Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:12.510979Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669019940122262:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:12.578099Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669019940122313:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSort::UnionAllSortLimit [GOOD] Test command err: Trying to start YDB, gRPC: 1896, MsgBus: 5931 2025-06-03T10:32:03.760614Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668978106414709:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.760632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279a/r3tmp/tmpf3CiZS/pdisk_1.dat 2025-06-03T10:32:03.839793Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1896, node 1 2025-06-03T10:32:03.861976Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.861991Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.861995Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.862051Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:03.862512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.862527Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.863561Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5931 TClient is connected to server localhost:5931 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:03.929716Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:32:03.939849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.967336Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.991026Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.004363Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.242184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982401383599:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.242210Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.302457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.310261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.323853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.337620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.392991Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.402068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.415443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.431651Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982401384256:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.431680Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668982401384261:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.431684Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.432442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.441809Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668982401384263:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:04.542438Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668982401384314:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5058, MsgBus: 8436 2025-06-03T10:32:05.053630Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668990145028840:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:05.053653Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279a/r3tmp/tmpZI2teY/pdisk_1.dat 2025-06-03T10:32:05.069648Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 5058, node 2 2025-06-03T10:32:05.078755Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:05.078767Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:05.078769Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:05.078813Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8436 TClient is connected to server localhost:8436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:05.156358Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:05.156391Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:05.157128Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:05.157396Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:05.159539Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Opera ... hemeshard: 72057594046644480 2025-06-03T10:32:10.281879Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.296108Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.311270Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.326933Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.345963Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669011124705411:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.346002Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.346059Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669011124705416:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.347005Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:10.349902Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669011124705418:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:10.449151Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669011124705469:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:10.598260Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 15186, MsgBus: 22583 2025-06-03T10:32:11.699203Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669015509707350:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:11.699267Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279a/r3tmp/tmpJiWqR4/pdisk_1.dat TServer::EnableGrpc on GrpcPort 15186, node 7 2025-06-03T10:32:11.723204Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:11.725494Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:11.725506Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:11.725509Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:11.725558Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22583 TClient is connected to server localhost:22583 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:11.799565Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:11.799606Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:11.800694Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:11.805234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.816071Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.883354Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.950590Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:11.983493Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:12.168188Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019804676255:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.168228Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.175430Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.234028Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.245002Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.256065Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.314658Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.335400Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.357082Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.385898Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019804676912:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.385930Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.386114Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669019804676917:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:12.387230Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:12.390962Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669019804676919:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:12.490630Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669019804676970:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::TestFailsOnIncorrectScriptExecutionFetchToken [GOOD] Test command err: Trying to start YDB, gRPC: 20674, MsgBus: 23616 2025-06-03T10:32:07.633506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668996078684958:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.633532Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c2d/r3tmp/tmpZzjeQC/pdisk_1.dat 2025-06-03T10:32:07.701999Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:07.702298Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668996078684936:2079] 1748946727633248 != 1748946727633251 TServer::EnableGrpc on GrpcPort 20674, node 1 2025-06-03T10:32:07.717680Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.717696Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.717700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.717755Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:07.736499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.736543Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.737684Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:23616 TClient is connected to server localhost:23616 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.785602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:08.029325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669000373652891:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.029354Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.636246Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.701030Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669000373653021:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.701063Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.701111Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669000373653026:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.701998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.704509Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669000373653028:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:32:08.780727Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669000373653069:2389] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.869938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.945761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:09.028884Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.112675Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.187747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.270906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.280828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.640047Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710690:0, at schemeshard: 72057594046644480 Call DescribeTable. data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. 
GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: YDB endpoint { host: "localhost" port: 2136 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } descripti ... 14358, MsgBus: 23817 2025-06-03T10:32:09.961886Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669005003642217:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:09.961952Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c2d/r3tmp/tmpS3DPeg/pdisk_1.dat 2025-06-03T10:32:09.980322Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:09.980650Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669005003642196:2079] 1748946729961720 != 1748946729961723 TServer::EnableGrpc on GrpcPort 14358, node 2 2025-06-03T10:32:09.997578Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:09.997596Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:09.997599Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:09.997660Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23817 TClient is connected to server localhost:23817 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:10.067099Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.067138Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.067653Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.068210Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:10.070699Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 2110, MsgBus: 22102 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c2d/r3tmp/tmpUzcJtk/pdisk_1.dat 2025-06-03T10:32:10.541993Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669010315320333:2217];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:10.542063Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:10.555528Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:10.555795Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669010315320131:2079] 1748946730536662 != 1748946730536665 TServer::EnableGrpc on GrpcPort 2110, node 3 2025-06-03T10:32:10.569612Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.569625Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.569628Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.569691Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22102 TClient is connected to server localhost:22102 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:32:10.643363Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.643395Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.643863Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.645664Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:10.647186Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:11.001250Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-06-03T10:32:11.001847Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.002163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.106073Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1077: [ScriptExecutions] [TForgetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } 2025-06-03T10:32:11.107289Z node 3 :KQP_PROXY WARN: kqp_script_executions.cpp:1366: [ScriptExecutions] [TGetScriptExecutionOperationActor] ExecutionId: , reply BAD_REQUEST, issues: {
: Error: Invalid operation id: ydb/public/sdk/cpp/src/library/operation_id/operation_id.cpp:184: Unable to find key: id } Trying to start YDB, gRPC: 26525, MsgBus: 18124 2025-06-03T10:32:11.256018Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511669013257489034:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:11.256042Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c2d/r3tmp/tmphCZXSb/pdisk_1.dat 2025-06-03T10:32:11.278670Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:11.279207Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511669013257489015:2079] 1748946731255873 != 1748946731255876 TServer::EnableGrpc on GrpcPort 26525, node 4 2025-06-03T10:32:11.292850Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:11.292862Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:11.292865Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:11.292911Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18124 TClient is connected to server localhost:18124 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:11.360929Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:11.360959Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:11.361455Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:11.362034Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected >> GenericFederatedQuery::IcebergHiveSaSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveSaFilterPushdown >> KqpRanges::ValidatePredicates [GOOD] >> KqpRanges::ValidatePredicatesDataQuery >> KqpNamedExpressions::NamedExpressionRandom-UseSink [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpSqlIn::PhasesCount [GOOD] Test command err: Trying to start YDB, gRPC: 3715, MsgBus: 15097 2025-06-03T10:32:03.815506Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668981182327615:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.815535Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a3/r3tmp/tmp82lh5x/pdisk_1.dat 2025-06-03T10:32:03.939556Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.939599Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.940042Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:03.945674Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3715, node 1 2025-06-03T10:32:03.963656Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.963673Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.963676Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.963737Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15097 TClient is connected to server localhost:15097 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:04.034902Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.050782Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.116915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.176877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.188789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.298656Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985477296535:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.298689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.361491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.371019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.381157Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.394073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.407711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.422893Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.435936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.452271Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985477297186:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.452302Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.452331Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985477297191:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.453202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.455333Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668985477297193:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:04.520893Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668985477297244:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:04.686992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.697342Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.709462Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480
: Warning: Type annotation, code: 1030
:4:17: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:5:22: Warning: At function: Filter, At lambda, At function: Coalesce
:6:23: Warning: At function: SqlIn
:6:23: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108 Trying to start YDB, gRPC: 24683, MsgBus: 25875 2025-06-03T10:32:05.336156Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668990559745230:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:05.336177Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a3/r3tmp/tmpDAZD8t/pdisk_1.dat 2025-06-03T10:32:05.352609Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24683, node 2 2025-06-03T10:32:05.362448Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:05.362461Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:05.362463Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:05.362509Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25875 TClient is connected to server localhost:25875 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 Security ... Id: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.794697Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.806940Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.828616Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669016071781166:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.828639Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.828740Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669016071781171:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.829737Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:11.834607Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669016071781173:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:11.890481Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669016071781224:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:12.122304Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.141240Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:12.164628Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 14813, MsgBus: 26153 2025-06-03T10:32:13.062424Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027a3/r3tmp/tmpDPnOPp/pdisk_1.dat 2025-06-03T10:32:13.078363Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14813, node 7 2025-06-03T10:32:13.086268Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:13.086283Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:13.086286Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:13.086353Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26153 2025-06-03T10:32:13.161729Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:13.161762Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:13.166121Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26153 WaitRootIsUp 'Root'... 
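The code 1108 warning emitted above is the type annotator flagging three-valued logic: under ANSI rules, probing IN against a collection that contains NULL yields NULL rather than false for non-matching rows, so nullable arguments can produce surprising results. The pragma the warning suggests opts the query into the ANSI behavior. A minimal YQL sketch (table path and values are hypothetical):

    PRAGMA AnsiInForEmptyOrNullableItemsCollections;
    SELECT * FROM `/Root/Test`
    WHERE Key IN (1, 2, NULL); -- NULL item handled per ANSI semantics; warning 1108 no longer fires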
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:13.203288Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:13.205930Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:13.222591Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:13.259990Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:13.342839Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:13.388746Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:13.769715Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669023972993449:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.769744Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.778080Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.788787Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.802191Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.815569Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.829704Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.843795Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.857507Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:13.873686Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669023972994099:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.873706Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.873723Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669023972994104:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:13.874570Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:13.877156Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669023972994106:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:13.966122Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669023972994157:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> TTablesWithReboots::CopyWithRebootsAtCommit [GOOD] >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] |69.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |69.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview |69.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_sysview/ydb-core-tx-schemeshard-ut_sysview >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 61371, MsgBus: 3139 2025-06-03T10:32:07.553190Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668995546778872:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.553519Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c32/r3tmp/tmpP75lEU/pdisk_1.dat 2025-06-03T10:32:07.612483Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668995546778850:2079] 1748946727552974 != 1748946727552977 2025-06-03T10:32:07.614019Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61371, node 1 2025-06-03T10:32:07.626691Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.626707Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.626709Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.626758Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3139 TClient is connected to server localhost:3139 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.688818Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.688874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.689827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:07.689874Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:07.956589Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668995546779515:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:07.956615Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.562640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.625508Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668999841746941:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.625550Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.625612Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668999841746946:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.626344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.627754Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668999841746948:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.709499Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668999841746988:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.781572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.852561Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:08.921656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.000129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.082836Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.163232Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.177389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.496827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.507764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.508174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.508598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" ... tus: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.092518Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669037139070383:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.093445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.104319Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669037139070385:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:32:16.177585Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669037139070425:2387] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:16.257163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.342683Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:16.435635Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.551179Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.634082Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.701791Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.715038Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.009049Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710690:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> GenericFederatedQuery::IcebergHiveTokenSelectCount [GOOD] >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] |69.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut |69.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/storagepoolmon/ut/ydb-core-blobstorage-storagepoolmon-ut >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |69.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |69.1%| [LD] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/ydb-core-blobstorage-dsproxy-ut |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartNo_Query [GOOD] |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopBasicFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 5212, MsgBus: 11692 2025-06-03T10:32:07.847453Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668997529083305:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.848000Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c10/r3tmp/tmpmUFaxj/pdisk_1.dat 2025-06-03T10:32:07.922340Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:07.922567Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668997529083145:2079] 1748946727846014 != 1748946727846017 TServer::EnableGrpc on GrpcPort 5212, node 1 2025-06-03T10:32:07.933681Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.933700Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.933702Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.933759Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11692 TClient is connected to server localhost:11692 WaitRootIsUp 'Root'... 
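Across the YDB and Iceberg traces above the pushdown payload is identical; it reduces to the filter_typed fragment below (copied from the ReadSplits request, with # comments added, which text-format protobuf permits). The request also carries format: ARROW_IPC_STREAMING and filtering: FILTERING_OPTIONAL, which presumably lets the source apply the filter best-effort while the engine keeps a residual check.

    where {
      filter_typed {
        comparison {
          operation: EQ                             # equality predicate
          left_value { column: "filtered_column" }  # column operand
          right_value {
            typed_value {
              type { type_id: INT32 }
              value { int32_value: 42 }             # constant operand
            }
          }
        }
      }
    }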
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:07.992416Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.992457Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.993574Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:08.001019Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:08.199812Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001824051101:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.199880Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.849518Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.913141Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001824051232:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.913174Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.913176Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001824051237:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.913864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.915548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669001824051239:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.986075Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669001824051279:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:09.072725Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.150871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:09.245747Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.354855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.453317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.528860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.539002Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.824546Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.840683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.841013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.841195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } ... u don't have access permissions } 2025-06-03T10:32:16.353496Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.353673Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669036529035851:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.354377Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.360368Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:32:16.360467Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669036529035854:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:16.438257Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669036529035894:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:16.541334Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.633012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:16.709797Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.785012Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.862372Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.941933Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.958103Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.405147Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 14511, MsgBus: 16123 2025-06-03T10:32:07.912523Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668998673780289:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.912555Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c18/r3tmp/tmpB8kFuD/pdisk_1.dat 2025-06-03T10:32:07.972384Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668998673780266:2079] 1748946727912271 != 1748946727912274 2025-06-03T10:32:07.975825Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14511, node 1 2025-06-03T10:32:07.988249Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.988279Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.988283Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.988332Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16123 2025-06-03T10:32:08.014840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:08.014888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:08.016135Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16123 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:08.052692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:08.282194Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002968748221:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.282236Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.914721Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.979951Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002968748353:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.979985Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.980076Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002968748358:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.980891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.984475Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669002968748360:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710659 completed, doublechecking } 2025-06-03T10:32:09.056557Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669007263715696:2389] txid# 281474976710660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:09.149719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.223354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:09.355575Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.446840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.522335Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.601786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.616515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.931365Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710689:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.939916Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710693:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.940293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710695:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.940521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710694:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE ... P_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669034788955871:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.571894Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.571947Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669034788955876:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.572721Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.580244Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669034788955878:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:16.666384Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669034788955918:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:16.741202Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.812445Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:16.893875Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.982171Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.064192Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.148942Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.215681Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.632136Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> TTablesWithReboots::CopyWithRebootsAtCommit [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:31:30.290256Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:31:30.290294Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:30.290299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:30.290317Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:30.290332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:30.290336Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:30.290346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:30.290360Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:30.290510Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:30.290604Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:30.309030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:30.309058Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:30.309183Z node 1 :FLAT_TX_SCHEMESHARD 
NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:31:30.316270Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:30.316419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:30.316463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:30.319281Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:30.319355Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:30.319549Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.319658Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:30.320307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:30.320371Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:30.320698Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:30.320713Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:30.320733Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:30.320742Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:30.320750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:30.320804Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:31:30.323038Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:31:30.347496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 
72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:30.347595Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.347667Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:30.347728Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:30.347740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.348712Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.348747Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:30.348815Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.348825Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:30.348829Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:30.348834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:30.349543Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.349561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:30.349569Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:30.350200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.350219Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.350227Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:30.350238Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:30.351102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 
18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:30.351746Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:30.351825Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:30.352096Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.352122Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:30.352133Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:30.352202Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... bleBarrier operationId: 1003:0ProgressState, operation type TxCopyTable 2025-06-03T10:32:17.252033Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:1059: Set barrier, OperationId: 1003:0, name: CopyTableBarrier, done: 0, blocked: 1, parts count: 1 2025-06-03T10:32:17.252040Z node 158 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 1003, done: 0, blocked: 1 2025-06-03T10:32:17.252054Z node 158 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_copy_table.cpp:289: TCopyTable TCopyTableBarrier operationId: 1003:0 HandleReply TEvPrivate::TEvCompleteBarrier, msg: NKikimr::NSchemeShard::TEvPrivate::TEvCompleteBarrier { TxId: 1003 Name: CopyTableBarrier }, at tablet# 72057594046678944 2025-06-03T10:32:17.252061Z node 158 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 240 -> 240 2025-06-03T10:32:17.252946Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:32:17.252964Z node 158 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-03T10:32:17.252983Z node 158 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:32:17.252988Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:32:17.252994Z node 158 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:32:17.252997Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:32:17.253003Z node 158 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: true 2025-06-03T10:32:17.253010Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:32:17.253017Z node 158 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-06-03T10:32:17.253022Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1003:0 2025-06-03T10:32:17.253065Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:32:17.253071Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate source path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 TestWaitNotification wait txId: 1003 2025-06-03T10:32:17.253556Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:32:17.253567Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:32:17.253641Z node 158 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:32:17.253660Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:32:17.253665Z node 158 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [158:654:2589] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:32:17.253749Z node 158 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:17.253801Z node 158 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot" took 78us result status StatusSuccess 2025-06-03T10:32:17.253936Z node 158 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot" PathDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 9 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 9 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 7 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "NewTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: 
"Table" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1002 CreateStep: 5000003 ParentPathId: 1 PathState: EPathStateCopying Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 SecurityState { Audience: "/MyRoot" } } } PathId: 1 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:17.253993Z node 158 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/NewTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:17.254034Z node 158 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/NewTable" took 43us result status StatusSuccess 2025-06-03T10:32:17.254237Z node 158 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/NewTable" PathDescription { Self { Name: "NewTable" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 1003 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "NewTable" Columns { Name: "key1" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "key3" Type: "Uint64" TypeId: 4 Id: 3 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "Utf8" TypeId: 4608 Id: 4 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key1" KeyColumnNames: "key2" KeyColumnNames: "key3" KeyColumnIds: 1 KeyColumnIds: 2 KeyColumnIds: 3 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 
SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 2 } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "\003\000\004\000\000\000\377\377\377\177\000\000\000\200\000\000\000\200" IsPoint: false IsInclusive: false DatashardId: 72075186233409548 } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409549 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 2 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 3 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> TxUsage::WriteToTopic_Demo_15_Table [GOOD] >> THeavyPerfTest::TTestLoadEverything [GOOD] >> THiveImplTest::BootQueueSpeed >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table |69.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHadoopTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 31352, MsgBus: 
8848 2025-06-03T10:32:07.487315Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668998629743097:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.487861Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c3e/r3tmp/tmpBIl97g/pdisk_1.dat 2025-06-03T10:32:07.544572Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668998629743078:2079] 1748946727487134 != 1748946727487137 2025-06-03T10:32:07.544582Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31352, node 1 2025-06-03T10:32:07.558315Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.558326Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.558328Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.558364Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8848 TClient is connected to server localhost:8848 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:07.621004Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.621047Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.621941Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:07.622737Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:07.857103Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668998629743738:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:07.857128Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.490501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.566391Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002924711163:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.566428Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.566465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002924711168:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.567219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.568814Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669002924711170:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.632770Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669002924711211:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.720293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.784738Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:08.862291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.945563Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.013568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.080750Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.092298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.377955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.387317Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.387906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.388250Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE ic ... P_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669036768697692:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.717845Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.717892Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669036768697697:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.718935Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.726960Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669036768697699:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:16.789724Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669036768697739:2387] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:16.862249Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:16.946867Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:17.054312Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.214876Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.316378Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.406720Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.424363Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.750648Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hadoop { } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TxUsage::WriteToTopic_Demo_19_RestartNo_Query [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::PostgreSQLFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 29251, MsgBus: 9947 2025-06-03T10:32:07.551610Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668995846762790:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.551676Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c31/r3tmp/tmp103X0p/pdisk_1.dat 2025-06-03T10:32:07.608999Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668995846762769:2079] 1748946727551415 != 1748946727551418 2025-06-03T10:32:07.611680Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29251, node 1 2025-06-03T10:32:07.625785Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.625798Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.625802Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.625861Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9947 TClient is connected to server localhost:9947 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:07.681524Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.681560Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.682712Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.692498Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
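For context on the PostgreSQLFilterPushdown run that follows: the DescribeTable/ListSplits dumps further down carry the connector settings seen by the test (localhost:5432, database pgdb, basic auth as crab, TLS enabled, NATIVE protocol, schema public). A minimal YQL sketch of an external data source that would produce those settings is shown here; the object names and the secret are illustrative assumptions, since the fixture setup itself is not part of this log:

    -- hypothetical names; connection values are taken from the data_source_instance dumps below
    CREATE OBJECT pg_password (TYPE SECRET) WITH (value = "qwerty12345");

    CREATE EXTERNAL DATA SOURCE external_pg WITH (
        SOURCE_TYPE = "PostgreSQL",
        LOCATION = "localhost:5432",
        DATABASE_NAME = "pgdb",
        AUTH_METHOD = "BASIC",
        LOGIN = "crab",
        PASSWORD_SECRET_NAME = "pg_password",
        USE_TLS = "TRUE",
        PROTOCOL = "NATIVE",
        SCHEMA = "public"
    );
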
2025-06-03T10:32:07.969228Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668995846763428:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:07.969259Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.555584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.594871Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669000141730852:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.594905Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.594933Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669000141730858:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.595740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.597555Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669000141730860:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.664458Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669000141730900:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.757619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.829614Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:08.915298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.988090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.055743Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.132090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.147503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.469273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.484012Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.484558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.484826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "pub ... 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:16.309344Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669035243572632:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.309371Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.815514Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.837195Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669035243572760:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.837219Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.837289Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669035243572765:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.838249Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:16.845878Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669035243572767:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:16.919338Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669035243572808:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:16.993736Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.108579Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:17.218509Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.318979Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.449374Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.541139Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.558153Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.849109Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: POSTGRESQL endpoint { host: "localhost" port: 5432 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: NATIVE pg_options { schema: "public" } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> LocalPartition::DiscoveryHang [GOOD] >> LocalPartition::WithoutPartition >> TxUsage::WriteToTopic_Demo_15_Query >> KqpNewEngine::StaleRO_IndexFollowers-EnableFollowers [GOOD] >> KqpNewEngine::UnionAllPure >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] >> ColumnShardTiers::DSConfigs >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::ClickHouseFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 14117, MsgBus: 3308 2025-06-03T10:32:07.285567Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668998583857306:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.285586Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c46/r3tmp/tmpZ9XGBs/pdisk_1.dat 2025-06-03T10:32:07.345191Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668998583857282:2079] 1748946727285288 != 1748946727285291 2025-06-03T10:32:07.346556Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14117, node 1 2025-06-03T10:32:07.359592Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.359605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.359608Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.359657Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3308 TClient is connected to server localhost:3308 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:07.416932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.416959Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.418098Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.432449Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:07.435196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:07.682772Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668998583857943:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:07.682804Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.287730Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.351448Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002878825371:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.351488Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.351491Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669002878825376:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.352510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.355125Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669002878825378:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.418937Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669002878825418:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:08.512969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.592723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:08.676302Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.742709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.814127Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:08.879857Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.895046Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.349374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715699:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.358289Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715700:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.358639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715701:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.358861Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715702:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" ... 124Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:16.684686Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669033758896840:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:16.684731Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.125199Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.163751Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669038053864265:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.163802Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.164039Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669038053864271:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.164955Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.177163Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:32:17.179741Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669038053864273:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:17.264587Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669038053864313:2390] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:17.353434Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.520192Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:17.652836Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.761626Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.856615Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.943677Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.960467Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:18.548655Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715699:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: CLICKHOUSE endpoint { host: "rc1a-d6dv17lv47v5mcop.db.yandex.net" port: 8443 } database: "pgdb" credentials { basic { username: "crab" password: "qwerty12345" } } use_tls: true protocol: HTTP } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveSaFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 25932, MsgBus: 28695 2025-06-03T10:32:07.767004Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668996971202767:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:07.767031Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c23/r3tmp/tmpYKt9Xm/pdisk_1.dat 2025-06-03T10:32:07.840171Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:07.844979Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668996971202745:2079] 1748946727766780 != 1748946727766783 TServer::EnableGrpc on GrpcPort 25932, node 1 2025-06-03T10:32:07.859891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:07.859905Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:07.859908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:07.859947Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:07.869674Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:07.869724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:07.870784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28695 TClient is connected to server localhost:28695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:07.932160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:08.214120Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001266170702:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.214158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.769110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.831170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001266170832:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.831198Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.831210Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669001266170837:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:08.832079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:08.837870Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669001266170839:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:08.918684Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669001266170880:2388] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:09.003522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.071233Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:09.160533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.262753Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.363120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.464852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:09.477594Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.788664Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.798571Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.799069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715694:0, at schemeshard: 72057594046644480 2025-06-03T10:32:09.799248Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_so ... e] [TPoolFetcherActor] ActorId: [4:7511669039781961603:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.856819Z node 4 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.857514Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:17.860259Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669039781961605:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:17.948621Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669039781961645:2386] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:18.019417Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:18.074830Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:18.220965Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:18.356348Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:18.491138Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:18.576832Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:18.600448Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.028468Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> TxUsage::WriteToTopic_Demo_31_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_3_Table [GOOD] |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema >> TBlobStorageProxySequenceTest::TestGivenBlock42IntersectingPutWhenNodataOkThenOk |69.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.2%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/ut_schema/ydb-core-tx-columnshard-ut_schema |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] >> TxUsage::WriteToTopic_Demo_32_Table >> TBlobStorageProxySequenceTest::TestGivenStripe42WhenGet2PartsOfBlobThenGetOk >> TBlobStorageProxySequenceTest::TestGivenBlock42Put6PartsOnOneVDiskWhenDiscoverThenRecoverFirst >> TBlobStorageProxySequenceTest::TestGivenBlock42IntersectingPutWhenNodataOkThenOk [GOOD] >> TDSProxyGetTest::TestMirror32GetBlobCrcCheck >> DSProxyCounters::MultiPutGeneratedSubrequestBytes [GOOD] >> TDSProxyGetTest::TestBlock42GetSpecific >> TxUsage::Sinks_Oltp_WriteToTopic_3_Query >> KqpNewEngine::UnionAllPure [GOOD] >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin >> KqpRanges::ValidatePredicatesDataQuery [GOOD] >> KqpReturning::Random >> TDSProxyGetTest::TestBlock42GetSpecific [GOOD] >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView >> TDSProxyPatchTest::NaiveErrorOnGetItem_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredOk_ErasureMirror3dc >> TBlobStorageProxySequenceTest::TestGivenBlock42Put6PartsOnOneVDiskWhenDiscoverThenRecoverFirst [GOOD] >> TDSProxyGetTest::TestMirror32GetIntervalsWipedAllOk >> TBlobStorageProxySequenceTest::TestGivenStripe42WhenGet2PartsOfBlobThenGetOk [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGetItem_ErasureNone >> TDSProxyGetTest::TestBlock42GetIntervalsAllOk >> TDSProxyGetTest::TestMirror32GetBlobCrcCheck [GOOD] >> TDSProxyPatchTest::NaiveErrorOnPut_ErasureNone |69.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TDSProxyPatchTest::SecuredOk_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors >> TDSProxyPatchTest::SecuredErrorOnGetItem_ErasureNone [GOOD] >> TDSProxyPatchTest::MovedError_Erasure4Plus2Block ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/federated_query/generic_ut/unittest >> GenericFederatedQuery::IcebergHiveTokenFilterPushdown [GOOD] Test command err: Trying to start YDB, gRPC: 1116, MsgBus: 27338 2025-06-03T10:32:10.014101Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669011763407624:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:10.014225Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002c06/r3tmp/tmpfMKpcj/pdisk_1.dat 2025-06-03T10:32:10.072565Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:10.073511Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669011763407462:2079] 1748946730012177 != 1748946730012180 TServer::EnableGrpc on GrpcPort 1116, node 1 2025-06-03T10:32:10.093613Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.093630Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.093634Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.093691Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27338 2025-06-03T10:32:10.115265Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.115297Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.116437Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27338 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:10.188885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:10.193772Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:10.438972Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669011763408121:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:10.439005Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.016437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:11.081463Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669016058375549:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.081488Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669016058375554:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.081493Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:11.082310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:11.083869Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669016058375556:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:11.156921Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669016058375596:2389] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:11.254763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.323680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:11.404088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.498680Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.568847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.641637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:11.652860Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.945097Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.950881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.951255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715693:0, at schemeshard: 72057594046644480 2025-06-03T10:32:11.951604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "col1" type { type_id: UINT16 } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { ... 06-03T10:32:18.984335Z node 4 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [4:7511669046191435934:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:18.985236Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715659:2, at schemeshard: 72057594046644480 2025-06-03T10:32:18.993919Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715659, at schemeshard: 72057594046644480 2025-06-03T10:32:18.994072Z node 4 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [4:7511669046191435936:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715659 completed, doublechecking } 2025-06-03T10:32:19.090379Z node 4 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [4:7511669050486403273:2387] txid# 281474976715660, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:19.198796Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.343788Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:32:19.468007Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.667206Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.770969Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.877966Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:19.903144Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.288237Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715692:0, at schemeshard: 72057594046644480 Call DescribeTable. 
data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Expected: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } CRAB Actual: data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } table: "example_1" type_mapping_settings { date_time_format: STRING_FORMAT } DescribeTable result. GRpcStatusCode: 0 schema { columns { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } columns { name: "data_column" type { optional_type { item { type_id: STRING } } } } } error { status: SUCCESS } Call ListSplits. selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Expected: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } CRAB Actual: selects { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } from { table: "example_1" } } ListSplits result. GRpcStatusCode: 0 Call ReadSplits. 
splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Expected: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL CRAB Actual: splits { select { data_source_instance { kind: ICEBERG endpoint { } database: "pgdb" credentials { token { type: "IAM" value: "qwerty12345" } } use_tls: false protocol: NATIVE iceberg_options { catalog { hive_metastore { uri: "hive_metastore_uri" } } warehouse { s3 { uri: "s3_uri" endpoint: "s3_endpoint" region: "s3_region" } } } } what { items { column { name: "data_column" type { optional_type { item { type_id: STRING } } } } } items { column { name: "filtered_column" type { optional_type { item { type_id: INT32 } } } } } } from { table: "example_1" } where { filter_typed { comparison { operation: EQ left_value { column: "filtered_column" } right_value { typed_value { type { type_id: INT32 } value { int32_value: 42 } } } } } } } description: "some binary description" } format: ARROW_IPC_STREAMING filtering: FILTERING_OPTIONAL ReadSplits result. 
GRpcStatusCode: 0 >> TDSProxyPatchTest::NaiveErrorOnPut_ErasureNone [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_2_1_VdiskErrors >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors [GOOD] >> TDSProxyGetTest::TestBlock42GetIntervalsWipedError >> TDSProxyGetTest::TestBlock42GetIntervalsAllOk [GOOD] >> TDSProxyPatchTest::MovedOk_ErasureNone >> TDSProxyPatchTest::MovedError_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusErrorWith_1_1_1_VdiskErrors >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_2_1_VdiskErrors [GOOD] >> TBlobStorageProxySequenceTest::TestGivenMirror3DCGetWithFirstSlowDisk >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Block >> Describe::Location [GOOD] >> Describe::DescribePartitionPermissions >> TDSProxyPatchTest::MovedOk_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureMirror3dc ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_1_0_VdiskErrors [GOOD] Test command err: 2025-06-03T10:32:20.961787Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:20.961883Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:20.961891Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:20.961898Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:20.961903Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:20.961909Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:20.961914Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:20.966143Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:20.966234Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:20.966249Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:20.966364Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:20.966378Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:20.966383Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:20.966421Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:20.966484Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:20.966493Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:20.966497Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:20.966518Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-03T10:32:20.966548Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:20.966557Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:20.966561Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:20.966566Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 8 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:20.966570Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 8 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:20.966618Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:0:0] Marker# BPP01 2025-06-03T10:32:20.966631Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:2:0] Marker# BPP01 2025-06-03T10:32:20.966650Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:20.966658Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:20.966723Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.405 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.405 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.405 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 4.657 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 4.734 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 4.825 VDiskId# [0:1:1:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 4.845 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 4.879 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 4.942 VDiskId# [0:1:0:2:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 4.957 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 4.978 VDiskId# [0:1:1:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.007 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.031 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:0:0] NodeId# 3 } TEvVPut{ TimestampMs# 5.033 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.076 VDiskId# [0:1:1:0:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.087 VDiskId# [0:1:2:2:0] NodeId# 3 Status# OK } ] } >> TBlobStorageProxySequenceTest::TestBlock42CheckLwtrack >> TBlobStorageProxySequenceTest::TestBlock42PutWithChangingSlowDisk >> TBlobStorageProxySequenceTest::TestGivenMirror3DCGetWithFirstSlowDisk [GOOD] >> TDSProxyLooksLikeLostTheBlob::TDSProxyLooksLikeLostTheBlobBlock42 >> TBlobStorageProxySequenceTest::TestGivenBlock42GroupGenerationGreaterThanVDiskGenerations [GOOD] >> TDSProxyGetTest::TestBlock42WipedOneDiskAndErrorDurringGet [GOOD] >> TDSProxyPatchTest::NaiveErrorOnPut_ErasureMirror3dc ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_sysview/unittest >> TSchemeShardSysViewTest::AsyncCreateDirWithSysView [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:32:20.927483Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:20.927516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:20.927522Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:20.927528Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:20.927535Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:20.927540Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:20.927551Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:20.927568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:20.927682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:20.927761Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:20.945436Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:20.945468Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:20.963304Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:20.963464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:20.963503Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:20.987738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:20.987826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:20.987998Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:20.988087Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:20.988981Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:20.989036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:20.989429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944
2025-06-03T10:32:20.989444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:20.989456Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:20.989467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:20.989473Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:20.989498Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:20.991127Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:32:21.022639Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:21.022743Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.022860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:32:21.022912Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:21.022926Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.023848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:21.023880Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:21.023949Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.023996Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:21.024003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:21.024010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:21.024594Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.024609Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:21.024616Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:21.025092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.025106Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.025138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:21.025147Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:21.026114Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:21.026646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:21.026692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:21.026907Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:21.026941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:21.026949Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:21.027030Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:21.027040Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:21.027074Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:21.027088Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:21.027588Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:21.027598Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:21.027643Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 44, LocalPathId: 2] 2025-06-03T10:32:21.045601Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 102, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:32:21.045649Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:21.045661Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 2 2025-06-03T10:32:21.045688Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:206:2207], at schemeshard: 72057594046678944, txId: 102, path id: 3 2025-06-03T10:32:21.045848Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 102:0, at schemeshard: 72057594046678944 2025-06-03T10:32:21.045858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 102:0 ProgressState 2025-06-03T10:32:21.045873Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:32:21.045879Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:32:21.045885Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#102:0 progress is 1/1 2025-06-03T10:32:21.045889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:32:21.045894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 102, ready parts: 1/1, is published: false 2025-06-03T10:32:21.045900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 102 ready parts: 1/1 2025-06-03T10:32:21.045905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 102:0 2025-06-03T10:32:21.045910Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 102:0 2025-06-03T10:32:21.045925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:32:21.045932Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 102, publications: 2, subscribers: 0 2025-06-03T10:32:21.045937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 2], 4 2025-06-03T10:32:21.045941Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 102, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:32:21.046105Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:32:21.046120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 4 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:32:21.046126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:32:21.046132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 4 2025-06-03T10:32:21.046137Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:32:21.046241Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:32:21.046255Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 102 2025-06-03T10:32:21.046266Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 102 2025-06-03T10:32:21.046276Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 102, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:32:21.046291Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:32:21.046319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 102, subscribers: 0 2025-06-03T10:32:21.047253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 2025-06-03T10:32:21.047273Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 102 TestModificationResult got TxId: 101, wait until txId: 101 TestModificationResults wait txId: 102 TestModificationResult got TxId: 
102, wait until txId: 102 TestWaitNotification wait txId: 101 2025-06-03T10:32:21.047339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 101: send EvNotifyTxCompletion 2025-06-03T10:32:21.047348Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 101 TestWaitNotification wait txId: 102 2025-06-03T10:32:21.047366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 102: send EvNotifyTxCompletion 2025-06-03T10:32:21.047371Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 102 2025-06-03T10:32:21.047451Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 101, at schemeshard: 72057594046678944 2025-06-03T10:32:21.047474Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 101: got EvNotifyTxCompletionResult 2025-06-03T10:32:21.047481Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 101: satisfy waiter [1:321:2311] 2025-06-03T10:32:21.047524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 102, at schemeshard: 72057594046678944 2025-06-03T10:32:21.047534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 102: got EvNotifyTxCompletionResult 2025-06-03T10:32:21.047538Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 102: satisfy waiter [1:321:2311] TestWaitNotification: OK eventTxId 101 TestWaitNotification: OK eventTxId 102 2025-06-03T10:32:21.047613Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:21.047647Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys" took 47us result status StatusSuccess 2025-06-03T10:32:21.047791Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys" PathDescription { Self { Name: ".sys" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 } ChildrenExist: true } Children { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } 
DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:21.047861Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/.sys/new_sys_view" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:32:21.047881Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/.sys/new_sys_view" took 23us result status StatusSuccess 2025-06-03T10:32:21.047926Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/.sys/new_sys_view" PathDescription { Self { Name: "new_sys_view" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeSysView CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 SysViewVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 0 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } SysViewDescription { Name: "new_sys_view" Type: EPartitionStats } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 |69.3%| [TA] $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors >> TDSProxyFaultTolerancePatchTest::mirror3dc >> TBlobStorageProxySequenceTest::TestBlock42CheckLwtrack [GOOD] >> TDSProxyGetTest::TestBlock42GetSpecific3 >> TDSProxyGetTest::TestBlock42GetIntervalsWipedAllOk >> TBlobStorageProxySequenceTest::TestGivenBlock42MultiPut2ItemsStatuses [GOOD] >> TDSProxyGetTest::TestMirror32GetIntervalsAllOk >> TBlobStorageProxySequenceTest::TestBlock42PutWithChangingSlowDisk [GOOD] >> TDSProxyGetTest::TestBlock42VGetCountWithErasure >> TDSProxyGetTest::TestBlock42GetSpecific3 [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGet_ErasureMirror3dc >> TBlobStorageProxySequenceTest::TestProtobufSizeWithMultiGet >> TDSProxyPatchTest::NaiveErrorOnPut_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_1_2_VdiskErrors >> TDSProxyGetTest::TestBlock42VGetCountWithErasure [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGet_ErasureNone >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors [GOOD] >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3dc [GOOD] Test command err: 2025-06-03T10:32:21.269174Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [4:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:21.269279Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.269290Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.269320Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.269324Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.269329Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:21.269335Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:21.274648Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:21.274748Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.274759Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob 
Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.274873Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:21.274884Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:21.274889Z node 4 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:21.274910Z node 4 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:21.274928Z node 4 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TPut3dcStrategy failed the Fail Model check" ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:21.274939Z node 4 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TPut3dcStrategy failed the Fail Model check" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:21.274995Z node 4 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.47 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 4 } TEvVPut{ TimestampMs# 0.47 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 4 } TEvVPut{ TimestampMs# 0.47 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 5.814 VDiskId# [0:1:0:1:0] NodeId# 4 Status# ERROR } TEvVPut{ TimestampMs# 5.89 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 5.978 VDiskId# [0:1:1:1:0] NodeId# 4 Status# ERROR } TEvVPut{ TimestampMs# 5.994 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 4 } TEvVPutResult{ TimestampMs# 6.012 VDiskId# [0:1:2:1:0] NodeId# 4 Status# ERROR } ] } |69.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |69.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup |69.3%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/generic_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TDSProxyPatchTest::NaiveErrorOnGet_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestBlock42MaxPartCountOnHandoff [GOOD] >> TDSProxyRequestReportningTest::CheckDefaultBehaviour >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_1_2_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Stripe >> TDSProxyGetTest::TestBlock42GetBlobCrcCheck >> KqpReturning::Random [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGet_ErasureNone [GOOD] >> TDSProxyPutTest::TestBlock42MultiPutAllOk |69.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/ydb-core-tx-schemeshard-ut_continuous_backup >> TDSProxyGetTest::TestMirror32GetIntervalsAllOk [GOOD] >> TDSProxyPatchTest::NaiveOk_Erasure4Plus2Block >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Stripe [GOOD] >> BasicStatistics::NotFullStatisticsDatashard [GOOD] >> TDSProxyRequestReportningTest::CheckDefaultBehaviour [GOOD] >> TBlobStorageProxySequenceTest::TestGivenBlock42GetThenVGetResponseParts2523Nodata4ThenGetOk >> ReadIteratorExternalBlobs::ExtBlobsWithCompactingMiddleRows [GOOD] >> ReadIteratorExternalBlobs::ExtBlobsEmptyTable >> TDSProxyPatchTest::NaiveOk_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_3_0_VdiskErrors >> KqpNewEngine::StreamLookupForDataQuery+StreamLookupJoin [GOOD] >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin >> TBlobStorageProxySequenceTest::TestGivenBlock42GetThenVGetResponseParts2523Nodata4ThenGetOk [GOOD] >> TDSProxyGetTest::TestBlock42WipedErrorWithTwoBlobs |69.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |69.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning |69.3%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/ydb-core-tx-schemeshard-ut_background_cleaning ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_0_0_VdiskErrors [GOOD] Test command err: 2025-06-03T10:32:21.713981Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:21.714085Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.714095Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.714101Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.714107Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.714112Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:21.714117Z node 3 :BS_PROXY_PUT 
DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:21.718541Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:21.718616Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.718625Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.718713Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:21.718734Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:21.718784Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:21.718795Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.718800Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.718842Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:21.718861Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:21.718874Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:21.718927Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.438 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.438 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.438 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 4.881 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 4.936 sample PartId# 
[72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.004 VDiskId# [0:1:1:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.024 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.076 VDiskId# [0:1:0:2:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.093 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.131 VDiskId# [0:1:0:0:0] NodeId# 3 Status# OK } ] } >> TDSProxyPutTest::TestBlock42PutStatusErrorWith_3_0_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Stripe >> TDSProxyGetTest::TestBlock42GetBlobCrcCheck [GOOD] >> TDSProxyPatchTest::SecuredErrorOnPut_Erasure4Plus2Block ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyRequestReportningTest::CheckDefaultBehaviour [GOOD] Test command err: 2025-06-03T10:32:21.458067Z node 1 :BS_PROXY_GET INFO: dsproxy_get.cpp:479: [a33ccc40f398d531] bootstrap ActorId# [1:76:2122] Group# 0 Query# {MustRestoreFirst# 0 [1:0:0:0:0:99:0]@0:0} Deadline# 586524-01-19T08:01:49.551615Z RestartCounter# 0 Marker# BPG01 2025-06-03T10:32:21.458266Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:238: [a33ccc40f398d531] query.Id# [1:0:0:0:0:99:0] shift# 0 size# 0 Marker# BPG56 2025-06-03T10:32:21.458301Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? ? ? ? ? ? ?????? ??????} pessimisticReplicas# 0 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458310Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? ? ? ? ? ? ?????? 
??????} pessimisticReplicas# 0 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458323Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 6 Id# [1:0:0:0:0:99:1] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458329Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 4 Id# [1:0:0:0:0:99:1] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458334Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 5 Id# [1:0:0:0:0:99:1] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458339Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 7 Id# [1:0:0:0:0:99:2] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458343Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 4 Id# [1:0:0:0:0:99:2] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458348Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 5 Id# [1:0:0:0:0:99:2] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458353Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 0 Id# [1:0:0:0:0:99:3] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458358Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 4 Id# [1:0:0:0:0:99:3] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458363Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 5 Id# [1:0:0:0:0:99:3] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458374Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 1 Id# [1:0:0:0:0:99:4] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458379Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 4 Id# [1:0:0:0:0:99:4] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458383Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:222: [a33ccc40f398d531] AddGet disk# 5 Id# [1:0:0:0:0:99:4] Intervals# {[0, 32)} Marker# BPG46 2025-06-03T10:32:21.458416Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 0 vget# {ExtrQuery# [1:0:0:0:0:99:3] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458423Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 1 vget# {ExtrQuery# [1:0:0:0:0:99:4] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458431Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 4 vget# {ExtrQuery# [1:0:0:0:0:99:1] sh# 0 sz# 32}{ExtrQuery# [1:0:0:0:0:99:2] sh# 0 sz# 32}{ExtrQuery# [1:0:0:0:0:99:3] sh# 0 sz# 32}{ExtrQuery# [1:0:0:0:0:99:4] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458441Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 5 vget# {ExtrQuery# [1:0:0:0:0:99:1] sh# 0 sz# 32}{ExtrQuery# [1:0:0:0:0:99:2] sh# 0 
sz# 32}{ExtrQuery# [1:0:0:0:0:99:3] sh# 0 sz# 32}{ExtrQuery# [1:0:0:0:0:99:4] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458447Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 6 vget# {ExtrQuery# [1:0:0:0:0:99:1] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458452Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:278: [a33ccc40f398d531] Send get to orderNumber# 7 vget# {ExtrQuery# [1:0:0:0:0:99:2] sh# 0 sz# 32} {MsgQoS ExtQueueId# GetFastRead} Notify# 0 Internals# 0 TabletId# 0 AcquireBlockedGeneration# 0 ForceBlockedGeneration# 0} Marker# BPG14 2025-06-03T10:32:21.458740Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:2] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.458752Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:4:0] orderNumber# 4 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:2] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.458760Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:225: [a33ccc40f398d531] Got# OK orderNumber# 4 vDiskId# [0:1:0:4:0] Marker# BPG58 2025-06-03T10:32:21.458782Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? ? ? ? ? ? ?+???? ??????} pessimisticReplicas# 1 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458790Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? ? ? ? ? ? ?+???? ??????} pessimisticReplicas# 1 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458805Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:2] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.458813Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:7:0] orderNumber# 7 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:2] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.458818Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:225: [a33ccc40f398d531] Got# OK orderNumber# 7 vDiskId# [0:1:0:7:0] Marker# BPG58 2025-06-03T10:32:21.458831Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + ? ? ? ? ?+???? ??????} pessimisticReplicas# 1 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458838Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + ? ? ? ? ?+???? 
??????} pessimisticReplicas# 1 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458851Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:3] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.458857Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:0:0] orderNumber# 0 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:3] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.458862Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:225: [a33ccc40f398d531] Got# OK orderNumber# 0 vDiskId# [0:1:0:0:0] Marker# BPG58 2025-06-03T10:32:21.458876Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + + ? ? ? ?+???? ??????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458882Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + + ? ? ? ?+???? ??????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458893Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:1] NODATA Size# 0 FullDataSize# 99 Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.458899Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:5:0] orderNumber# 5 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:1] NODATA Size# 0 FullDataSize# 99 Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.458905Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:235: [a33ccc40f398d531] Got# NODATA orderNumber# 5 vDiskId# [0:1:0:5:0] Marker# BPG59 2025-06-03T10:32:21.458916Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + + ? ? ? ?+???? -?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458922Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + + ? ? ? ?+???? -?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458939Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + + ? ? ? ?+???? -?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458946Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + + ? ? ? ?+???? 
-?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458960Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + + ? ? ? ?+???? -?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458967Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + + ? ? ? ?+???? -?????} pessimisticReplicas# 2 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.458979Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:4] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.458986Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:1:0] orderNumber# 1 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:4] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.458991Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:225: [a33ccc40f398d531] Got# OK orderNumber# 1 vDiskId# [0:1:0:1:0] Marker# BPG58 2025-06-03T10:32:21.459004Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 0 Parts# {? + + + ? ? ?+???? -?????} pessimisticReplicas# 3 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.459012Z node 1 :BS_PROXY_GET DEBUG: dsproxy_strategy_base.cpp:135: [a33ccc40f398d531] Id# [1:0:0:0:0:99:0] considerSlowAsError# 1 Parts# {? + + + ? ? ?+???? 
-?????} pessimisticReplicas# 3 p.State# EBS_DISINTEGRATED optimisticReplicas# 6 o.State# EBS_FULL altruisticReplicas# 6 a.State# EBS_FULL Marker# BPG44 2025-06-03T10:32:21.459024Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:174: [a33ccc40f398d531] handle result# {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:1] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG57 2025-06-03T10:32:21.459030Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:186: [a33ccc40f398d531] Handle TEvVGetResult status# OK From# [0:1:0:6:0] orderNumber# 6 ev {EvVGetResult QueryResult Status# OK {[1:0:0:0:0:99:1] OK Size# 32 FullDataSize# 99 BufferData# 32b Cookie# 0} BlockedGeneration# 0} Marker# BPG12 2025-06-03T10:32:21.459034Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.h:225: [a33ccc40f398d531] Got# OK orderNumber# 6 vDiskId# [0:1:0:6:0] Marker# BPG58 2025-06-03T10:32:21.459059Z node 1 :BS_PROXY_GET DEBUG: dsproxy_get_impl.cpp:134: [a33ccc40f398d531] Response# TEvGetResult {Status# OK ResponseSz# 1 {[1:0:0:0:0:99:0] OK Size# 99}} Marker# BPG29 2025-06-03T10:32:21.459071Z node 1 :BS_PROXY_GET INFO: dsproxy_get.cpp:407: [a33ccc40f398d531] Result# TEvGetResult {Status# OK ResponseSz# 1 {[1:0:0:0:0:99:0] OK Size# 99}} GroupId# 0 Marker# BPG68 2025-06-03T10:32:21.459110Z node 1 :BS_PROXY_GET DEBUG: {BPG72@dsproxy_get.cpp:425} Query history GroupId# 0 HandleClass# FastRead History# THistory { Entries# [ TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:3] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:4] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:1] QueryCount# 4 VDiskId# [0:1:0:4:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:1] QueryCount# 4 VDiskId# [0:1:0:5:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 1 } TEvVGet{ TimestampMs# 0.467 sample PartId# [1:0:0:0:0:99:2] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 1 } TEvVGetResult{ TimestampMs# 0.769 VDiskId# [0:1:0:4:0] NodeId# 1 Status# OK } TEvVGetResult{ TimestampMs# 0.818 VDiskId# [0:1:0:7:0] NodeId# 1 Status# OK } TEvVGetResult{ TimestampMs# 0.86 VDiskId# [0:1:0:0:0] NodeId# 1 Status# OK } TEvVGetResult{ TimestampMs# 0.9 VDiskId# [0:1:0:5:0] NodeId# 1 Status# OK } GetAcceleration{ TimestampMs# 0.903 } GetAcceleration{ TimestampMs# 0.925 } TEvVGetResult{ TimestampMs# 0.989 VDiskId# [0:1:0:1:0] NodeId# 1 Status# OK } TEvVGetResult{ TimestampMs# 1.035 VDiskId# [0:1:0:6:0] NodeId# 1 Status# OK } ] } |69.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/cms/ut/ydb-services-cms-ut |69.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/cms/ut/ydb-services-cms-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpReturning::Random [GOOD] Test command err: Trying to start YDB, gRPC: 1842, MsgBus: 16008 2025-06-03T10:32:03.547304Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668978726150293:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.547611Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027b2/r3tmp/tmpM2dnne/pdisk_1.dat 
2025-06-03T10:32:03.627074Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1842, node 1 2025-06-03T10:32:03.648918Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.648932Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.648934Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.648996Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:03.650277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.650301Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.651458Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:16008 TClient is connected to server localhost:16008 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:03.729236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.733963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:03.743785Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.779529Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.846507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:03.863731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:04.006408Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668983021119213:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.006444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.049319Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.056252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.111982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.120838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.134789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.148609Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.162995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.182465Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668983021119866:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.182489Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668983021119871:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.182506Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.183300Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.189594Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668983021119873:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:04.253957Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668983021119924:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 }
: Warning: Type annotation, code: 1030
:4:13: Warning: At function: RemovePrefixMembers, At function: RemoveSystemMembers, At function: PersistableRepr, At function: SqlProject
:4:27: Warning: At function: Filter, At lambda, At function: Coalesce
:4:50: Warning: At function: SqlIn
:4:50: Warning: IN may produce unexpected result when used with nullable arguments. Consider adding 'PRAGMA AnsiInForEmptyOrNullableItemsCollections;', code: 1108
: Warning: Execution, code: 1060
:4:13: Warning: Cost Based Optimizer could not be applied to this query: couldn't load statistics, code: 8001 Trying to start YDB, gRPC: 29899, MsgBus: 23309 2025-06-03T10:32:04.661393Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668986166698737:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:04.661415Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027b2/r3tmp/tmpUC2JPY/pdisk_1.dat 2025-06-03T10:32:04.679065Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29899, node 2 2025-06-03T10:32:04.687661Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:04.687677Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:04.687679Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:04.687749Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23309 TClient is connected to server localhost:23309 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Versi ... part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.455117Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:17.474012Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669040071538459:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.474041Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.474147Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669040071538464:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:17.475191Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:17.478747Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669040071538466:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:17.580549Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669040071538517:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:17.780602Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 26871, MsgBus: 16273 2025-06-03T10:32:20.866808Z node 8 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[8:7511669053625914956:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:20.870331Z node 8 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027b2/r3tmp/tmpjnhJxg/pdisk_1.dat 2025-06-03T10:32:20.892941Z node 8 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:20.894442Z node 8 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [8:7511669053625914769:2079] 1748946740864336 != 1748946740864339 TServer::EnableGrpc on GrpcPort 26871, node 8 2025-06-03T10:32:20.904984Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:20.905000Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:20.905002Z node 8 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:20.905068Z node 8 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16273 TClient is connected to server localhost:16273 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:20.978104Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:20.978164Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:20.978698Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.980799Z node 8 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(8, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:20.981836Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.994263Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:21.014343Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:21.039765Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.056068Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:21.351288Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511669057920883698:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.351309Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.363391Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.381720Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.393238Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.407605Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.419905Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.433136Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.447280Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.466711Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511669057920884350:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.466881Z node 8 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.466935Z node 8 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [8:7511669057920884355:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.467729Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:21.473447Z node 8 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [8:7511669057920884357:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:21.532797Z node 8 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [8:7511669057920884408:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:21.684871Z node 8 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 |69.3%| [LD] {RESULT} $(B)/ydb/services/cms/ut/ydb-services-cms-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNamedExpressions::NamedExpressionRandomDataQuery+UseSink [GOOD] Test command err: Trying to start YDB, gRPC: 29128, MsgBus: 2601 2025-06-03T10:32:02.393981Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668976279596261:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:02.394004Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d3/r3tmp/tmpNCQN8h/pdisk_1.dat TServer::EnableGrpc on GrpcPort 29128, node 1 2025-06-03T10:32:02.466493Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:02.478027Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:02.478042Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:02.478045Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:02.478105Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:02.495703Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:02.495742Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:02.496776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:2601 TClient is connected to server localhost:2601 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:02.560515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:02.565120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:02.569496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:02.594521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:02.660348Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:02.679711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:02.807170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668976279597863:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:02.807198Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:02.870826Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.880402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.889930Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.906551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.920923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.979261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:02.991598Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:03.014458Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668980574565816:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:03.014508Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:03.014634Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668980574565821:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:03.015980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:03.021704Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668980574565823:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:03.094993Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668980574565874:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 15815, MsgBus: 9695 2025-06-03T10:32:03.767288Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668979353966750:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.767323Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d3/r3tmp/tmp9OEYO1/pdisk_1.dat 2025-06-03T10:32:03.793686Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15815, node 2 2025-06-03T10:32:03.808507Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:03.808523Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.808526Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.808588Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9695 2025-06-03T10:32:03.874417Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.874467Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.875471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:9695 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:32:03.918353Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperati ... 
:32:18.987314Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.013076Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.037451Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.085725Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511669049437030164:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:19.085806Z node 13 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:19.093658Z node 13 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [13:7511669049437030169:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:19.095052Z node 13 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:19.100379Z node 13 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [13:7511669049437030171:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:19.190666Z node 13 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [13:7511669049437030222:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } [["28519ce5-f4f1-494a-a1fd-ab823969eca0"];["fceb378f-9950-4312-a74e-5fb0553be712"]] [[["9469880e-f4e3-4720-8790-df0c9b7bf051"]];[["a6e22a86-9aae-4892-a92a-7fc7410f8f64"]]] [[["9469880e-f4e3-4720-8790-df0c9b7bf051"]];[["a6e22a86-9aae-4892-a92a-7fc7410f8f64"]]] Trying to start YDB, gRPC: 24790, MsgBus: 61012 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0027d3/r3tmp/tmpcYgDjZ/pdisk_1.dat 2025-06-03T10:32:20.295060Z node 14 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:20.309404Z node 14 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:20.310511Z node 14 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [14:7511669053453563156:2079] 1748946740286950 != 1748946740286953 TServer::EnableGrpc on GrpcPort 24790, node 14 2025-06-03T10:32:20.317630Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:20.317650Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:20.317652Z node 14 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:20.317718Z node 14 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61012 TClient is connected to server localhost:61012 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:20.394676Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:20.394712Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:20.395171Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.395725Z node 14 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(14, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:20.405814Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.417775Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.438538Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.449046Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:20.705432Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511669053453564788:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:20.705609Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:20.710114Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.751011Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.767439Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.788884Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.800291Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.812489Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.873117Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.034525Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511669057748532740:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.034560Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.034714Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511669057748532745:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.035844Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:21.039254Z node 14 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [14:7511669057748532747:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:21.142796Z node 14 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [14:7511669057748532798:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } [["4850d81b-c08d-4e4f-92a5-d691b0ce6b68"];["4d40cec8-6442-4539-bdbc-30e2740d48ee"]] [[["68ff5df9-9b6e-4551-a72d-9a1650a3d0b3"]];[["cef5bec9-79c1-43b0-b8b5-ef2e960e3404"]]] [[["68ff5df9-9b6e-4551-a72d-9a1650a3d0b3"]];[["cef5bec9-79c1-43b0-b8b5-ef2e960e3404"]]] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Stripe [GOOD] Test command err: 2025-06-03T10:32:21.863979Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [2:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:21.864035Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864041Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864044Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864047Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864050Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864052Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864055Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864058Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864060Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864063Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864066Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# 
[72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864069Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864071Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864074Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864077Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864079Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864082Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864087Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.864091Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.864101Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.864106Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.864110Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.864113Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.864117Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:21.864119Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:21.864123Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:21.864125Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# 
[72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:21.864129Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:21.864132Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:21.864135Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:21.864138Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:21.867220Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:21.867257Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867263Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.867267Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.867271Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.867273Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.867276Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.867279Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867281Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867284Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867287Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867289Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 
2025-06-03T10:32:21.867292Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867294Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867297Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867299Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867302Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867305Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867307Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867311Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.867323Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.867327Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.867384Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:21.867399Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:21.867407Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:21.867416Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:21.867427Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:21.867472Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received 
{EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:6:0] Marker# BPP01 2025-06-03T10:32:21.867478Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867481Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867484Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867487Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867489Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867492Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867495Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867497Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867500Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867502Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867505Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867508Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867512Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.867515Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.867520Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.867523Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 7 blob Id# 
[72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.867556Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:7:0] Marker# BPP01 2025-06-03T10:32:21.867561Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867564Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867567Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867569Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867572Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867574Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.867577Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867580Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 error Marker# BPG50 2025-06-03T10:32:21.867583Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 5 optimisticState# EBS_DISINTEGRATED Marker# BPG55 2025-06-03T10:32:21.867602Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UPUUUU } { OrderNumber# 2 Situations# UUPUUU } { OrderNumber# 3 Situations# UUUPUU } { OrderNumber# 4 Situations# UUUUPU } { OrderNumber# 5 Situations# UUUUUP } { OrderNumber# 6 Situations# EUUUUU } { OrderNumber# 7 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:21.867609Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UPUUUU } { OrderNumber# 2 Situations# UUPUUU } { OrderNumber# 3 Situations# UUUPUU } { OrderNumber# 4 Situations# UUUUPU } { OrderNumber# 5 Situations# UUUUUP 
} { OrderNumber# 6 Situations# EUUUUU } { OrderNumber# 7 Situations# EUUUUU } ] " ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:21.867657Z node 2 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.384 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.384 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.384 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.384 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.384 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.385 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 3.47 VDiskId# [0:1:0:0:0] NodeId# 2 Status# ERROR } TEvVPut{ TimestampMs# 3.565 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 3.611 VDiskId# [0:1:0:1:0] NodeId# 2 Status# OK } TEvVPutResult{ TimestampMs# 3.622 VDiskId# [0:1:0:2:0] NodeId# 2 Status# OK } TEvVPutResult{ TimestampMs# 3.631 VDiskId# [0:1:0:3:0] NodeId# 2 Status# OK } TEvVPutResult{ TimestampMs# 3.639 VDiskId# [0:1:0:4:0] NodeId# 2 Status# OK } TEvVPutResult{ TimestampMs# 3.651 VDiskId# [0:1:0:5:0] NodeId# 2 Status# OK } TEvVPutResult{ TimestampMs# 3.697 VDiskId# [0:1:0:6:0] NodeId# 2 Status# ERROR } TEvVPut{ TimestampMs# 3.75 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 3.78 VDiskId# [0:1:0:7:0] NodeId# 2 Status# ERROR } ] } >> TDSProxyPatchTest::SecuredErrorOnPut_Erasure4Plus2Block [GOOD] >> TDSProxyPatchTest::MovedOk_ErasureMirror3dc >> TBlobStorageProxySequenceTest::TestGivenStripe42GetThenVGetResponsePartsNodata263451ThenGetOk >> THiveImplTest::BootQueueSpeed [GOOD] >> THiveImplTest::BalancerSpeedAndDistribution >> TDSProxyFaultTolerancePatchTest::block42 >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Block [GOOD] >> TDSProxyPutTest::TestBlock42MultiPutAllOk [GOOD] >> TDSProxyRequestReportningTest::CheckLeakyBucketBehaviour >> TDSProxyPatchTest::MovedOk_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_1_0_VdiskErrors >> TBlobStorageProxySequenceTest::TestGivenStripe42GetThenVGetResponsePartsNodata263451ThenGetOk [GOOD] >> TDSProxyLooksLikeLostTheBlob::TDSProxyNoDataRegressionBlock42 [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGetItem_ErasureMirror3dc >> TBlobStorageProxySequenceTest::TestGivenBlock42PutWhenPartialGetThenSingleDiskRequestOk >> TDSProxyRequestReportningTest::CheckLeakyBucketBehaviour [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_1_0_VdiskErrors [GOOD] >> TBlobStorageProxySequenceTest::TestProtobufSizeWithMultiGet [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGet_Erasure4Plus2Block >> TDSProxyPatchTest::SecuredErrorOnGetItem_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusOkWith_2_0_VdiskErrors >> TBlobStorageProxySequenceTest::TestGivenBlock42PutWhenPartialGetThenSingleDiskRequestOk [GOOD] >> 
TDSProxyLooksLikeLostTheBlob::TDSProxyErrorRegressionBlock42 [GOOD] >> TDSProxyPatchTest::NaiveOk_ErasureNone >> TTabletPipeTest::TestPipeConnectToHint >> TDSProxyPutTest::TestBlock42PutStatusOkWith_2_0_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3Plus2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsDatashard [GOOD] Test command err: 2025-06-03T10:26:31.884679Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:31.884712Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:31.884720Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001bae/r3tmp/tmp39Q6hG/pdisk_1.dat 2025-06-03T10:26:31.988834Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 11237, node 1 2025-06-03T10:26:32.094423Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:32.094450Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:32.094455Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:32.094527Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:32.095315Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:32.174491Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:32.174533Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:32.186562Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3516 2025-06-03T10:26:32.534874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:33.308776Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:33.320539Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:33.320593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:33.377123Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:33.377840Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:33.542815Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543027Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543212Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:33.543254Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543308Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543328Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543344Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543366Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.543387Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:33.696328Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:33.696379Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:33.708056Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:33.747782Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:33.762144Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:33.762182Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:33.770687Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:33.770753Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:33.770778Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:33.770784Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:33.770791Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:33.770798Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:33.770804Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:33.770812Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:33.770975Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:33.785146Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:33.785182Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:33.786808Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:33.787762Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
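[Editor's note] The aggregator bootstrap above follows a two-phase transaction pattern: each TTx first runs Execute (TTxInitSchema::Execute, then TTxInit::Execute loading the BaseStatistics, ColumnStatistics, ScheduleTraversals and ForceTraversal tables) and only afterwards Complete. The self-contained illustration below shows that Execute/Complete shape; the interface and pipeline here are hypothetical stand-ins, not the real tablet executor API.

// Self-contained illustration of the Execute/Complete two-phase transaction
// pattern the aggregator log follows (TTxInitSchema::Execute ... ::Complete,
// then TTxInit::Execute ... ::Complete). Hypothetical types only.
#include <cstdio>
#include <memory>
#include <queue>

struct ITx {
    virtual ~ITx() = default;
    virtual bool Execute() = 0;   // read/write local DB; may be retried
    virtual void Complete() = 0;  // runs once, after the change is durable
};

struct TTxInitSchema : ITx {
    bool Execute() override { std::puts("TTxInitSchema::Execute"); return true; }
    void Complete() override { std::puts("TTxInitSchema::Complete"); }
};

struct TTxInit : ITx {
    bool Execute() override {
        // In the log this step loads BaseStatistics, ColumnStatistics,
        // ScheduleTraversals and ForceTraversal tables (all count# 0 here).
        std::puts("TTxInit::Execute");
        return true;
    }
    void Complete() override { std::puts("TTxInit::Complete"); }
};

int main() {
    std::queue<std::unique_ptr<ITx>> pipeline;
    pipeline.push(std::make_unique<TTxInitSchema>());
    pipeline.push(std::make_unique<TTxInit>());
    while (!pipeline.empty()) {
        auto& tx = pipeline.front();
        while (!tx->Execute()) {}  // retry Execute until it succeeds
        tx->Complete();
        pipeline.pop();
    }
}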
2025-06-03T10:26:33.787904Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:33.789870Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:33.793659Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:33.793683Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:33.793697Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:33.798650Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:33.800809Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:33.800853Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:33.911218Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:33.985272Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:34.027836Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:34.596597Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.596647Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.601224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:34.822306Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2514:3109], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.822365Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:34.829470Z node 1 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 1 ], ReplyToActorId[ [1:2519:3113]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:26:34.829525Z node 1 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 1 ] 2025-06-03T10:26:34.829536Z node 1 :STATISTICS DEBUG: service_impl.cpp:1219: ConnectToSA(), pipe client id = [1:2521:3115] 2025-06-03T10:26:34.829546Z node 1 :STATISTICS DEBUG: service_impl.cpp:1248: SyncNode(), pipe client id = [1:2521:3115] 2025-06-03T10:26:34.829703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:2522:2979] 2025-06-03T10:26:34.829775Z node 1 :STATISTICS DEBUG: service_impl.cpp:1086: EvClientConnected, node id = 1, client id = [1:2521:3115], server id = [2:2522:2979], tablet id = 72075186224037894, status = OK 2025-06-03T10:26:34.829817Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:133: [72075186224037894] EvConnectNode, pipe server id = [2:2522:2979], node id = 1, have schemeshards count = 0, need schemeshards count = 1 2025-06-03T10:26:34.829854Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:314: [72075186224037894] SendStatisticsToNode(), node id = 1, schemeshard count = 1 2025-06-03T10:26:34.829905Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:26:34.829914Z node 1 :STATISTICS DEBUG ... .cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:31:33.136581Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:31:33.146972Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:33.147012Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:35.545651Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:35.545694Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:36.746733Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:31:37.963526Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:31:37.963577Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:31:37.963583Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:31:37.963589Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:31:38.144317Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:38.144358Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:31:39.390077Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:31:39.390272Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:31:39.390379Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:31:40.673679Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:40.673723Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:43.056691Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:31:43.067093Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:43.067141Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:45.403217Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:31:45.403330Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:31:45.403471Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:31:45.413919Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:45.413958Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:47.581087Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:47.581129Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:48.685716Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:31:49.976455Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:49.976485Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:51.217546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:31:51.217754Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:31:51.217842Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:31:52.433564Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:52.433607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:54.646073Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:31:54.656363Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:54.656392Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
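[Editor's note] The long tail of this statistics log is one cadence repeated: the aggregator periodically fires PropagateStatistics (each node logs EvPropagateStatistics in response) and, on a separate timer, ScheduleNextTraversal finds no force traversals to run. The toy loop below reproduces only that alternation; the names, tick length and structure are assumptions for illustration, not the real aggregator_impl.cpp internals.

// Toy scheduler reproducing the alternation visible in the log; all names
// and timings are assumptions for illustration only.
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

struct TAggregator {
    std::vector<int> Nodes{1, 2};
    void PropagateStatistics() {
        std::printf("PropagateStatistics(), node count = %zu\n", Nodes.size());
        for (int n : Nodes)
            std::printf("EvPropagateStatistics, node id = %d\n", n);
    }
    void ScheduleNextTraversal() {
        // The log never finds pending work here: "No force traversals."
        std::puts("ScheduleNextTraversal. No force traversals.");
    }
};

int main() {
    TAggregator agg;
    for (int tick = 0; tick < 3; ++tick) {
        agg.PropagateStatistics();
        agg.ScheduleNextTraversal();
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}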
2025-06-03T10:31:57.177049Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:31:57.177208Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:31:57.177280Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:31:57.187676Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:57.187718Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:31:59.458180Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:31:59.458212Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:00.505703Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:32:01.696649Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:01.696688Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:02.890922Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:32:02.891024Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:32:02.891202Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:32:04.166453Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:04.166506Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:06.433839Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:32:06.444286Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:06.444322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:08.334287Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:32:08.334361Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:32:08.334501Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:32:08.344917Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:08.344963Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:10.266434Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:10.266487Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:32:11.252844Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:32:12.347088Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:32:12.347132Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:32:12.347138Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:32:12.347143Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:32:12.537589Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:12.537623Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:13.836036Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:32:13.836193Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 2025-06-03T10:32:13.836283Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:32:15.149574Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:15.149610Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:17.621387Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:32:17.631802Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:32:17.631841Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:32:18.965396Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-03T10:32:18.965451Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 226.000000s, at schemeshard: 72075186224037897 2025-06-03T10:32:18.965700Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 49 ... waiting for TEvSchemeShardStats 2 (done) ... waiting for TEvPropagateStatistics 2025-06-03T10:32:18.990065Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:32:20.319157Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 2, schemeshard count = 1 2025-06-03T10:32:20.319358Z node 1 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 1 ... 
waiting for TEvPropagateStatistics (done) 2025-06-03T10:32:20.319479Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 3 ], ReplyToActorId[ [2:12627:7158]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:32:20.319662Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:32:20.335795Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 3 ] 2025-06-03T10:32:20.335829Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 3, ReplyToActorId = [2:12627:7158], StatRequests.size() = 1 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Block [GOOD] Test command err: 2025-06-03T10:32:21.141261Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:21.141540Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141553Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141558Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141561Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141565Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141570Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141575Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141581Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141585Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141589Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141593Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141598Z node 3 :BS_PROXY_PUT 
DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141602Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141606Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141611Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141615Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141619Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141626Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.141633Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.141652Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.141659Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.141666Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.141670Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.141675Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:21.141680Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:21.141686Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:21.141691Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:21.141695Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] 
partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:21.141699Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:21.141705Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:21.141709Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:21.146871Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:21.146943Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.146955Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.146961Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.146968Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.146973Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.146979Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.146985Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.146990Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.146995Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147001Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147006Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147011Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 
part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147016Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147022Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147027Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147032Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147037Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147042Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147051Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.147071Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:21.147080Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:21.147175Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075 ... 
:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.147189Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 error Marker# BPG50 2025-06-03T10:32:21.147194Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147200Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147205Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147210Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147215Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147220Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147226Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147231Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147236Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147241Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147245Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147248Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147252Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147256Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147260Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# 
ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147265Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147269Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:21.147276Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:21.147281Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:21.147309Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:21.147323Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:21.147335Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:21.147350Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:21.147414Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:6:0] Marker# BPP01 2025-06-03T10:32:21.147421Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:21.147425Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 error Marker# BPG50 2025-06-03T10:32:21.147429Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.147433Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.147437Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.147441Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:21.147445Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# 
[72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 error Marker# BPG50 2025-06-03T10:32:21.147449Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147453Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:21.147459Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147463Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147466Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147470Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:21.147475Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 5 optimisticState# EBS_DISINTEGRATED Marker# BPG55 2025-06-03T10:32:21.147504Z node 3 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUPUUU } { OrderNumber# 3 Situations# UUUPUU } { OrderNumber# 4 Situations# UUUUPU } { OrderNumber# 5 Situations# UUUUUP } { OrderNumber# 6 Situations# EUUUUU } { OrderNumber# 7 Situations# USUUUU } ] " ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:21.147516Z node 3 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUPUUU } { OrderNumber# 3 Situations# UUUPUU } { OrderNumber# 4 Situations# UUUUPU } { OrderNumber# 5 Situations# UUUUUP } { OrderNumber# 6 Situations# EUUUUU } { OrderNumber# 7 Situations# USUUUU } ] " ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:21.147598Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.798 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.799 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 
VDiskId# [0:1:0:4:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.799 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.799 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.799 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.799 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.969 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 6.154 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.222 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 6.33 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.356 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.368 VDiskId# [0:1:0:3:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.381 VDiskId# [0:1:0:4:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.396 VDiskId# [0:1:0:5:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.461 VDiskId# [0:1:0:6:0] NodeId# 3 Status# ERROR } ] } >> TDSProxyPatchTest::NaiveOk_ErasureNone [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusOkWith_1_0_VdiskErrors >> TTabletLabeledCountersAggregator::Version3Aggregation >> TDSProxyPatchTest::SecuredErrorOnGet_Erasure4Plus2Block [GOOD] >> TDSProxyPatchTest::MovedError_ErasureMirror3dc >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3Plus2 [GOOD] >> TTabletPipeTest::TestKillClientBeforServerIdKnown >> ReadIteratorExternalBlobs::ExtBlobsEmptyTable [GOOD] >> ReadIteratorExternalBlobs::NotExtBlobs ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_1_0_VdiskErrors [GOOD] Test command err: 2025-06-03T10:32:23.091638Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:23.091712Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.091721Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.091727Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.091732Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.091737Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.091742Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing 
VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:23.096761Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:23.096830Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.096840Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.096927Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:23.096938Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.096943Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:23.096967Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:23.097029Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:23.097043Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-03T10:32:23.097062Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:23.097076Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:23.097135Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.415 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.415 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.415 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.453 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.509 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ 
TimestampMs# 5.577 VDiskId# [0:1:1:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.595 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.616 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.677 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.692 VDiskId# [0:1:1:2:0] NodeId# 3 Status# OK } ] } |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyRequestReportningTest::CheckLeakyBucketBehaviour [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusOkWith_1_0_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Stripe >> TDSProxyPatchTest::MovedError_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusErrorWith_2_2_0_VdiskErrors >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] >> TTabletLabeledCountersAggregator::Version3Aggregation [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen >> TTabletPipeTest::TestPipeConnectToHint [GOOD] |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3Plus2 [GOOD] Test command err: 2025-06-03T10:32:23.440421Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:23.440496Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440504Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440508Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440513Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440518Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440523Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440528Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440532Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440537Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 
part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440541Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440545Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440550Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440554Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440559Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440563Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440567Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440571Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440579Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.440586Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:23.440600Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.440607Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.440614Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.440618Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.440624Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.440629Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 
2025-06-03T10:32:23.440635Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:23.440639Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:23.440644Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:23.440649Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:23.440656Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:23.440660Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:23.445610Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:23.445670Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:23.445681Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445686Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445691Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445696Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445700Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445706Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445710Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445714Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445719Z node 3 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445724Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445729Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445734Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445740Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445745Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445750Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445755Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445760Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445771Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:23.445790Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.445796Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.445887Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:23.445896Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:23.445901Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 error Marker# BPG50 2025-06-03T10:32:23.445906Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445911Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# 
[72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445915Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445919Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445924Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.445929Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445933Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445937Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445941Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445945Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445949Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445953Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445960Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445964Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445968Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445973Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.445977Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:23.445986Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 
blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.445992Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.446028Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:23.446045Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:23.446057Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:23.446072Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:23.446134Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:6:0] Marker# BPP01 2025-06-03T10:32:23.446147Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:7:0] Marker# BPP01 2025-06-03T10:32:23.446165Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:23.446175Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:23.446272Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.531 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.531 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.531 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.531 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.532 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.532 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.489 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.655 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.725 VDiskId# [0:1:0:1:0] NodeId# 3 
Status# ERROR } TEvVPut{ TimestampMs# 5.833 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.865 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.881 VDiskId# [0:1:0:3:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.893 VDiskId# [0:1:0:4:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.908 VDiskId# [0:1:0:5:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.972 VDiskId# [0:1:0:6:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.985 VDiskId# [0:1:0:7:0] NodeId# 3 Status# OK } ] } >> TTabletPipeTest::TestKillClientBeforServerIdKnown [GOOD] >> TTabletPipeTest::TestInterconnectSession >> TDSProxyPutTest::TestMirror3dcPutStatusErrorWith_2_2_0_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasureNone [GOOD] >> DSProxyCounters::PutGeneratedSubrequestBytes |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/storagepoolmon/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Stripe [GOOD] |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest |69.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |69.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |69.4%| [LD] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/ydb-public-sdk-cpp-src-client-persqueue_public-ut |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestPipeConnectToHint [GOOD] >> TTabletPipeTest::TestInterconnectSession [GOOD] >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] >> DSProxyCounters::PutGeneratedSubrequestBytes [GOOD] >> TDSProxyGetTest::TestBlock42GetSpecific2 [GOOD] >> TDSProxyPatchTest::SecuredOk_ErasureNone [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGet_Erasure4Plus2Block ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/opt/unittest >> KqpNewEngine::StreamLookupForDataQuery-StreamLookupJoin [GOOD] Test command err: Trying to start YDB, gRPC: 16034, MsgBus: 30118 2025-06-03T10:32:03.788216Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668981293640740:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:03.788235Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279d/r3tmp/tmpmDlkZw/pdisk_1.dat 2025-06-03T10:32:03.869976Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16034, node 1 2025-06-03T10:32:03.888592Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:03.888636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:03.889700Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:03.890185Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use 
file: (empty maybe) 2025-06-03T10:32:03.890196Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:03.890198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:03.890246Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30118 TClient is connected to server localhost:30118 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:03.955968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:03.961660Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:32:04.256170Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985588608651:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.256196Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.301687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.364104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.372727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.392331Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715762:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.400579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715763:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.414438Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.476978Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715766:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.492327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715767:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.511556Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715770:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.526939Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715771:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.546431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715774:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.554770Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715775:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.568028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at 
schemeshard: 72057594046644480 2025-06-03T10:32:04.588396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715778:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.643869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715779:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.665385Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715782:2, at schemeshard: 72057594046644480 2025-06-03T10:32:04.680505Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715783:0, at schemeshard: 72057594046644480 2025-06-03T10:32:04.695592Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985588609981:2407], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.695622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.722350Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985588610273:2443], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.722381Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.722386Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668985588610278:2446], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:04.723189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:04.729045Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668985588610280:2447], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:32:04.823434Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668985588610340:3478] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 23], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } [] Trying to start YDB, gRPC: 29129, MsgBus: 27707 2025-06-03T10:32:05.480461Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511668988937092893:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:05.480487Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279d/r3tmp/tmp6vrxrb/pdisk_1.dat 2025-06-03T10:32:05.498987Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29129, node 2 2025-06-0 ... tion part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.461060Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.475471Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.488695Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:21.510454Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669057239862726:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.510484Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.510598Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669057239862731:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:21.513247Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:21.517537Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669057239862733:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:32:21.618610Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669057239862784:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 5197, MsgBus: 8540 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00279d/r3tmp/tmpm8408V/pdisk_1.dat 2025-06-03T10:32:22.419465Z node 7 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[7:7511669059890653684:2130];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:22.419717Z node 7 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:22.454097Z node 7 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:22.454496Z node 7 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [7:7511669059890653595:2079] 1748946742417262 != 1748946742417265 TServer::EnableGrpc on GrpcPort 5197, node 7 2025-06-03T10:32:22.473762Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:22.473775Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:22.473777Z node 7 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:22.473843Z node 7 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8540 2025-06-03T10:32:22.538915Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:22.538950Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:22.539605Z node 7 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(7, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:8540 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:22.565509Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:22.614234Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:22.662392Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:22.702453Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:22.726869Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:22.926095Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669059890655245:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:22.926137Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:22.936269Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:22.946875Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:32:22.958639Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:22.980029Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:32:22.994097Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:32:23.014923Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:32:23.075027Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:32:23.109433Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669064185623199:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:23.109519Z node 7 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:23.109717Z node 7 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [7:7511669064185623204:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:23.110969Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:32:23.114347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710669, at schemeshard: 72057594046644480 2025-06-03T10:32:23.114560Z node 7 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [7:7511669064185623206:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:32:23.194393Z node 7 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [7:7511669064185623257:3399] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus2Stripe [GOOD] Test command err: 2025-06-03T10:32:22.330064Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [2:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:22.330162Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330169Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330174Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330179Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330184Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330188Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330194Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330198Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330203Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330207Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330212Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330216Z node 2 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330221Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330225Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330230Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330234Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330238Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330245Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.330253Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:22.330270Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:22.330278Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:22.330284Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:22.330288Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:22.330294Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:22.330299Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:22.330304Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:22.330308Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:22.330313Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] 
partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:22.330317Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:22.330323Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:22.330327Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:22.334074Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:22.334123Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:22.334129Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334133Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334138Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334141Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334143Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334147Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334149Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334152Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334155Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334158Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334161Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 
part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334163Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334166Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334169Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334172Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334175Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334177Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334183Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:22.334201Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:22.334205Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:22.334276Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:22.334283Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:22.334289Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 error Marker# BPG50 2025-06-03T10:32:22.334294Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334299Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334303Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334307Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334312Z 
node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334316Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334320Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334325Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334330Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334334Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334337Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334340Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334343Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334346Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334348Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334353Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334357Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:22.334363Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:22.334368Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:22.334396Z node 2 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:22.334404Z node 2 :BS_PROXY_PUT 
DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:22.334409Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 error Marker# BPG50 2025-06-03T10:32:22.334413Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 error Marker# BPG50 2025-06-03T10:32:22.334418Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334421Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334423Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334426Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334429Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334432Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334434Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334437Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334440Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334443Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334447Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:22.334450Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334453Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334456Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# 
ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334458Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:22.334462Z node 2 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 5 optimisticState# EBS_DISINTEGRATED Marker# BPG55 2025-06-03T10:32:22.334483Z node 2 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } ] " ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:22.334490Z node 2 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TRestoreStrategy saw optimisticState# EBS_DISINTEGRATED GroupId# 0 BlobId# [72075186224047637:1:863:1:24576:786:0] Reported ErrorReasons# [ ] Part situations# [ { OrderNumber# 0 Situations# EUUUUU } { OrderNumber# 1 Situations# UEUUUU } { OrderNumber# 2 Situations# UUEUUU } { OrderNumber# 3 Situations# UUUSUU } { OrderNumber# 4 Situations# UUUUSU } { OrderNumber# 5 Situations# UUUUUS } { OrderNumber# 6 Situations# SUUUUU } { OrderNumber# 7 Situations# USUUUU } ] " ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:22.334531Z node 2 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 2 } TEvVPut{ TimestampMs# 0.567 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 4.298 VDiskId# [0:1:0:0:0] NodeId# 2 Status# ERROR } TEvVPut{ TimestampMs# 4.414 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 4.465 VDiskId# [0:1:0:1:0] NodeId# 2 Status# ERROR } TEvVPut{ TimestampMs# 4.561 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 2 } TEvVPutResult{ TimestampMs# 4.586 VDiskId# [0:1:0:2:0] NodeId# 2 Status# ERROR } ] } ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasureNone [GOOD] Test command err: 2025-06-03T10:32:23.930398Z node 7 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [7:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:23.930475Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.930484Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.930490Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.930495Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.930501Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.930507Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:23.934678Z node 7 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:23.934751Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.934761Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.934855Z node 7 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:23.934864Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.934868Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:23.934881Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:23.934949Z node 7 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] 
Marker# BPP01 2025-06-03T10:32:23.934955Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.934958Z node 7 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.934972Z node 7 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-03T10:32:23.934988Z node 7 :BS_PROXY_PUT ERROR: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TPut3dcStrategy failed the Fail Model check" ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:23.934996Z node 7 :BS_PROXY_PUT NOTICE: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# ERROR StatusFlags# { } ErrorReason# "TPut3dcStrategy failed the Fail Model check" ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:23.935047Z node 7 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.363 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.363 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 7 } TEvVPut{ TimestampMs# 0.363 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 7 } TEvVPutResult{ TimestampMs# 4.554 VDiskId# [0:1:0:1:0] NodeId# 7 Status# ERROR } TEvVPut{ TimestampMs# 4.613 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 7 } TEvVPutResult{ TimestampMs# 4.686 VDiskId# [0:1:1:1:0] NodeId# 7 Status# ERROR } TEvVPut{ TimestampMs# 4.698 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 7 } TEvVPutResult{ TimestampMs# 4.71 VDiskId# [0:1:2:1:0] NodeId# 7 Status# OK } TEvVPutResult{ TimestampMs# 4.777 VDiskId# [0:1:0:2:0] NodeId# 7 Status# ERROR } TEvVPut{ TimestampMs# 4.788 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 7 } TEvVPutResult{ TimestampMs# 4.802 VDiskId# [0:1:1:2:0] NodeId# 7 Status# ERROR } ] } >> TDSProxyPatchTest::NaiveErrorOnGet_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestMirror3dcWith3x3MinLatencyMod >> TDSProxyLooksLikeLostTheBlob::TDSProxyLooksLikeLostTheBlobBlock42 [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGetItem_Erasure4Plus2Block >> TDSProxyPutTest::TestMirror3dcWith3x3MinLatencyMod [GOOD] >> TColumnShardTestSchema::HotTiersAfterTtl ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestClientDisconnectAfterPipeOpen [GOOD] Test command err: LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } 
Delimiter: "|" } CounterNames: "value1" { LabeledCountersByGroup { Group: "aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } LabeledCountersByGroup { Group: "cons/aaa|1|aba/caba/daba|man" LabeledCounter { Value: 13 AggregateFunc: EAF_SUM Type: CT_SIMPLE NameId: 0 } Delimiter: "|" } CounterNames: "value1" } 2025-06-03T10:32:24.149614Z node 3 :PIPE_SERVER ERROR: tablet_pipe_server.cpp:228: [9437185] NodeDisconnected NodeId# 2 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestInterconnectSession [GOOD] Test command err: 2025-06-03T10:32:23.889903Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:315: [9437185] Detach 2025-06-03T10:32:23.907538Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-03T10:32:23.909884Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [9437185] Activate 2025-06-03T10:32:23.911441Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:129:2154] 2025-06-03T10:32:23.911475Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:129:2154] 2025-06-03T10:32:23.911565Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:129:2154] 2025-06-03T10:32:23.911576Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:129:2154] 2025-06-03T10:32:23.911587Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:129:2154] 2025-06-03T10:32:23.911591Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:129:2154] 2025-06-03T10:32:23.911603Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:129:2154] 2025-06-03T10:32:23.912654Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:129:2154] Type# 269877249 Reason# ActorUnknown 2025-06-03T10:32:23.912746Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:132:2156] 2025-06-03T10:32:23.912752Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:132:2156] 2025-06-03T10:32:23.912770Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:132:2156] 2025-06-03T10:32:23.912779Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:132:2156] 2025-06-03T10:32:23.912788Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:132:2156] 2025-06-03T10:32:23.912794Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:132:2156] 2025-06-03T10:32:23.912806Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:132:2156] 2025-06-03T10:32:23.912841Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:132:2156] Type# 269877249 Reason# ActorUnknown 2025-06-03T10:32:23.912860Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[9437185] ::Bootstrap [1:134:2158] 2025-06-03T10:32:23.912864Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[9437185] lookup [1:134:2158] 2025-06-03T10:32:23.912873Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[9437185] forward result local node, try to connect [1:134:2158] 2025-06-03T10:32:23.912877Z node 1 
:PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[9437185]::SendEvent [1:134:2158] 2025-06-03T10:32:23.912884Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:387: TClient[9437185] poison pill while connecting [1:134:2158] 2025-06-03T10:32:23.912888Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[9437185] connect failed [1:134:2158] 2025-06-03T10:32:23.912896Z node 1 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [9437185] Accept Connect Originator# [1:134:2158] 2025-06-03T10:32:23.912910Z node 1 :PIPE_SERVER INFO: tablet_pipe_server.cpp:236: [9437185] Undelivered Target# [1:134:2158] Type# 269877249 Reason# ActorUnknown >> TDSProxyPatchTest::SecuredErrorOnGetItem_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusOkWith_1_1_VdiskErrors >> TDSProxyPutTest::TestBlock42PutStatusOkWith_1_1_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3 [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry |69.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest |69.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcWith3x3MinLatencyMod [GOOD] >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn-WritePortionsOnInsert >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn-WritePortionsOnInsert >> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn-WritePortionsOnInsert >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn-WritePortionsOnInsert >> LocalPartition::WithoutPartition [GOOD] >> LocalPartition::WithoutPartitionWithRestart >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop >> TGRpcCmsTest::AlterRemoveTest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasureMirror3 [GOOD] Test command err: 2025-06-03T10:32:25.253412Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:25.253492Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253499Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253504Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253508Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253512Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253516Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# 
BPG51 2025-06-03T10:32:25.253519Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253523Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253526Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253530Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253534Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253537Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253541Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253545Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253548Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253552Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253556Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253562Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.253570Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:25.253587Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:25.253595Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:25.253601Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:25.253605Z node 3 
:BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:25.253610Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:25.253614Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:25.253618Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:25.253622Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:25.253626Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:25.253630Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:25.253636Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:25.253640Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:25.258370Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:25.258424Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:25.258432Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:25.258437Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:25.258444Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:25.258449Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:25.258453Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:25.258457Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# 
[72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258462Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258466Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258469Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258474Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258478Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258482Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258486Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258490Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258493Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258498Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258502Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258508Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:25.258526Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:25.258533Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:25.258620Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:25.258636Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK 
ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:25.258648Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:25.258658Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:25.258672Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:25.258717Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:6:0] Marker# BPP01 2025-06-03T10:32:25.258725Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:25.258730Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:25.258735Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:25.258739Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:25.258743Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:25.258747Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Present Marker# BPG51 2025-06-03T10:32:25.258751Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 error Marker# BPG50 2025-06-03T10:32:25.258755Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258759Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258763Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258767Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258771Z node 3 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258777Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:25.258781Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:25.258788Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:25.258792Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:25.258830Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:7:0] Marker# BPP01 2025-06-03T10:32:25.258888Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:25.258896Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:25.258988Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.864 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.599 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.745 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.812 VDiskId# [0:1:0:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.827 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.838 VDiskId# [0:1:0:3:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.848 VDiskId# [0:1:0:4:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.862 VDiskId# [0:1:0:5:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.91 VDiskId# [0:1:0:6:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.987 sample PartId# 
[72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:7:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.021 VDiskId# [0:1:0:7:0] NodeId# 3 Status# OK } ] } >> TGRpcCmsTest::DescribeOptionsTest |69.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> THiveImplTest::BalancerSpeedAndDistribution [GOOD] >> THiveImplTest::TestShortTabletTypes [GOOD] >> THiveImplTest::TestStDev [GOOD] >> THiveTest::TestBlockCreateTablet >> TGRpcCmsTest::SimpleTenantsTestSyncOperation |69.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest |69.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest |69.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf >> TGRpcCmsTest::AuthTokenTest |69.5%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |69.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/perf/ydb-core-kqp-ut-perf |69.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> TxUsage::WriteToTopic_Demo_32_Table [GOOD] |69.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator |69.5%| [LD] {RESULT} $(B)/ydb/core/tx/replication/controller/ut_dst_creator/ydb-core-tx-replication-controller-ut_dst_creator >> TGRpcCmsTest::DisabledTxTest >> TxUsage::WriteToTopic_Demo_5_Table [GOOD] >> THiveTest::TestBlockCreateTablet [GOOD] >> THiveTest::DrainWithHiveRestart >> TGRpcCmsTest::SimpleTenantsTest >> TGRpcCmsTest::AlterRemoveTest [GOOD] >> TGRpcCmsTest::DescribeOptionsTest [GOOD] >> TxUsage::WriteToTopic_Demo_5_Query >> Compression::WriteRAW >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] >> TxUsage::WriteToTopic_Demo_15_Query [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AlterRemoveTest [GOOD] Test command err: 2025-06-03T10:32:26.092117Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669077713475922:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:26.092683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00277d/r3tmp/tmpak8yVv/pdisk_1.dat 2025-06-03T10:32:26.174030Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29634, node 1 2025-06-03T10:32:26.189534Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:26.189590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:26.191018Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:26.191031Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:26.191034Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:26.191082Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 
2025-06-03T10:32:26.191168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28967 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:26.236174Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:26.254559Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7511669077713476608:2312], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:54408" } 2025-06-03T10:32:26.254574Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-03T10:32:26.254583Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.254586Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.254606Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:54408" 2025-06-03T10:32:26.254652Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1748946746254320) 2025-06-03T10:32:26.254750Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1748946746254320 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-03T10:32:26.254793Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-03T10:32:26.256470Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-03T10:32:26.256682Z 
node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746254320&action=1" } } } 2025-06-03T10:32:26.256742Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.256768Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:26.256800Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:26.257877Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669077713476626:2314], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746254320&action=1" } UserToken: "" } 2025-06-03T10:32:26.257890Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:26.257935Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746254320&action=1" } } 2025-06-03T10:32:26.257988Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-03T10:32:26.258025Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:32:26.259379Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-03T10:32:26.259396Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.259410Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7511669077713476619:2193], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.259414Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.259419Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.259421Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.259431Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-03T10:32:26.259436Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-03T10:32:26.259451Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-03T10:32:26.266850Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.266869Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.266871Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.266873Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.266902Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-03T10:32:26.266911Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1748946746254320 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.271040Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:26.271099Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.271120Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-03T10:32:26.271122Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-03T10:32:26.272138Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-03T10:32:26.272736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976710658:1, at schemeshard: 72057594046644480 2025-06-03T10:32:26.277871Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-03T10:32:26.277893Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710658 2025-06-03T10:32:26.279642Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710658 2025-06-03T10:32:26.282412Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710658 2025-06-03T10:32:26.282610Z node 1 :CMS_TENANTS DEBUG: console_tenants_ma ... 
0:32:26.318639Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:634: TSubDomainManip(/Root/users/user-1) drop subdomain 2025-06-03T10:32:26.318670Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:651: TSubdomainManip(/Root/users/user-1) send subdomain drop cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root/users" OperationType: ESchemeOpForceDropExtSubDomain Drop { Name: "user-1" } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-03T10:32:26.318956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5417: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976710660 2025-06-03T10:32:26.319048Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpForceDropExtSubDomain, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:32:26.319600Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669077713476846:2322], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746316290&action=2" } UserToken: "" } 2025-06-03T10:32:26.319610Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:26.319653Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746316290&action=2" } } 2025-06-03T10:32:26.322599Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.322618Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.322687Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710659 2025-06-03T10:32:26.322694Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-03T10:32:26.322706Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:26.322725Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976710660 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 2025-06-03T10:32:26.322739Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976710660 2025-06-03T10:32:26.322755Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7511669077713476756:2193], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:26.322763Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:26.322771Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3661: Ignoring ready subdomain for tenant /Root/users/user-1 in REMOVING_SUBDOMAIN 
state 2025-06-03T10:32:26.322887Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5417: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976710660 2025-06-03T10:32:26.323617Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976710660 2025-06-03T10:32:26.326857Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976710660 2025-06-03T10:32:26.326869Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-03T10:32:26.326915Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.326946Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7511669077713476832:2193], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.326955Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.326962Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.326964Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.326976Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-03T10:32:26.326984Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1748946746316290 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.327001Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946746316290 issue= 2025-06-03T10:32:26.328099Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-03T10:32:26.328130Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-03T10:32:26.328142Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.328252Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077713476258:2203], Recipient [1:7511669077713476359:2193]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:26.328271Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:26.328284Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.328286Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.328308Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-03T10:32:26.328326Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1748946746316290 errorcode=STATUS_CODE_UNSPECIFIED issue= 
2025-06-03T10:32:26.329710Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:26.329739Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.329756Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:26.329825Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:26.330059Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-03T10:32:26.330080Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-03T10:32:26.332143Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-03T10:32:26.332203Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7511669077713476922:2193], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:26.332230Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:26.332236Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.332238Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.332257Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-03T10:32:26.332267Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-03T10:32:26.333974Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.333993Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.333997Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.333999Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.334027Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1748946746316290 2025-06-03T10:32:26.334041Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946746316290 issue= 2025-06-03T10:32:26.334045Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1748946746316290 issue= 2025-06-03T10:32:26.334047Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-03T10:32:26.334080Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1748946746316290 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.338357Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-03T10:32:26.338399Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.370988Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669077713476942:2324], Recipient [1:7511669077713476359:2193]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746316290&action=2" } UserToken: "" } 2025-06-03T10:32:26.371003Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:26.371078Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746316290&action=2" ready: true status: SUCCESS } } >> ReadSessionImplTest::ProperlyOrdersDecompressedData [GOOD] >> ReadSessionImplTest::PacksBatches_ExactlyTwoMessagesInBatch [GOOD] >> ReadSessionImplTest::PacksBatches_OneMessageInEveryBatch [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean >> THiveTest::TestCreateSubHiveCreateManyTabletsWithReboots [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePoolsReboots >> TDSProxyFaultTolerancePatchTest::block42 [GOOD] >> TDSProxyPatchTest::MovedError_ErasureNone ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DescribeOptionsTest [GOOD] Test command err: 2025-06-03T10:32:26.138732Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669080128674894:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:26.138771Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002788/r3tmp/tmpayvhOx/pdisk_1.dat 2025-06-03T10:32:26.199984Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18609, node 1 2025-06-03T10:32:26.216804Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:26.216829Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:26.216832Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:26.216882Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2233 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:26.239623Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:26.239659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:26.241377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:26.274607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient is connected to server localhost:2233 2025-06-03T10:32:26.321420Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:131: TTxProcessor(tenants) is now locking 2025-06-03T10:32:26.321431Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:143: TTxProcessor(tenants) is now locked by parent 2025-06-03T10:32:26.324056Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:102: TTxProcessor(tenants) is now active 2025-06-03T10:32:26.337564Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285140, Sender [1:7511669080128675501:2314], Recipient [1:7511669080128675204:2196]: NKikimr::NConsole::TEvConsole::TEvDescribeTenantOptionsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:44956" } 2025-06-03T10:32:26.337597Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:964: StateWork, processing event TEvConsole::TEvDescribeTenantOptionsRequest 2025-06-03T10:32:26.338029Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3335: Send TEvConsole::TEvDescribeTenantOptionsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.DescribeDatabaseOptionsResult] { storage_units { kind: "hdd2" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "hdd1" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "ssd" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } storage_units { kind: "test" labels { key: "disk_type" value: "ROT" } labels { key: "erasure" value: "none" } } availability_zones { name: "dc-1" labels { key: "collocation" value: "disabled" } labels { key: "fixed_data_center" value: "DC-1" } } availability_zones { name: "any" labels { key: "any_data_center" value: "true" } labels { key: 
"collocation" value: "disabled" } } computational_units { kind: "slot" labels { key: "slot_type" value: "default" } labels { key: "type" value: "dynamic_slot" } allowed_availability_zones: "any" allowed_availability_zones: "dc-1" } } } } } >> TGRpcCmsTest::AuthTokenTest [GOOD] >> TDSProxyPatchTest::MovedError_ErasureNone [GOOD] >> TDSProxyPatchTest::SecuredErrorOnGet_ErasureMirror3dc >> TxUsage::WriteToTopic_Demo_16_Table >> TGRpcCmsTest::DisabledTxTest [GOOD] >> ReadSessionImplTest::ReconnectOnTmpError [GOOD] >> ReadSessionImplTest::ReconnectOnTmpErrorAndThenTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeout [GOOD] >> ReadSessionImplTest::ReconnectOnTimeoutAndThenCreate [GOOD] >> ReadSessionImplTest::ReconnectsAfterFailure [GOOD] >> ReadSessionImplTest::SimpleDataHandlers >> Describe::DescribePartitionPermissions [GOOD] >> DirectReadSession::InitAndStartPartitionSession [GOOD] >> DirectReadSession::NoRetryDirectReadSession [GOOD] >> DirectReadSession::RetryDirectReadSession [GOOD] >> DirectReadWithClient::OneMessage ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTestSyncOperation [GOOD] Test command err: 2025-06-03T10:32:26.310467Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669077727213126:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:26.310490Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002751/r3tmp/tmpiboBpb/pdisk_1.dat 2025-06-03T10:32:26.408401Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:26.414341Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:26.414370Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:26.417432Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14304, node 1 2025-06-03T10:32:26.433547Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:26.433563Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:26.433565Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:26.433621Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61346 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:26.473137Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:26.511103Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7511669077727213860:2313], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:34690" } 2025-06-03T10:32:26.511132Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-03T10:32:26.511142Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.511146Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.511187Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { operation_params { operation_mode: SYNC } path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:34690" 2025-06-03T10:32:26.511248Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1748946746510258) 2025-06-03T10:32:26.511371Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1748946746510258 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-03T10:32:26.511438Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-03T10:32:26.514684Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-03T10:32:26.514900Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { 
operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746510258&action=1" } } } 2025-06-03T10:32:26.514973Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.515008Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:26.515057Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:26.515125Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285139, Sender [1:7511669077727213860:2313], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TEvConsole::TEvNotifyOperationCompletionRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746510258&action=1" } UserToken: "" PeerName: "ipv6:[::1]:34690" } 2025-06-03T10:32:26.515129Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:968: StateWork, processing event TEvConsole::TEvNotifyOperationCompletionRequest 2025-06-03T10:32:26.515229Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:3443: Add subscription to /Root/users/user-1 for [1:7511669077727213860:2313] 2025-06-03T10:32:26.515251Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3451: Send TEvConsole::TEvNotifyOperationCompletionResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746510258&action=1" } } 2025-06-03T10:32:26.515381Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-03T10:32:26.515415Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:32:26.527049Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-03T10:32:26.527084Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.527112Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7511669077727213865:2191], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.527118Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.527125Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.527128Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.527154Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-03T10:32:26.527163Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED 
allocatednumgroups=1 2025-06-03T10:32:26.527197Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-03T10:32:26.528667Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.528686Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.528689Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.528691Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.528718Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-03T10:32:26.528728Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1748946746510258 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.530663Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:26.530710Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.530722Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-03T10:32:26.530724Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-03T10:32:26.531785Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-03T10:32:26.532218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-03T10:32:26.534714Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-03T10:32:26.534732Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715658 2025-06-03T10:32:26.536377Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1 ... 
node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:5417: Mark as Dropping path id [OwnerId: 72057594046644480, LocalPathId: 3] by tx: 281474976715660 2025-06-03T10:32:26.762417Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715660 2025-06-03T10:32:26.774427Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2025-06-03T10:32:26.774444Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-03T10:32:26.774462Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.774485Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7511669077727214605:2191], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.774491Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:26.774499Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.774503Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.774517Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-03T10:32:26.774527Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1748946746757851 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.774551Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946746757851 issue= 2025-06-03T10:32:26.774946Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-03T10:32:26.780890Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-03T10:32:26.780944Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-03T10:32:26.780957Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.781063Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077727213439:2190], Recipient [1:7511669077727213586:2191]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:26.781068Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:26.781079Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.781082Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.781096Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-03T10:32:26.781106Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1748946746757851 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.785000Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-03T10:32:26.785033Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-03T10:32:26.785036Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-03T10:32:26.785040Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-03T10:32:26.785043Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-03T10:32:26.785045Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-03T10:32:26.785048Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-03T10:32:26.785051Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-03T10:32:26.785054Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-03T10:32:26.785634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-03T10:32:26.786613Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:26.786650Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.786670Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:26.786737Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:26.786976Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-03T10:32:26.786990Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-03T10:32:26.790477Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-03T10:32:26.790531Z node 1 :CMS_TENANTS TRACE: 
console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7511669077727214678:2191], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:26.790552Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:26.790559Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.790562Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.790579Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-03T10:32:26.790590Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-03T10:32:26.793144Z node 1 :HIVE WARN: tx__block_storage_result.cpp:56: HIVE#72057594037968897 THive::TTxBlockStorageResult retrying for 72075186224037888 because of ERROR 2025-06-03T10:32:26.793935Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-03T10:32:26.797802Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.797829Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.797832Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.797835Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.797872Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1748946746757851 2025-06-03T10:32:26.797877Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946746757851 issue= 2025-06-03T10:32:26.797881Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1748946746757851 issue= 2025-06-03T10:32:26.797884Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-03T10:32:26.797922Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1748946746757851 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.800145Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-03T10:32:26.800237Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2431: Send /Root/users/user-1 notification to [1:7511669077727214600:2440]: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746757851&action=2" ready: true status: SUCCESS } } 2025-06-03T10:32:26.800277Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.807836Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669077727214750:2453], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:34690" } 2025-06-03T10:32:26.807858Z node 1 
:CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:26.807913Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-03T10:32:26.810552Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7511669077727214755:2454], Recipient [1:7511669077727213586:2191]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:34690" } 2025-06-03T10:32:26.810569Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-03T10:32:26.810637Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-03T10:32:26.816194Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:32:26.816280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected >> TDSProxyPatchTest::SecuredErrorOnGet_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors >> ReadSessionImplTest::SimpleDataHandlers [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithCommit >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] >> TGRpcCmsTest::SimpleTenantsTest [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::AuthTokenTest [GOOD] Test command err: 2025-06-03T10:32:26.489027Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669077318919901:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:26.489105Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002749/r3tmp/tmpnELWZG/pdisk_1.dat 2025-06-03T10:32:26.621161Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:26.644248Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:26.644280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:26.650597Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 29929, node 1 2025-06-03T10:32:26.672344Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:26.672362Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:26.672364Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:26.672423Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is 
connected to server localhost:13114 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:26.750773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:26.803957Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7511669077318920495:2313], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:26.803985Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-03T10:32:26.803994Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.803998Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.804045Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" 2025-06-03T10:32:26.804115Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1748946746803902) 2025-06-03T10:32:26.804243Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1748946746803902 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-03T10:32:26.804317Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-03T10:32:26.809267Z node 1 :CMS_TENANTS DEBUG: 
console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-03T10:32:26.809574Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746803902&action=1" } } } 2025-06-03T10:32:26.809639Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.809672Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:26.809712Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:26.809887Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-03T10:32:26.809920Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:32:26.814290Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-03T10:32:26.814321Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.814345Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7511669077318920500:2209], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.814350Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:26.814356Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.814359Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.814407Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-03T10:32:26.814414Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-03T10:32:26.814450Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-03T10:32:26.817607Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:26.817627Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:26.817630Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.817632Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:26.817656Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to 
CREATING_SUBDOMAIN 2025-06-03T10:32:26.817666Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1748946746803902 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:26.819044Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:26.819108Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:26.819120Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-03T10:32:26.819121Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-03T10:32:26.820539Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" DatabaseName: "Root" 2025-06-03T10:32:26.821197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-03T10:32:26.822092Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-03T10:32:26.822106Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715658 2025-06-03T10:32:26.825037Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715658 2025-06-03T10:32:26.826500Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669077318920574:2315], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946746803902&action=1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" } 2025-06-03T10:32:26.826515Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:26.826566Z node 1 :CMS_TENANTS TRA ... 
EvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-03T10:32:27.097440Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669081613888542:2428], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:27.097457Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.097470Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.097514Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077318920105:2206], Recipient [1:7511669077318920218:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.097517Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.097689Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-03T10:32:27.098635Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669081613888551:2429], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:27.098646Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.098656Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.098678Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077318920105:2206], Recipient [1:7511669077318920218:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.098681Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.098806Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units 
{ unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-03T10:32:27.099683Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669081613888558:2430], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:27.099691Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.099700Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.099742Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077318920105:2206], Recipient [1:7511669077318920218:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.099745Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.099851Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-03T10:32:27.099876Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715659 2025-06-03T10:32:27.099879Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-03T10:32:27.099888Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:27.099906Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435076, Sender [1:7511669077318920603:2209], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:27.099909Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:979: StateWork, processing event TEvPrivate::TEvSubdomainReady 2025-06-03T10:32:27.099915Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.099932Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.099946Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:22: TTxUpdateConfirmedSubdomain for tenant /Root/users/user-1 to 2 2025-06-03T10:32:27.099953Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=RUNNING txid=1748946746803902 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:27.099973Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2913: Update database for /Root/users/user-1 confirmedsubdomain=2 2025-06-03T10:32:27.100556Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, 
received event# 273285122, Sender [1:7511669081613888562:2431], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:27.100565Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.100572Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.100592Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077318920105:2206], Recipient [1:7511669077318920218:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.100595Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.100694Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: PENDING_RESOURCES required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } 2025-06-03T10:32:27.101865Z node 1 :CMS_TENANTS DEBUG: console__update_confirmed_subdomain.cpp:42: TTxUpdateConfirmedSubdomain complete for /Root/users/user-1 2025-06-03T10:32:27.101880Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.102026Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669081613888568:2432], Recipient [1:7511669077318920218:2209]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "\n\014root@builtin\022\030\022\026\n\024all-users@well-known\032\014root@builtin\"\007Builtin*\017**** (B6C6F477)" PeerName: "ipv6:[::1]:45686" } 2025-06-03T10:32:27.102033Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.102042Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2130: Send TEvTenantSlotBroker::TEvGetTenantState: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.102062Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669077318920105:2206], Recipient [1:7511669077318920218:2209]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.102065Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.102179Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3753: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.GetDatabaseStatusResult] { path: "/Root/users/user-1" state: RUNNING required_resources { storage_units { unit_kind: "hdd" count: 1 } } allocated_resources { storage_units { unit_kind: "hdd" count: 1 } } generation: 1 } } } } TClient is connected to server localhost:13114 TClient::Ls request: 
/Root/users/user-1 TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root/users/user-1" PathId: 1 SchemeshardId: 72075186224037897 PathType: EPathTypeSubDomain CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 3 SecurityStateVersion: 0 } } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72075186224037897 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 3 ProcessingParams { Version: 3 PlanReso... (TRUNCATED) 2025-06-03T10:32:27.140404Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:32:27.140593Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::DisabledTxTest [GOOD] Test command err: 2025-06-03T10:32:26.950793Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669078893503084:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:26.950904Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002742/r3tmp/tmpdQ6kqH/pdisk_1.dat 2025-06-03T10:32:27.044960Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:27.044997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:27.050502Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9736, node 1 2025-06-03T10:32:27.053784Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:27.061540Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:27.061557Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:27.061560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:27.061620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:27.062137Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 2025-06-03T10:32:27.062146Z node 1 :GRPC_SERVER WARN: grpc_request_proxy.cpp:529: SchemeBoardDelete /Root Strong=0 TClient is connected to server localhost:15795 WaitRootIsUp 'Root'... 
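The tenant-creation exchange traced above maps almost one-to-one onto the public Ydb.Cms API: the client sends a CreateDatabaseRequest carrying only a path plus the storage units it needs, gets back an operation id of the form ydb://cmsrequest/..., and polls until the database reports RUNNING. A minimal sketch of that call follows; the request fields are copied from the trace, while the stub name, namespaces and include path are assumptions about the generated gRPC code, not verified against the YDB sources.

// Sketch: create a database ("tenant") through the CMS gRPC endpoint exercised above.
// Field names (path, resources.storage_units) come straight from the trace;
// the service/stub names and the include path are assumptions.
#include <grpcpp/grpcpp.h>
#include <ydb/public/api/grpc/ydb_cms_v1.grpc.pb.h>   // assumed location of generated stubs

int main() {
    auto channel = grpc::CreateChannel("localhost:14906", grpc::InsecureChannelCredentials());
    auto stub = Ydb::Cms::V1::CmsService::NewStub(channel);

    Ydb::Cms::CreateDatabaseRequest request;
    request.set_path("/Root/users/user-1");
    auto* unit = request.mutable_resources()->add_storage_units();
    unit->set_unit_kind("hdd");   // matches the "/Root/users/user-1:hdd" pool the CMS defines
    unit->set_count(1);

    Ydb::Cms::CreateDatabaseResponse response;
    grpc::ClientContext ctx;
    grpc::Status status = stub->CreateDatabase(&ctx, request, &response);
    // response.operation().id() is the ydb://cmsrequest/... handle that the client
    // later passes to GetOperation, as in the polling visible in the traces.
    return status.ok() ? 0 : 1;
}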
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:27.126280Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:27.163548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-03T10:32:27.172098Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715659:0, at schemeshard: 72057594046644480 >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors [GOOD] >> ReadSessionImplTest::PacksBatches_BigBatchDecompressWithTwoBatchTasks [GOOD] >> ReadSessionImplTest::PacksBatches_DecompressesOneMessagePerTime [GOOD] >> ReadSessionImplTest::PartitionStreamStatus [GOOD] >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_0_0_0_VdiskErrors [GOOD] Test command err: 2025-06-03T10:32:28.076550Z node 25 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [25:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:28.076670Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:28.076681Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:28.076688Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:28.076693Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:28.076699Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# 
[72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:28.076705Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:28.081767Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:28.081900Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:32:28.081915Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:32:28.081937Z node 25 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:28.081947Z node 25 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:28.081999Z node 25 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.465 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 25 } TEvVPut{ TimestampMs# 0.465 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 25 } TEvVPut{ TimestampMs# 0.465 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 25 } TEvVPutResult{ TimestampMs# 5.554 VDiskId# [0:1:0:1:0] NodeId# 25 Status# OK } TEvVPutResult{ TimestampMs# 5.633 VDiskId# [0:1:1:1:0] NodeId# 25 Status# OK } TEvVPutResult{ TimestampMs# 5.649 VDiskId# [0:1:2:1:0] NodeId# 25 Status# OK } ] } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithCommit [GOOD] Test command err: 2025-06-03T10:32:27.932186Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.932196Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.932201Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.932321Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:32:27.932595Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.932611Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.933341Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.009857s 2025-06-03T10:32:27.933527Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:32:27.933674Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:27.933699Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.934039Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.934043Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.934047Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.934163Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:32:27.934173Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.934176Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.934192Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006100s 2025-06-03T10:32:27.934286Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.938537Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:27.938576Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.938943Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.938948Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.938951Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.939047Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-03T10:32:27.939060Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939064Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939082Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.200225s 2025-06-03T10:32:27.939174Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.939271Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:27.939284Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939535Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939539Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939542Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.939591Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-03T10:32:27.939601Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939605Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939620Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.299267s 2025-06-03T10:32:27.939698Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.939741Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:27.939748Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939927Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939930Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.939932Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.939979Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.940064Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:27.941758Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.941861Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TRANSPORT_UNAVAILABLE. Description:
: Error: GRpc error: (14): 2025-06-03T10:32:27.941867Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.941870Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.941875Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.164995s 2025-06-03T10:32:27.941935Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-03T10:32:27.942215Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.942218Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.942221Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.942288Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.942452Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:27.942941Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.943063Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:28.043883Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.045389Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:32:28.045417Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.045426Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-03T10:32:28.045452Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:32:28.145624Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:32:28.145704Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-03T10:32:28.146107Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.146113Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.146117Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.146222Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.146367Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.146424Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.146542Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:28.249668Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.249765Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:32:28.249787Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.249795Z :DEBUG: Take Data. Partition 1. 
Read: {1, 0} (2-2) 2025-06-03T10:32:28.249818Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-03T10:32:28.249853Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:32:28.249887Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:32:28.249915Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-03T10:32:28.249954Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster >> ApplyClusterEndpointTest::NoPorts [GOOD] >> ApplyClusterEndpointTest::PortFromCds [GOOD] >> ApplyClusterEndpointTest::PortFromDriver [GOOD] >> BasicUsage::MaxByteSizeEqualZero >> ReadSessionImplTest::ForcefulDestroyPartitionStream [GOOD] >> ReadSessionImplTest::DestroyPartitionStreamRequest [GOOD] >> ReadSessionImplTest::DecompressZstdEmptyMessage [GOOD] >> ReadSessionImplTest::PacksBatches_BatchABitBiggerThanLimit [GOOD] >> ReadSessionImplTest::PacksBatches_BatchesEqualToServerBatches [GOOD] >> ReadSessionImplTest::HoleBetweenOffsets [GOOD] >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] ------- [TS] {default-linux-x86_64, relwithdebinfo} ydb/services/cms/ut/unittest >> TGRpcCmsTest::SimpleTenantsTest [GOOD] Test command err: 2025-06-03T10:32:27.083294Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669082890936112:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:27.083501Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002735/r3tmp/tmpKQYYUa/pdisk_1.dat 2025-06-03T10:32:27.169856Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14906, node 1 2025-06-03T10:32:27.186360Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:27.186374Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:27.186377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:27.186422Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17083 WaitRootIsUp 'Root'... 
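In the SimpleDataHandlersWithCommit trace above, the session reads the messages at offsets 1 and 2 and then prints "Commit offsets [1, 3)": commits are half-open ranges whose end is one past the last consumed offset. A toy model of that bookkeeping (hypothetical helper, not SDK code):

// Toy model of read-session commit bookkeeping: commits are half-open offset
// ranges [first, last + 1), as in "Commit offsets [1, 3)" above.
#include <cstdint>
#include <optional>

struct CommitRange {
    uint64_t begin = 0;  // first offset to commit
    uint64_t end = 0;    // one past the last consumed offset
};

class OffsetTracker {
    std::optional<uint64_t> first_;
    uint64_t last_ = 0;
public:
    void OnMessage(uint64_t offset) {
        if (!first_) first_ = offset;
        last_ = offset;
    }
    std::optional<CommitRange> TakeCommit() {
        if (!first_) return std::nullopt;
        CommitRange r{*first_, last_ + 1};
        first_.reset();
        return r;
    }
};

// Reading offsets 1 and 2, then committing, yields [1, 3): exactly the range
// printed by the session in the trace above.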
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:27.227474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:27.227503Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:27.230782Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:27.232079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:27.242418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:27.263077Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285120, Sender [1:7511669082890936717:2313], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TEvConsole::TEvCreateTenantRequest { Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50238" } 2025-06-03T10:32:27.263103Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:963: StateWork, processing event TEvConsole::TEvCreateTenantRequest 2025-06-03T10:32:27.263112Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.263116Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.263158Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:71: TTxCreateTenant: Request { path: "/Root/users/user-1" resources { storage_units { unit_kind: "hdd" count: 1 } } } UserToken: "" PeerName: "ipv6:[::1]:50238" 2025-06-03T10:32:27.263213Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:365: Add tenant /Root/users/user-1 (txid = 1748946747263172) 2025-06-03T10:32:27.263337Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2577: Add tenant /Root/users/user-1 to database state=CREATING_POOLS coordinators=3 mediators=3 planresolution=10 timecastbucketspermediator=2 issue= txid=1748946747263172 subdomainversion=1 confirmedsubdomain=0 attrs= generation=1 errorcode=STATUS_CODE_UNSPECIFIED isExternalSubDomain=1 isExternalHive=1 isExternalSysViewProcessor=1 isExternalStatisticsAggregator=1 areResourcesShared=0 sharedDomainId= 2025-06-03T10:32:27.263406Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2637: Add 
tenant pool /Root/users/user-1:hdd to database kind=hdd config=BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } allocatednumgroups=0 state=NOT_ALLOCATED 2025-06-03T10:32:27.270028Z node 1 :CMS_TENANTS DEBUG: console__create_tenant.cpp:375: TTxCreateTenant Complete 2025-06-03T10:32:27.270333Z node 1 :CMS_TENANTS TRACE: console__create_tenant.cpp:383: Send: NKikimr::NConsole::TEvConsole::TEvCreateTenantResponse { Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747263172&action=1" } } } 2025-06-03T10:32:27.270402Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.270440Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:27.270484Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:27.270681Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true } Success: true ConfigTxSeqNo: 5 2025-06-03T10:32:27.270716Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:131: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DefineStoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" VDiskKind: "Default" Kind: "hdd" NumGroups: 1 PDiskFilter { Property { Type: ROT } } } } } 2025-06-03T10:32:27.274194Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669082890936727:2314], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747263172&action=1" } UserToken: "" } 2025-06-03T10:32:27.274210Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:27.274271Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747263172&action=1" } } 2025-06-03T10:32:27.278651Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:244: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 6 2025-06-03T10:32:27.278687Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:168: TPoolManip(/Root/users/user-1:hdd) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:27.278710Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435079, Sender [1:7511669082890936722:2190], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:27.278715Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:972: StateWork, processing event TEvPrivate::TEvPoolAllocated 2025-06-03T10:32:27.278724Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.278727Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 
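The operation ids exchanged above, such as ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747263172&action=1, encode everything the CMS needs to find the request again: tenant path, CMS tablet id, transaction id and action. An illustrative parser for that shape (not the actual YDB implementation):

// Split a ydb://cmsrequest/... operation id into its query parameters.
// Expected keys, per the ids printed above: tenant, cmstid, txid, action.
#include <map>
#include <string>

std::map<std::string, std::string> ParseCmsOperationId(const std::string& id) {
    std::map<std::string, std::string> params;
    auto q = id.find('?');
    if (q == std::string::npos) return params;
    std::string query = id.substr(q + 1);
    size_t pos = 0;
    while (pos < query.size()) {
        size_t amp = query.find('&', pos);
        if (amp == std::string::npos) amp = query.size();
        std::string kv = query.substr(pos, amp - pos);
        size_t eq = kv.find('=');
        if (eq != std::string::npos)
            params[kv.substr(0, eq)] = kv.substr(eq + 1);
        pos = amp + 1;
    }
    return params;
}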
2025-06-03T10:32:27.278756Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=ALLOCATED 2025-06-03T10:32:27.278764Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=ALLOCATED allocatednumgroups=1 2025-06-03T10:32:27.278799Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3206: Update subdomain version in database for /Root/users/user-1 subdomainversion=2 2025-06-03T10:32:27.282224Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:27.282253Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.282256Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.282258Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.282298Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to CREATING_SUBDOMAIN 2025-06-03T10:32:27.282311Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=CREATING_SUBDOMAIN txid=1748946747263172 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:27.289918Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:27.289995Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.290020Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:784: TSubdomainManip(/Root/users/user-1)::Bootstrap 2025-06-03T10:32:27.290023Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:596: TSubDomainManip(/Root/users/user-1) create subdomain 2025-06-03T10:32:27.291214Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:620: TSubdomainManip(/Root/users/user-1) send subdomain creation cmd: NKikimrTxUserProxy.TEvProposeTransaction Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateExtSubDomain SubDomain { Name: "users/user-1" ExternalSchemeShard: true ExternalHive: true ExternalSysViewProcessor: true ExternalStatisticsAggregator: true GraphShard: true } } } ExecTimeoutPeriod: 18446744073709551615 DatabaseName: "Root" 2025-06-03T10:32:27.291840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:1, at schemeshard: 72057594046644480 2025-06-03T10:32:27.294459Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:832: TSubdomainManip(/Root/users/user-1) got propose result: Status: 53 TxId: 281474976715658 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 PathId: 3 2025-06-03T10:32:27.294483Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:768: TSubdomainManip(/Root/users/user-1) send notification request: NKikimrScheme.TEvNotifyTxCompletion TxId: 281474976715658 2025-06-03T10:32:27.298064Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:804: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionRegistered: TxId: 281474976715658 2025-06-03T10:32:27.305045Z node 1 :CMS_TENANTS DEBUG: console_ ... 
} } 2025-06-03T10:32:27.605261Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:809: TSubdomainManip(/Root/users/user-1) got TEvNotifyTxCompletionResult: TxId: 281474976715660 2025-06-03T10:32:27.605277Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:694: TSubdomainManip(/Root/users/user-1) done 2025-06-03T10:32:27.605318Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:710: TSubdomainManip(/Root/users/user-1) reply with NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:27.605345Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435077, Sender [1:7511669082890937289:2190], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:27.605350Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:980: StateWork, processing event TEvPrivate::TEvSubdomainRemoved 2025-06-03T10:32:27.605359Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.605362Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.605382Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:20: TTxRemoveComputationalUnits Execute /Root/users/user-1 2025-06-03T10:32:27.605396Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_UNITS txid=1748946747573414 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:27.605423Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946747573414 issue= 2025-06-03T10:32:27.608349Z node 1 :CMS_TENANTS DEBUG: console__remove_computational_units.cpp:34: TTxRemoveComputationalUnits Complete /Root/users/user-1 2025-06-03T10:32:27.608398Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2114: Send TEvTenantSlotBroker::TEvAlterTenant: TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.608416Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.608525Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273154052, Sender [1:7511669082890936321:2203], Recipient [1:7511669082890936426:2190]: NKikimrTenantSlotBroker.TTenantState TenantName: "/Root/users/user-1" 2025-06-03T10:32:27.608532Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:984: StateWork, processing event TEvTenantSlotBroker::TEvTenantState 2025-06-03T10:32:27.608544Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.608547Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.608558Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:23: TTxUpdateTenantState for tenant /Root/users/user-1 to REMOVING_POOLS 2025-06-03T10:32:27.608567Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3146: Update tenant state in database for /Root/users/user-1 state=REMOVING_POOLS txid=1748946747573414 errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:27.604660Z node 3 :HIVE WARN: tx__delete_tablet.cpp:88: HIVE#72075186224037888 THive::TTxDeleteTablet tablet (72057594046644480,1) wasn't found - using supplied 72075186224037888 2025-06-03T10:32:27.610515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__delete_tablet_reply.cpp:39: Got DeleteTabletReply with Forward response from Hive 72075186224037888 to 
Hive 72057594037968897 shardIdx 72057594046644480:1 2025-06-03T10:32:27.612176Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037897 not found 2025-06-03T10:32:27.612199Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037890 not found 2025-06-03T10:32:27.612203Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037893 not found 2025-06-03T10:32:27.612206Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037894 not found 2025-06-03T10:32:27.612209Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037891 not found 2025-06-03T10:32:27.612211Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037892 not found 2025-06-03T10:32:27.612213Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037895 not found 2025-06-03T10:32:27.612217Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037889 not found 2025-06-03T10:32:27.612220Z node 3 :HIVE WARN: hive_impl.cpp:491: HIVE#72075186224037888 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037896 not found 2025-06-03T10:32:27.614231Z node 1 :CMS_TENANTS DEBUG: console__update_tenant_state.cpp:45: TTxUpdateTenantState complete for /Root/users/user-1 2025-06-03T10:32:27.614263Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.614274Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:158: TPoolManip(/Root/users/user-1:hdd) Bootstrap 2025-06-03T10:32:27.614341Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:117: TPoolManip(/Root/users/user-1:hdd) read pool state: Request { Command { ReadStoragePool { BoxId: 999 Name: "/Root/users/user-1:hdd" } } } 2025-06-03T10:32:27.616277Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:198: TPoolManip(/Root/users/user-1:hdd) got read response: Status { Success: true StoragePool { BoxId: 999 StoragePoolId: 4 Name: "/Root/users/user-1:hdd" ErasureSpecies: "none" Geometry { } VDiskKind: "Default" Kind: "hdd" NumGroups: 2 PDiskFilter { Property { Type: ROT } } ScopeId { X1: 72057594046644480 X2: 3 } ItemConfigGeneration: 3 } } Success: true ConfigTxSeqNo: 13 2025-06-03T10:32:27.616297Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:151: TPoolManip(/Root/users/user-1:hdd) send pool request: Request { Command { DeleteStoragePool { BoxId: 999 StoragePoolId: 4 ItemConfigGeneration: 3 } } } 2025-06-03T10:32:27.619151Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 3, TabletId: 72075186224037888 not found 2025-06-03T10:32:27.624396Z node 1 :CMS_TENANTS DEBUG: console_tenants_manager.cpp:306: TPoolManip(/Root/users/user-1:hdd) got config response: Status { Success: true } Success: true ConfigTxSeqNo: 14 2025-06-03T10:32:27.624446Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 2146435081, Sender [1:7511669082890937367:2190], Recipient [1:7511669082890936426:2190]: 
NKikimr::NConsole::TTenantsManager::TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:27.624464Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:973: StateWork, processing event TEvPrivate::TEvPoolDeleted 2025-06-03T10:32:27.624471Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.624474Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.624501Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:28: TTxUpdatePoolState for pool /Root/users/user-1:hdd of /Root/users/user-1 state=DELETED 2025-06-03T10:32:27.624510Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3047: Update pool state in database for /Root/users/user-1:hdd state=DELETED allocatednumgroups=0 2025-06-03T10:32:27.627829Z node 1 :CMS_TENANTS DEBUG: console__update_pool_state.cpp:73: TTxUpdatePoolState complete for /Root/users/user-1:hdd 2025-06-03T10:32:27.627848Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:51: TTxProcessor(tenants) enqueue tx 2025-06-03T10:32:27.627851Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.627854Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:245: TTxProcessor(tenants) starts new tx 2025-06-03T10:32:27.627879Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:22: TTxRemoveTenantDone for tenant /Root/users/user-1 txid=1748946747573414 2025-06-03T10:32:27.627883Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2927: Remove computational units of /Root/users/user-1 from database txid=1748946747573414 issue= 2025-06-03T10:32:27.627887Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2958: Remove tenant /Root/users/user-1 from database txid=1748946747573414 issue= 2025-06-03T10:32:27.627890Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:2963: Remove pool /Root/users/user-1:hdd from database 2025-06-03T10:32:27.627948Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3083: Add tenant removal info for /Root/users/user-1 txid=1748946747573414 code=SUCCESS errorcode=STATUS_CODE_UNSPECIFIED issue= 2025-06-03T10:32:27.630184Z node 1 :CMS_TENANTS DEBUG: console__remove_tenant_done.cpp:34: TTxRemoveTenantDone Complete 2025-06-03T10:32:27.630238Z node 1 :CMS_TENANTS TRACE: tx_processor.cpp:60: TTxProcessor(tenants) completed tx 2025-06-03T10:32:27.650700Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285131, Sender [1:7511669082890937415:2392], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TEvConsole::TEvGetOperationRequest { Request { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747573414&action=2" } UserToken: "" } 2025-06-03T10:32:27.650716Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:965: StateWork, processing event TEvConsole::TEvGetOperationRequest 2025-06-03T10:32:27.650807Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3353: Send TEvConsole::TEvGetOperationResponse: Response { operation { id: "ydb://cmsrequest/5?tenant=/Root/users/user-1&cmstid=72057594037936131&txid=1748946747573414&action=2" ready: true status: SUCCESS } } 2025-06-03T10:32:27.652227Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285122, Sender [1:7511669082890937418:2394], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TEvConsole::TEvGetTenantStatusRequest { Request { path: "/Root/users/user-1" } UserToken: "" PeerName: "ipv6:[::1]:50238" } 
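Between this test and the creation trace earlier, the tenant walks through a fixed set of states: CREATING_POOLS, CREATING_SUBDOMAIN, PENDING_RESOURCES and RUNNING on the way up, then REMOVING_UNITS, REMOVING_POOLS and TTxRemoveTenantDone on the way down. A compact restatement of that lifecycle, reconstructed from these logs rather than from the YDB sources:

// Tenant lifecycle as observed in the CMS_TENANTS traces; the transition
// list is descriptive (derived from this log), not authoritative.
enum class ETenantState {
    CreatingPools,      // CREATING_POOLS: storage pools being defined in the BS controller
    CreatingSubdomain,  // CREATING_SUBDOMAIN: ESchemeOpCreateExtSubDomain in flight
    PendingResources,   // PENDING_RESOURCES: allocated_resources catching up to required_resources
    Running,            // RUNNING: confirmed subdomain reached its target version
    RemovingUnits,      // REMOVING_UNITS: computational units dropped from the database
    RemovingPools,      // REMOVING_POOLS: storage pools deleted (pool state=DELETED)
    Removed,            // TTxRemoveTenantDone: tenant erased; ListDatabases returns empty
};
// Creation path:  CreatingPools -> CreatingSubdomain -> PendingResources -> Running
// Removal path:   Running -> RemovingUnits -> RemovingPools -> Removed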
2025-06-03T10:32:27.652241Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:966: StateWork, processing event TEvConsole::TEvGetTenantStatusRequest 2025-06-03T10:32:27.652291Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3377: Send TEvConsole::TEvGetTenantStatusResponse: Response { operation { ready: true status: NOT_FOUND issues { message: "Unknown tenant /Root/users/user-1" severity: 1 } } } 2025-06-03T10:32:27.657589Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:960: StateWork, received event# 273285123, Sender [1:7511669082890937421:2395], Recipient [1:7511669082890936426:2190]: NKikimr::NConsole::TEvConsole::TEvListTenantsRequest { Request { } UserToken: "" PeerName: "ipv6:[::1]:50238" } 2025-06-03T10:32:27.657604Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.h:967: StateWork, processing event TEvConsole::TEvListTenantsRequest 2025-06-03T10:32:27.657702Z node 1 :CMS_TENANTS TRACE: console_tenants_manager.cpp:3421: Send TEvConsole::TEvListTenantsResponse: Response { operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Cms.ListDatabasesResult] { } } } } 2025-06-03T10:32:27.667273Z node 1 :HIVE WARN: tx__status.cpp:57: HIVE#72057594037968897 THive::TTxStatus(status=2 node=Connected) - killing node 3 2025-06-03T10:32:27.667406Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connected -> Disconnected ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::PartitionStreamCallbacks [GOOD] Test command err: 2025-06-03T10:32:27.426533Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.426552Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.426558Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.430140Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.430526Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:27.432695Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.432933Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:27.433373Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:27.433662Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:27.437489Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-03T10:32:27.437528Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:27.437895Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:27.437914Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-03T10:32:27.437935Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-03T10:32:27.437943Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes 2025-06-03T10:32:27.456813Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.456824Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.456828Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.465230Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.465492Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:27.465580Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.469431Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-03T10:32:27.469762Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:27.469809Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:32:27.477471Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:32:27.477513Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:32:27.479145Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:27.479168Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:27.479187Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:27.479254Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.479266Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:27.479271Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:32:27.479275Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:27.479308Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.479350Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-03T10:32:27.479355Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-03T10:32:27.479361Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:27.479374Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.479382Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-03T10:32:27.479387Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-03T10:32:27.479393Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 20 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:27.479410Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-06-03T10:32:27.484393Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.484402Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.484408Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:27.484531Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:27.484722Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:27.484812Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:27.484860Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". 
Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 100 Compressed message data size: 91 2025-06-03T10:32:27.485078Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:27.485114Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:32:27.493445Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:32:27.493479Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:32:27.493553Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:27.493577Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:27.493624Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.493637Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:27.493641Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:27.493670Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [2, 3). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.493679Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:27.493683Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..100 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:27.493694Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 4). Partition stream id: 1 Getting new event 2025-06-03T10:32:27.493701Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:32:27.493705Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 100 bytes DataReceived { PartitionStream ... tream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 190 SeqNo: 231 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 191 SeqNo: 232 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 192 SeqNo: 233 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 193 SeqNo: 234 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 194 SeqNo: 235 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 195 SeqNo: 236 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 196 SeqNo: 237 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 197 SeqNo: 238 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 198 SeqNo: 239 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 199 SeqNo: 240 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 200 SeqNo: 241 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.445101Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 201). 
Partition stream id: 1 2025-06-03T10:32:28.479801Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-03T10:32:28.479811Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-03T10:32:28.479815Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.479903Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.480095Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.480199Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 2025-06-03T10:32:28.480279Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 1000000 Compressed message data size: 3028 Post function Getting new event 2025-06-03T10:32:28.512088Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-10) 2025-06-03T10:32:28.512335Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.512671Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:28.513088Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:28.513219Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-03T10:32:28.513886Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-03T10:32:28.514037Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-03T10:32:28.514189Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (7-7) 2025-06-03T10:32:28.514333Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (8-8) 2025-06-03T10:32:28.515550Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (9-9) 2025-06-03T10:32:28.515704Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (10-10) 2025-06-03T10:32:28.515727Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 10, size 10000000 bytes 2025-06-03T10:32:28.515808Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 5, ReadSizeServerDelta = 0 DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 9 SeqNo: 50 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } Message { Data: ..1000000 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 51 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.517694Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 11). Partition stream id: 1 2025-06-03T10:32:28.518239Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.518246Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.518250Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.518315Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.518413Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.518467Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.518519Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-03T10:32:28.518614Z :DEBUG: [db] [sessionid] [cluster] Requesting status for partition stream id: 1 2025-06-03T10:32:28.519088Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.519097Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.519102Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.519209Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.519295Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.519335Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.519499Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.519534Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:28.519564Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.519576Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-03T10:32:28.519637Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Table [GOOD] >> ReadSessionImplTest::DecompressRaw [GOOD] >> ReadSessionImplTest::DecompressGzip [GOOD] >> ReadSessionImplTest::DecompressZstd [GOOD] >> ReadSessionImplTest::DecompressRawEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressGzipEmptyMessage [GOOD] >> ReadSessionImplTest::DecompressWithSynchronousExecutor [GOOD] >> ReadSessionImplTest::DataReceivedCallbackReal |69.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |69.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut |69.6%| [LD] {RESULT} $(B)/ydb/core/blobstorage/vdisk/skeleton/ut/ydb-core-blobstorage-vdisk-skeleton-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::LOGBROKER_7702 [GOOD] Test command err: 2025-06-03T10:32:28.900824Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.900836Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.900842Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.900954Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.901103Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.902428Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.902549Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) 2025-06-03T10:32:28.903035Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.903042Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.903048Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.903133Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.903261Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.903293Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.903365Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:28.903428Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:32:28.903667Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.903671Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.903674Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.903750Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.903976Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.904030Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.904072Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:28.904348Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.904431Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:28.904453Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.904463Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-03T10:32:28.936847Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.936858Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.936863Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.936969Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.937160Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.937266Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.937351Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. 
Read offset: (NULL) Message data size: 11 Compressed message data size: 31 2025-06-03T10:32:28.938145Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:28.938211Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:32:28.938317Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:32:28.938343Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:32:28.938392Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.938401Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:28.938411Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:28.938456Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Getting new event 2025-06-03T10:32:28.938464Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:28.938469Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:32:28.938472Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.938495Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [3, 5). Partition stream id: 1 Getting new event 2025-06-03T10:32:28.938514Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-03T10:32:28.938516Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-03T10:32:28.938520Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } } 2025-06-03T10:32:28.938528Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 7). Partition stream id: 1 Getting new event 2025-06-03T10:32:28.938531Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-03T10:32:28.938534Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-03T10:32:28.938537Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 22 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..11 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.938547Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [7, 9). Partition stream id: 1 2025-06-03T10:32:28.942495Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.942506Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.942511Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.942627Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.942841Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.942930Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.943048Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) Message data size: 10 Compressed message data size: 30 2025-06-03T10:32:28.943367Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:28.943408Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function Getting new event 2025-06-03T10:32:28.943464Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (5-8) 2025-06-03T10:32:28.943480Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-4) 2025-06-03T10:32:28.943523Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.943534Z :DEBUG: Take Data. Partition 1. 
Read: {0, 1} (2-2) 2025-06-03T10:32:28.943539Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:28.943544Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (4-4) 2025-06-03T10:32:28.943552Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 42 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 43 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 44 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 4 SeqNo: 45 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.943604Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 5). Partition stream id: 1 Getting new event 2025-06-03T10:32:28.943629Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (5-5) 2025-06-03T10:32:28.943632Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (6-6) 2025-06-03T10:32:28.943636Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (7-7) 2025-06-03T10:32:28.943641Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (8-8) 2025-06-03T10:32:28.943645Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 4, size 40 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 5 SeqNo: 46 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 6 SeqNo: 47 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 7 SeqNo: 48 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:02:03.000000Z Ip: "127.0.0.1" UncompressedSize: 0 Meta: { "k1": "v1", "k": "v" } } } Message { Data: ..10 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 8 SeqNo: 49 MessageGroupId: "src_id_2" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:05:21.000000Z Ip: "1.0.0.127" UncompressedSize: 0 Meta: { "v1": "k1", "v": "k" } } } } 2025-06-03T10:32:28.943662Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [5, 9). Partition stream id: 1 2025-06-03T10:32:28.944021Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.944026Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.944031Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:28.944101Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:28.944176Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:28.944207Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:28.944240Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:28.944388Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:28.944428Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:28.944471Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (10-11) 2025-06-03T10:32:28.944481Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:32:28.944500Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:28.944506Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:28.944510Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (10-10) 2025-06-03T10:32:28.944514Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (11-11) 2025-06-03T10:32:28.944520Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes 2025-06-03T10:32:28.944524Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 16 bytes got data event: DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 10 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". 
Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 11 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-03T10:32:28.944550Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 1 } } 2025-06-03T10:32:28.944574Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [10, 12). Partition stream id: 1 Got commit req { cookies { assign_id: 1 partition_cookie: 2 } } >> YdbTableSplit::RenameTablesAndSplit [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Table [GOOD] >> ReadSessionImplTest::UsesOnRetryStateDuringRetries [GOOD] >> RetryPolicy::TWriteSession_TestPolicy >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions |69.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan |69.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table |69.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/ydb-core-tx-datashard-ut_incremental_restore_scan ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::RenameTablesAndSplit [GOOD] Test command err: 2025-06-03T10:31:05.151163Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668729712456911:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:05.151265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002254/r3tmp/tmpyAdvht/pdisk_1.dat 2025-06-03T10:31:05.259137Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:05.264213Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:05.264240Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:05.268748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 21219, node 1 2025-06-03T10:31:05.289238Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:05.289254Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:05.289256Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:05.289320Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12113 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 0 Pa... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:05.350151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:05.701225Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729712457716:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.701267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.747830Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_table.cpp:426: TCreateTable Propose, path: /Root/Foo, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.748102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.748107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.748917Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710658, database: /Root, subject: , status: StatusAccepted, operation: CREATE TABLE, path: /Root/Foo 2025-06-03T10:31:05.782865Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946665830, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:31:05.794323Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-03T10:31:05.802205Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668729712457940:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.802252Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:05.809768Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_table.cpp:508: TAlterTable Propose, path: /Root/Foo, pathId: , opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.809980Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710659:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:05.809989Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:31:05.811058Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710659, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE, path: /Root/Foo 2025-06-03T10:31:05.828377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748946665872, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:31:05.832290Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710659:0 Fast forward 1m partitions 2 Fast forward 1m 2025-06-03T10:31:10.152073Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668729712456911:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:10.152116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; partitions 2 Fast forward 1m partitions 2 Fast forward 1m 2025-06-03T10:31:15.889965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:798: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976715657:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-03T10:31:15.890202Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1077: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976715657:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" TabletID: 72075186224037888 ShardIdx: 1 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\377\377\377\177\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037889 ShardIdx: 2 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 3 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037888 SourceTabletId: 72075186224037889 SchemeshardId: 72057594046644480 2025-06-03T10:31:15.890214Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715657:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:15.968469Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715657:0 2025-06-03T10:31:15.973813Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle 
TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:31:15.973832Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037888 not found partitions 1 2025-06-03T10:31:17.867776Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_move_table.cpp:569: TMoveTable Propose, from: /Root/Foo, to: /Root/Bar, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:31:17.867864Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710660:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:17.869781Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 281474976710660, database: /Root, subject: , status: StatusAccepted, operation: ALTER TABLE RENAME, dst path: /Root/Foo, dst path: /Root/Bar 2025-06-03T10:31:17.890843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 1748947157937, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:31:17.901869Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:1103: All parts have reached barrier, tx: 281474976710660, done: 0, blocked: 1 2025-06-03T10:31:17.904951Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710660:0 2025-06-03T10:31:17.905905Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 Fast forward 1m 2025-06-03T10:31:20.237669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:31:20.237686Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:20.937564Z node 1 :TX_DATASHARD DEBUG: datashard_impl.h:3344: SendPeriodicTableStats register new pipe at datashard 72075186224037890 FollowerId 0, TableInfos size = 1 2025-06-03T10:31:20.941651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:563: Got periodic table stats at tablet 72057594046644480 from shard 72075186224037890 followerId 0 pathId [OwnerId: 72057594046644480, LocalPathId: 3] state 'Ready' dataSize 0 rowCount 0 cpuUsage 0.3813 2025-06-03T10:31:21.041984Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:590: Started TEvPersistStats at tablet 72057594046644480, queue size# 1 2025-06-03T10:31:21.042059Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__table_stats.cpp:267: PersistSingleStats for pathId 3 shard idx 72057594046644480:3 data size 0 row count 0 2025-06-03T10:31:21.042094Z node 1 :FLAT ... 
AT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976710664 ready parts: 1/1 2025-06-03T10:32:28.628803Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710664:0 2025-06-03T10:32:28.628806Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976710664:0 2025-06-03T10:32:28.628834Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 4 2025-06-03T10:32:28.628944Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7511669086194747374:2727], serverId# [1:7511669086194747378:4628], sessionId# [0:0:0] 2025-06-03T10:32:28.629674Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-03T10:32:28.629709Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:128: 72075186224037891 parts [ [72075186224037890:1:114:1:12288:11424:0] ] return ack processed 2025-06-03T10:32:28.629716Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037891 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-03T10:32:28.629733Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037891 Initiating switch from PreOffline to Offline state 2025-06-03T10:32:28.630106Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:28.630911Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7511669086194747399:2729], serverId# [1:7511669086194747404:4652], sessionId# [0:0:0] 2025-06-03T10:32:28.631131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7511668772662131318 RawX2: 4503603922340182 } TabletId: 72075186224037890 State: 4 2025-06-03T10:32:28.631168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:28.634583Z node 1 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037892 in PreOffline state HasSharedBobs: 0 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-03T10:32:28.634645Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037892 Initiating switch from PreOffline to Offline state 2025-06-03T10:32:28.635124Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:28.635138Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037891 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:28.635172Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037890 state Offline 2025-06-03T10:32:28.636018Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:3 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:32:28.636971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7511669077604812342 RawX2: 4503603922340487 } TabletId: 72075186224037891 State: 4 2025-06-03T10:32:28.636988Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:28.637062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7511669077604812342 RawX2: 4503603922340487 } TabletId: 72075186224037891 State: 4 2025-06-03T10:32:28.637068Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037891, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:28.638028Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:32:28.638070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:4 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:32:28.638442Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-03T10:32:28.638448Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037891 state Offline 2025-06-03T10:32:28.638467Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037892 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:28.638864Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046644480, message: Source { RawX1: 7511669077604812340 RawX2: 4503603922340486 } TabletId: 72075186224037892 State: 4 2025-06-03T10:32:28.638879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037892, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:28.642037Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046644480 ShardLocalIdx: 3, at schemeshard: 72057594046644480 2025-06-03T10:32:28.642228Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-03T10:32:28.642313Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-03T10:32:28.642334Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 2 2025-06-03T10:32:28.642352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 
TxId_Deprecated: 4 ShardOwnerId: 72057594046644480 ShardLocalIdx: 4, at schemeshard: 72057594046644480 2025-06-03T10:32:28.642409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046644480:5 hive 72057594037968897 at ss 72057594046644480 2025-06-03T10:32:28.643513Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2962: Handle TEvStateChangedResult datashard 72075186224037892 state Offline 2025-06-03T10:32:28.643545Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-03T10:32:28.643563Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037890, clientId# [1:7511668781252066233:3175], serverId# [1:7511668781252066234:3176], sessionId# [0:0:0] 2025-06-03T10:32:28.643572Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037891 reason = ReasonStop 2025-06-03T10:32:28.643582Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3728: Server disconnected at leader tablet# 72075186224037891, clientId# [1:7511669086194747305:4567], serverId# [1:7511669086194747306:4568], sessionId# [0:0:0] 2025-06-03T10:32:28.644285Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-03T10:32:28.644299Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037891 not found 2025-06-03T10:32:28.644636Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037891 2025-06-03T10:32:28.644670Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-03T10:32:28.645345Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-03T10:32:28.645362Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 2025-06-03T10:32:28.646775Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:3 2025-06-03T10:32:28.646790Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:3 tabletId 72075186224037890 2025-06-03T10:32:28.646820Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-03T10:32:28.646822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:4 tabletId 72075186224037891 2025-06-03T10:32:28.646828Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:4 2025-06-03T10:32:28.646872Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 5 ShardOwnerId: 72057594046644480 ShardLocalIdx: 5, at schemeshard: 72057594046644480 2025-06-03T10:32:28.646980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 1 2025-06-03T10:32:28.646985Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037892 reason = ReasonStop 2025-06-03T10:32:28.647069Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046644480 2025-06-03T10:32:28.647076Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046644480, LocalPathId: 3], at schemeshard: 72057594046644480 2025-06-03T10:32:28.647092Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-03T10:32:28.647208Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037892 2025-06-03T10:32:28.647247Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037892 2025-06-03T10:32:28.647353Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037892 not found 2025-06-03T10:32:28.648709Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046644480:5 2025-06-03T10:32:28.648725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046644480:5 tabletId 72075186224037892 2025-06-03T10:32:28.648750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046644480 >> THiveTest::TestCreateAndDeleteTabletWithStoragePoolsReboots [GOOD] >> THiveTest::TestCreateAndDeleteTabletWithStoragePools >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression >> TContinuousBackupTests::Basic >> THiveTest::DrainWithHiveRestart [GOOD] >> THiveTest::TestCheckSubHiveForwarding >> ReadSessionImplTest::SuccessfulInit [GOOD] >> ReadSessionImplTest::SuccessfulInitAndThenTimeoutCallback [GOOD] >> ReadSessionImplTest::StopsRetryAfterFailedAttempt [GOOD] >> ReadSessionImplTest::StopsRetryAfterTimeout [GOOD] >> ReadSessionImplTest::UnpackBigBatchWithTwoPartitions [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease >> TContinuousBackupTests::TakeIncrementalBackup >> ReadSessionImplTest::SimpleDataHandlersWithGracefulRelease [GOOD] >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit >> THiveTest::TestCreateAndDeleteTabletWithStoragePools [GOOD] >> THiveTest::TestCreateAndReassignTabletWithStoragePools >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] >> TContinuousBackupTests::Basic [GOOD] |69.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> THiveTest::TestCheckSubHiveForwarding [GOOD] >> THiveTest::TestCheckSubHiveDrain >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] >> DstCreator::ColumnsSizeMismatch >> THiveTest::TestCreateAndReassignTabletWithStoragePools [GOOD] >> THiveTest::TestCreateAndReassignTabletWhileStarting ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::SimpleDataHandlersWithGracefulReleaseWithCommit [GOOD] Test command err: 2025-06-03T10:32:30.454437Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.454449Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.454453Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.454561Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:32:30.455362Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:30.455384Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456312Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456317Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456322Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.456416Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:30.456611Z :INFO: [db] [sessionid] [cluster] Server session id: session id 2025-06-03T10:32:30.456624Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456848Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456852Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456855Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.456933Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:32:30.456947Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456951Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.456980Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: INTERNAL_ERROR Issues: "
: Error: Failed to establish connection to server "" ( cluster cluster). Attempts done: 1 " } 2025-06-03T10:32:30.457822Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.457826Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.457828Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.457917Z :ERROR: [db] [sessionid] [cluster] Got error. Status: TIMEOUT. Description:
: Error: Failed to establish connection to server. Attempts done: 1 2025-06-03T10:32:30.457933Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.457938Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.457948Z :INFO: [db] [sessionid] [cluster] Closing session to cluster: SessionClosed { Status: TIMEOUT Issues: "
: Error: Failed to establish connection to server. Attempts done: 1 " } 2025-06-03T10:32:30.458365Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:32:30.458371Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:32:30.458373Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.458463Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:30.458610Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:30.460380Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:32:30.460464Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:30.460531Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 2. Cluster: "TestCluster". Topic: "TestTopic". Partition: 2. Read offset: (NULL) 2025-06-03T10:32:30.461210Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-50) 2025-06-03T10:32:30.461342Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:30.461353Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:32:30.461358Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:32:30.461362Z :DEBUG: Take Data. Partition 1. Read: {0, 3} (4-4) 2025-06-03T10:32:30.461375Z :DEBUG: Take Data. Partition 1. Read: {0, 4} (5-5) 2025-06-03T10:32:30.461378Z :DEBUG: Take Data. Partition 1. Read: {0, 5} (6-6) 2025-06-03T10:32:30.461382Z :DEBUG: Take Data. Partition 1. Read: {0, 6} (7-7) 2025-06-03T10:32:30.461386Z :DEBUG: Take Data. Partition 1. Read: {0, 7} (8-8) 2025-06-03T10:32:30.461393Z :DEBUG: Take Data. Partition 1. Read: {0, 8} (9-9) 2025-06-03T10:32:30.461397Z :DEBUG: Take Data. Partition 1. Read: {0, 9} (10-10) 2025-06-03T10:32:30.461401Z :DEBUG: Take Data. Partition 1. Read: {0, 10} (11-11) 2025-06-03T10:32:30.461404Z :DEBUG: Take Data. Partition 1. Read: {0, 11} (12-12) 2025-06-03T10:32:30.461408Z :DEBUG: Take Data. Partition 1. Read: {0, 12} (13-13) 2025-06-03T10:32:30.461412Z :DEBUG: Take Data. Partition 1. Read: {0, 13} (14-14) 2025-06-03T10:32:30.461416Z :DEBUG: Take Data. Partition 1. Read: {0, 14} (15-15) 2025-06-03T10:32:30.461420Z :DEBUG: Take Data. Partition 1. Read: {0, 15} (16-16) 2025-06-03T10:32:30.461428Z :DEBUG: Take Data. Partition 1. Read: {0, 16} (17-17) 2025-06-03T10:32:30.461431Z :DEBUG: Take Data. Partition 1. Read: {0, 17} (18-18) 2025-06-03T10:32:30.461435Z :DEBUG: Take Data. Partition 1. Read: {0, 18} (19-19) 2025-06-03T10:32:30.461438Z :DEBUG: Take Data. Partition 1. Read: {0, 19} (20-20) 2025-06-03T10:32:30.461442Z :DEBUG: Take Data. Partition 1. Read: {0, 20} (21-21) 2025-06-03T10:32:30.461446Z :DEBUG: Take Data. Partition 1. Read: {0, 21} (22-22) 2025-06-03T10:32:30.461450Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (23-23) 2025-06-03T10:32:30.461454Z :DEBUG: Take Data. Partition 1. Read: {1, 1} (24-24) 2025-06-03T10:32:30.461457Z :DEBUG: Take Data. Partition 1. Read: {1, 2} (25-25) 2025-06-03T10:32:30.461461Z :DEBUG: Take Data. Partition 1. Read: {1, 3} (26-26) 2025-06-03T10:32:30.461465Z :DEBUG: Take Data. Partition 1. Read: {1, 4} (27-27) 2025-06-03T10:32:30.461469Z :DEBUG: Take Data. Partition 1. 
Read: {1, 5} (28-28) 2025-06-03T10:32:30.461473Z :DEBUG: Take Data. Partition 1. Read: {1, 6} (29-29) 2025-06-03T10:32:30.461477Z :DEBUG: Take Data. Partition 1. Read: {1, 7} (30-30) 2025-06-03T10:32:30.461481Z :DEBUG: Take Data. Partition 1. Read: {1, 8} (31-31) 2025-06-03T10:32:30.461484Z :DEBUG: Take Data. Partition 1. Read: {1, 9} (32-32) 2025-06-03T10:32:30.461506Z :DEBUG: Take Data. Partition 1. Read: {1, 10} (33-33) 2025-06-03T10:32:30.461510Z :DEBUG: Take Data. Partition 1. Read: {1, 11} (34-34) 2025-06-03T10:32:30.461513Z :DEBUG: Take Data. Partition 1. Read: {1, 12} (35-35) 2025-06-03T10:32:30.461517Z :DEBUG: Take Data. Partition 1. Read: {1, 13} (36-36) 2025-06-03T10:32:30.461523Z :DEBUG: Take Data. Partition 1. Read: {1, 14} (37-37) 2025-06-03T10:32:30.461526Z :DEBUG: Take Data. Partition 1. Read: {1, 15} (38-38) 2025-06-03T10:32:30.461530Z :DEBUG: Take Data. Partition 1. Read: {1, 16} (39-39) 2025-06-03T10:32:30.461533Z :DEBUG: Take Data. Partition 1. Read: {1, 17} (40-40) 2025-06-03T10:32:30.461537Z :DEBUG: Take Data. Partition 1. Read: {1, 18} (41-41) 2025-06-03T10:32:30.461541Z :DEBUG: Take Data. Partition 1. Read: {1, 19} (42-42) 2025-06-03T10:32:30.461545Z :DEBUG: Take Data. Partition 1. Read: {1, 20} (43-43) 2025-06-03T10:32:30.461548Z :DEBUG: Take Data. Partition 1. Read: {1, 21} (44-44) 2025-06-03T10:32:30.461552Z :DEBUG: Take Data. Partition 1. Read: {1, 22} (45-45) 2025-06-03T10:32:30.461555Z :DEBUG: Take Data. Partition 1. Read: {1, 23} (46-46) 2025-06-03T10:32:30.461559Z :DEBUG: Take Data. Partition 1. Read: {1, 24} (47-47) 2025-06-03T10:32:30.461562Z :DEBUG: Take Data. Partition 1. Read: {1, 25} (48-48) 2025-06-03T10:32:30.461566Z :DEBUG: Take Data. Partition 1. Read: {1, 26} (49-49) 2025-06-03T10:32:30.461598Z :DEBUG: Take Data. Partition 1. Read: {1, 27} (50-50) 2025-06-03T10:32:30.461616Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-03T10:32:30.461731Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 2 (51-100) 2025-06-03T10:32:30.461768Z :DEBUG: Take Data. Partition 2. Read: {0, 0} (51-51) 2025-06-03T10:32:30.461772Z :DEBUG: Take Data. Partition 2. Read: {0, 1} (52-52) 2025-06-03T10:32:30.461775Z :DEBUG: Take Data. Partition 2. Read: {0, 2} (53-53) 2025-06-03T10:32:30.461777Z :DEBUG: Take Data. Partition 2. Read: {0, 3} (54-54) 2025-06-03T10:32:30.461780Z :DEBUG: Take Data. Partition 2. Read: {0, 4} (55-55) 2025-06-03T10:32:30.461782Z :DEBUG: Take Data. Partition 2. Read: {0, 5} (56-56) 2025-06-03T10:32:30.461785Z :DEBUG: Take Data. Partition 2. Read: {0, 6} (57-57) 2025-06-03T10:32:30.461787Z :DEBUG: Take Data. Partition 2. Read: {0, 7} (58-58) 2025-06-03T10:32:30.461791Z :DEBUG: Take Data. Partition 2. Read: {0, 8} (59-59) 2025-06-03T10:32:30.461793Z :DEBUG: Take Data. Partition 2. Read: {0, 9} (60-60) 2025-06-03T10:32:30.461796Z :DEBUG: Take Data. Partition 2. Read: {0, 10} (61-61) 2025-06-03T10:32:30.461798Z :DEBUG: Take Data. Partition 2. Read: {0, 11} (62-62) 2025-06-03T10:32:30.461800Z :DEBUG: Take Data. Partition 2. Read: {0, 12} (63-63) 2025-06-03T10:32:30.461802Z :DEBUG: Take Data. Partition 2. Read: {0, 13} (64-64) 2025-06-03T10:32:30.461804Z :DEBUG: Take Data. Partition 2. Read: {0, 14} (65-65) 2025-06-03T10:32:30.461807Z :DEBUG: Take Data. Partition 2. Read: {0, 15} (66-66) 2025-06-03T10:32:30.461813Z :DEBUG: Take Data. Partition 2. Read: {0, 16} (67-67) 2025-06-03T10:32:30.461817Z :DEBUG: Take Data. Partition 2. 
Read: {0, 17} (68-68) 2025-06-03T10:32:30.461821Z :DEBUG: Take Data. Partition 2. Read: {0, 18} (69-69) 2025-06-03T10:32:30.461825Z :DEBUG: Take Data. Partition 2. Read: {0, 19} (70-70) 2025-06-03T10:32:30.461828Z :DEBUG: Take Data. Partition 2. Read: {0, 20} (71-71) 2025-06-03T10:32:30.461832Z :DEBUG: Take Data. Partition 2. Read: {0, 21} (72-72) 2025-06-03T10:32:30.461836Z :DEBUG: Take Data. Partition 2. Read: {1, 0} (73-73) 2025-06-03T10:32:30.461840Z :DEBUG: Take Data. Partition 2. Read: {1, 1} (74-74) 2025-06-03T10:32:30.461843Z :DEBUG: Take Data. Partition 2. Read: {1, 2} (75-75) 2025-06-03T10:32:30.461847Z :DEBUG: Take Data. Partition 2. Read: {1, 3} (76-76) 2025-06-03T10:32:30.461851Z :DEBUG: Take Data. Partition 2. Read: {1, 4} (77-77) 2025-06-03T10:32:30.461855Z :DEBUG: Take Data. Partition 2. Read: {1, 5} (78-78) 2025-06-03T10:32:30.461859Z :DEBUG: Take Data. Partition 2. Read: {1, 6} (79-79) 2025-06-03T10:32:30.461862Z :DEBUG: Take Data. Partition 2. Read: {1, 7} (80-80) 2025-06-03T10:32:30.461866Z :DEBUG: Take Data. Partition 2. Read: {1, 8} (81-81) 2025-06-03T10:32:30.461870Z :DEBUG: Take Data. Partition 2. Read: {1, 9} (82-82) 2025-06-03T10:32:30.461879Z :DEBUG: Take Data. Partition 2. Read: {1, 10} (83-83) 2025-06-03T10:32:30.461885Z :DEBUG: Take Data. Partition 2. Read: {1, 11} (84-84) 2025-06-03T10:32:30.461889Z :DEBUG: Take Data. Partition 2. Read: {1, 12} (85-85) 2025-06-03T10:32:30.461892Z :DEBUG: Take Data. Partition 2. Read: {1, 13} (86-86) 2025-06-03T10:32:30.461896Z :DEBUG: Take Data. Partition 2. Read: {1, 14} (87-87) 2025-06-03T10:32:30.461899Z :DEBUG: Take Data. Partition 2. Read: {1, 15} (88-88) 2025-06-03T10:32:30.461903Z :DEBUG: Take Data. Partition 2. Read: {1, 16} (89-89) 2025-06-03T10:32:30.461906Z :DEBUG: Take Data. Partition 2. Read: {1, 17} (90-90) 2025-06-03T10:32:30.461910Z :DEBUG: Take Data. Partition 2. Read: {1, 18} (91-91) 2025-06-03T10:32:30.461914Z :DEBUG: Take Data. Partition 2. Read: {1, 19} (92-92) 2025-06-03T10:32:30.461917Z :DEBUG: Take Data. Partition 2. Read: {1, 20} (93-93) 2025-06-03T10:32:30.461921Z :DEBUG: Take Data. Partition 2. Read: {1, 21} (94-94) 2025-06-03T10:32:30.461925Z :DEBUG: Take Data. Partition 2. Read: {1, 22} (95-95) 2025-06-03T10:32:30.461928Z :DEBUG: Take Data. Partition 2. Read: {1, 23} (96-96) 2025-06-03T10:32:30.461932Z :DEBUG: Take Data. Partition 2. Read: {1, 24} (97-97) 2025-06-03T10:32:30.461935Z :DEBUG: Take Data. Partition 2. Read: {1, 25} (98-98) 2025-06-03T10:32:30.461939Z :DEBUG: Take Data. Partition 2. Read: {1, 26} (99-99) 2025-06-03T10:32:30.461942Z :DEBUG: Take Data. Partition 2. Read: {1, 27} (100-100) 2025-06-03T10:32:30.461948Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 50, size 5000 bytes 2025-06-03T10:32:30.461990Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 2500, ReadSizeServerDelta = 0 2025-06-03T10:32:30.463192Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.463198Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.463204Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.463290Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:32:30.463422Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:30.463477Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.463587Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:30.563861Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.563951Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:32:30.563975Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:30.563982Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-03T10:32:30.564003Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:32:30.764270Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-03T10:32:30.868353Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-03T10:32:30.869347Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1 2025-06-03T10:32:30.873345Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster 2025-06-03T10:32:30.877875Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.877916Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.877920Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:30.878015Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:30.878245Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:30.878330Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.881530Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:30.983184Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:30.983294Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-2) 2025-06-03T10:32:30.983333Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:30.983343Z :DEBUG: Take Data. Partition 1. Read: {1, 0} (2-2) 2025-06-03T10:32:30.983377Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 3). Partition stream id: 1 2025-06-03T10:32:30.983414Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 2, size 6 bytes 2025-06-03T10:32:30.989451Z :DEBUG: [db] [sessionid] [cluster] Committed response: cookies { assign_id: 1 partition_cookie: 1 } 2025-06-03T10:32:30.989509Z :INFO: [db] [sessionid] [cluster] Confirm partition stream destroy. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1 2025-06-03T10:32:30.989565Z :DEBUG: [db] [sessionid] [cluster] Abort session to cluster ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::Basic [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:32:30.514028Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:30.514061Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:30.514067Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:30.514073Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:30.514089Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:30.514094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:30.514105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:30.514120Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:30.514245Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:30.514325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:30.531015Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:30.531048Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:30.536851Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:30.536955Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:30.537012Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:30.543213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:30.543319Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:30.543500Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.543588Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:30.544440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:30.544517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:30.544920Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:30.544935Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:30.544945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:30.544960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:30.544968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:30.544993Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.546841Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:32:30.569378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:30.569471Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.569550Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:32:30.569594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:30.569604Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.570619Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.570650Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: 
txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:30.570725Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.570738Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:30.570747Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:30.570754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:30.571325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.571341Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:30.571346Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:30.571756Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.571767Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.571776Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.571792Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:30.572415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:30.572846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:30.572892Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:30.573109Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.573140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-03T10:32:30.573146Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.573206Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:30.573213Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.573247Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:30.573260Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:30.578154Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:30.578202Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:30.578300Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... e TEvProposeTransactionResult, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 290 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-03T10:32:30.887014Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-03T10:32:30.887060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 290 } } CommitVersion { Step: 5000005 TxId: 104 } 2025-06-03T10:32:30.887082Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_part.cpp:108: HandleReply TEvDataShard::TEvProposeTransactionResult Ignore message: tablet# 72057594046678944, ev# TxKind: TX_KIND_SCHEME Origin: 72075186233409546 Status: COMPLETE TxId: 104 Step: 5000005 OrderId: 104 ExecLatency: 0 ProposeLatency: 5 DomainCoordinators: 72057594046316545 TxStats { PerShardStats { ShardId: 72075186233409546 CpuTimeUsec: 290 } } CommitVersion { Step: 5000005 TxId: 104 } FAKE_COORDINATOR: Erasing txId 104 2025-06-03T10:32:30.887369Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5517: Handle TEvSchemaChanged, tabletId: 72057594046678944, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-03T10:32:30.887376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 104, tablet: 72075186233409546, partId: 0 2025-06-03T10:32:30.887393Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 104:0, at schemeshard: 72057594046678944, message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-03T10:32:30.887403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:1014: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 2025-06-03T10:32:30.887413Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:1018: NTableState::TProposedWaitParts operationId# 104:0 HandleReply TEvSchemaChanged at tablet: 72057594046678944 message: Source { RawX1: 307 RawX2: 4294969589 } Origin: 72075186233409546 State: 2 TxId: 104 Step: 0 Generation: 2 2025-06-03T10:32:30.887433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:664: CollectSchemaChanged accept TEvDataShard::TEvSchemaChanged, operationId: 104:0, shardIdx: 72057594046678944:1, datashard: 72075186233409546, left await: 0, txState.State: ProposedWaitParts, txState.ReadyForNotifications: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.887438Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:683: all shard schema changes has been received, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.887448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:695: send schema changes ack message, operation: 104:0, datashard: 72075186233409546, at schemeshard: 72057594046678944 2025-06-03T10:32:30.887458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 104:0 129 -> 240 2025-06-03T10:32:30.888394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.888701Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.888807Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.888819Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:32:30.888842Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-06-03T10:32:30.888849Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:32:30.888856Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-06-03T10:32:30.888860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:32:30.888867Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-03T10:32:30.888889Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 104 2025-06-03T10:32:30.888898Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:32:30.888906Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:32:30.888913Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:32:30.888952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:32:30.888960Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:1 2025-06-03T10:32:30.888964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:1 2025-06-03T10:32:30.888971Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:32:30.888976Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:2 2025-06-03T10:32:30.888980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:2 2025-06-03T10:32:30.888993Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:32:30.889099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:32:30.889110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:32:30.889124Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:32:30.889133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:32:30.889140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 3 2025-06-03T10:32:30.889738Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:32:30.889750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [1:730:2643] 2025-06-03T10:32:30.890054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 2025-06-03T10:32:30.890212Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 
72057594046678944 2025-06-03T10:32:30.890279Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl" took 78us result status StatusPathDoesNotExist 2025-06-03T10:32:30.890332Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/continuousBackupImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:32:30.890405Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:32:30.890421Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 18us result status StatusPathDoesNotExist 2025-06-03T10:32:30.890442Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Table/continuousBackupImpl/streamImpl\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/Table\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/Table" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 101 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_continuous_backup/unittest >> TContinuousBackupTests::TakeIncrementalBackup [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:127:2058] recipient: [1:109:2140] 2025-06-03T10:32:30.750102Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 
1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:30.750139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:30.750146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:30.750152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:30.750170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:30.750176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:30.750188Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:30.750204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:30.750338Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:30.750433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:30.769409Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:30.769442Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:30.782856Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:30.783001Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:30.783075Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:30.792785Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:30.792904Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:30.793105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.793194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:30.799071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:30.799168Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:30.799539Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:30.799551Z node 
1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:30.799573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:30.799583Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:30.799591Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:30.799619Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.801449Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:122:2147] sender: [1:239:2058] recipient: [1:15:2062] 2025-06-03T10:32:30.825207Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:30.825337Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.825410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:32:30.825465Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:30.825478Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.826322Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.826349Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:30.826415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.826425Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:30.826432Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:30.826439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:30.826863Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.826874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:30.826881Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:30.827372Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.827386Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:30.827392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.827401Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:30.828208Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:30.828692Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:30.828735Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:30.828950Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:30.828978Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 135 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:30.828987Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.829061Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:30.829070Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:30.829110Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:30.829125Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: 
schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:30.833765Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:30.833788Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:30.833858Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... hard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-06-03T10:32:31.160335Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#103:1 progress is 4/4 2025-06-03T10:32:31.160339Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-06-03T10:32:31.160345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 103, ready parts: 4/4, is published: true 2025-06-03T10:32:31.160367Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [1:335:2313] message: TxId: 103 2025-06-03T10:32:31.160376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 103 ready parts: 4/4 2025-06-03T10:32:31.160386Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:0 2025-06-03T10:32:31.160392Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:0 2025-06-03T10:32:31.160409Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 3 2025-06-03T10:32:31.160415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:1 2025-06-03T10:32:31.160419Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:1 2025-06-03T10:32:31.160439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 4 2025-06-03T10:32:31.160444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:2 2025-06-03T10:32:31.160448Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:2 2025-06-03T10:32:31.160459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:32:31.160464Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 103:3 2025-06-03T10:32:31.160468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 103:3 2025-06-03T10:32:31.160479Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 4 2025-06-03T10:32:31.161102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 103: got EvNotifyTxCompletionResult 2025-06-03T10:32:31.161118Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 103: satisfy waiter [1:723:2626] TestWaitNotification: OK eventTxId 103 2025-06-03T10:32:31.161284Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:32:31.161379Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 106us result status StatusSuccess 2025-06-03T10:32:31.161558Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 
2025-06-03T10:32:31.161658Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:32:31.161691Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Table/continuousBackupImpl/streamImpl" took 36us result status StatusSuccess 2025-06-03T10:32:31.161820Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Table/continuousBackupImpl/streamImpl" PathDescription { Self { Name: "streamImpl" PathId: 4 SchemeshardId: 72057594046678944 PathType: EPathTypePersQueueGroup CreateFinished: true CreateTxId: 102 CreateStep: 5000003 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeStreamImpl Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 PQVersion: 2 } ChildrenExist: false BalancerTabletID: 72075186233409548 } PersQueueGroup { Name: "streamImpl" PathId: 4 TotalGroupCount: 1 PartitionPerTablet: 2 PQTabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 } TopicName: "continuousBackupImpl" TopicPath: "/MyRoot/Table/continuousBackupImpl/streamImpl" YdbDatabasePath: "/MyRoot" PartitionKeySchema { Name: "key" TypeId: 4 } MeteringMode: METERING_MODE_REQUEST_UNITS OffloadConfig { IncrementalBackup { DstPath: "/MyRoot/IncrBackupImpl" DstPathId { OwnerId: 72057594046678944 LocalId: 5 } } } } Partitions { PartitionId: 0 TabletId: 72075186233409547 Status: Active } AlterVersion: 2 BalancerTabletID: 72075186233409548 NextPartitionId: 1 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } } PathId: 4 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:31.162046Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/IncrBackupImpl" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:32:31.162072Z node 1 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/IncrBackupImpl" took 28us result status StatusSuccess 2025-06-03T10:32:31.162158Z node 1 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/IncrBackupImpl" PathDescription { Self { Name: "IncrBackupImpl" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true 
CreateTxId: 103 CreateStep: 5000004 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupImpl" Columns { Name: "key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint64" TypeId: 4 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 4 PathsLimit: 10000 ShardsInside: 4 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 1 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__incremental_backup" Value: "{}" } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 5 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> DstCreator::ColumnsSizeMismatch [GOOD] >> DstCreator::ColumnTypeMismatch >> THiveTest::TestCreateAndReassignTabletWhileStarting [GOOD] >> THiveTest::TestCreateTabletBeforeLocal >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Stripe [GOOD] |69.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DstCreator::ReplicationModeMismatch ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Stripe [GOOD] Test command err: 2025-06-03T10:32:23.696799Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:23.696856Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696862Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696865Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696868Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696871Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696874Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696877Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696880Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696883Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696885Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696888Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696891Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696894Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696896Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# 
ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696899Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696902Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696905Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696909Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.696914Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:23.696923Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.696928Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.696932Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:23.696935Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:23.696938Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:23.696941Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:23.696945Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:23.696947Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:23.696951Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:23.696953Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:23.696957Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 
2025-06-03T10:32:23.696960Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:23.705116Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:23.705179Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:30: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 error Marker# BPG50 2025-06-03T10:32:23.705188Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.705193Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.705200Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.705204Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.705208Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Sent Marker# BPG51 2025-06-03T10:32:23.705213Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705217Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705221Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705225Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705229Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705233Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705237Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705241Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705245Z node 3 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705249Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705253Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705256Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:23.705264Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:23.705282Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:23.705290Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 6 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:23.705404Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:23.705425Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:23.705439Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:23.705451Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:23.705470Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:23.705526Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:6:0] Marker# BPP01 2025-06-03T10:32:23.705545Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:23.705555Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:23.705630Z node 3 
:BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.366 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 8.546 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 8.718 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:6:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 8.792 VDiskId# [0:1:0:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 8.811 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 8.824 VDiskId# [0:1:0:3:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 8.837 VDiskId# [0:1:0:4:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 8.855 VDiskId# [0:1:0:5:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 8.914 VDiskId# [0:1:0:6:0] NodeId# 3 Status# OK } ] } >> THiveTest::TestCheckSubHiveDrain [GOOD] >> THiveTest::TestCheckSubHiveMigration >> THiveTest::TestCreateTabletBeforeLocal [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups >> DstCreator::ColumnTypeMismatch [GOOD] >> DstCreator::WithSyncIndex >> DstCreator::SameOwner |69.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |69.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut/ydb-core-cms-ut |69.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_continuous_backup/test-results/unittest/{meta.json ... 
results_accumulator.log} >> DstCreator::GlobalConsistency >> THiveTest::TestCreateTabletAndReassignGroups [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups3 |69.6%| [LD] {RESULT} $(B)/ydb/core/cms/ut/ydb-core-cms-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ColumnTypeMismatch [GOOD] Test command err: 2025-06-03T10:32:31.473597Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669098493532379:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:31.474116Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eea/r3tmp/tmp8085bC/pdisk_1.dat 2025-06-03T10:32:31.601983Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:31.604785Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669098493532352:2079] 1748946751468391 != 1748946751468394 TClient is connected to server localhost:22572 TServer::EnableGrpc on GrpcPort 26519, node 1 2025-06-03T10:32:31.651827Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:31.651840Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:31.651843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:31.651917Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:31.660972Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:31.661005Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:31.662100Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22572 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:31.712891Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:31.717771Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:31.739526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946751762 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946751790 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946751762 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946751790 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-03T10:32:31.755128Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:31.755164Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:31.755167Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:31.755372Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:31.915867Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946751776, tx_id: 281474976715658 } } } 2025-06-03T10:32:31.916027Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:31.916608Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:31.917365Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946751790 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "extra" Type: "Utf8" TypeId: 4608 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartiti ... ocalhost:20419 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:32.261051Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:32.261084Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:32.262168Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:32.269358Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:32.272693Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:32.282595Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946752315 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752343 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946752315 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752343 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-03T10:32:32.301042Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:32.301098Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:32.301101Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:32.301242Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:32.556106Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946752329, tx_id: 281474976715658 } } } 2025-06-03T10:32:32.556203Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:32.556683Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:32.556991Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752343 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:32.557039Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Column type mismatch: name: value, expected: Utf8, got: Uint32 >> DstCreator::ReplicationModeMismatch [GOOD] >> DstCreator::ReplicationConsistencyLevelMismatch >> DstCreator::WithSyncIndex [GOOD] >> THiveTest::TestCreateTabletAndReassignGroups3 [GOOD] >> THiveTest::TestCreateTabletAndMixedReassignGroups3 >> DstCreator::SameOwner [GOOD] >> DstCreator::SamePartitionCount >> THiveTest::TestCheckSubHiveMigration [GOOD] >> THiveTest::TestCheckSubHiveMigrationManyTablets >> TDSProxyGetTest::TestMirror32GetIntervalsWipedAllOk [GOOD] >> TDSProxyPatchTest::NaiveOk_ErasureMirror3dc >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] >> DstCreator::GlobalConsistency [GOOD] >> DstCreator::KeyColumnNameMismatch >> DstCreator::NonExistentSrc >> TDSProxyPatchTest::NaiveOk_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestBlock42PutStatusOkWith_0_0_VdiskErrors >> LocalPartition::WithoutPartitionWithRestart [GOOD] >> LocalPartition::WithoutPartitionUnknownEndpoint ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndex [GOOD] Test command err: 2025-06-03T10:32:32.822564Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669104344443543:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:32.822590Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ed2/r3tmp/tmpncA3r6/pdisk_1.dat 2025-06-03T10:32:32.884672Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:63463 TServer::EnableGrpc on GrpcPort 27739, node 1 2025-06-03T10:32:32.920487Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:32.920502Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:32.920504Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:32.920572Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:32.923903Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:32.923932Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:32.925003Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:63463 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:32.967387Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:32.972593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946753078 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753015 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946753078 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-03T10:32:33.056875Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.056913Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.056915Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:33.057109Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:33.207444Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946753078, tx_id: 281474976715658 } } } 2025-06-03T10:32:33.207578Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:33.208124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.208558Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-03T10:32:33.208566Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:32:33.216867Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-03T10:32:33.217236Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Replicated" PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753260 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } Ta ... 
untToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037906 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 7 PathOwnerId: 72057594046644480 } TClient::Ls request: /Root/Replicated/index_by_value 2025-06-03T10:32:33.226025Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 7] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex 
CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753260 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753260 ParentPathId: 6 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753260 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753260 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" 
ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037906 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Replicated/index_by_value/indexImplTable" >> DirectReadWithClient::OneMessage [GOOD] >> DirectReadWithClient::ManyMessages >> THiveTest::TestCreateTabletAndMixedReassignGroups3 [GOOD] >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots >> TDSProxyPutTest::TestBlock42PutStatusOkWith_0_0_VdiskErrors [GOOD] >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Block >> DstCreator::SamePartitionCount [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::ReplicationConsistencyLevelMismatch [GOOD] Test command err: 2025-06-03T10:32:32.585095Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669105126371835:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:32.585119Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ede/r3tmp/tmpJCJmVL/pdisk_1.dat 2025-06-03T10:32:32.663424Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:17285 2025-06-03T10:32:32.685366Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:32.685400Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:32.686590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 20111, node 1 2025-06-03T10:32:32.702137Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:32.702155Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:32.702157Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:32.702218Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17285 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:32.751899Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:32.755340Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:32.817936Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946752798 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752868 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946752798 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752868 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-03T10:32:32.832593Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:32.832663Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:32.832667Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:32.832840Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:33.053533Z node 1 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946752854, tx_id: 281474976715658 } } } 2025-06-03T10:32:33.053662Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:33.054096Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:33.054621Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946752868 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_NONE ConsistencyLevel: CONSISTENCY_LEVEL_UNKNOWN } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 Tx ... 2057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:33.401393Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:33.401432Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:33.402865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:33.406082Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:33.407675Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:33.408870Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:33.423852Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 
TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753456 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753526 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753456 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753526 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-03T10:32:33.490016Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.490044Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.490046Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:33.490269Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:33.779225Z node 2 :REPLICATION_CONTROLLER TRACE: 
dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946753470, tx_id: 281474976715658 } } } 2025-06-03T10:32:33.779309Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:33.779777Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:33.780145Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753526 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 
ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_GLOBAL } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:33.780198Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Replication consistency level mismatch: expected: CONSISTENCY_LEVEL_ROW, got: 1 >> DstCreator::Basic >> DstCreator::NonExistentSrc [GOOD] >> DstCreator::KeyColumnsSizeMismatch >> DstCreator::KeyColumnNameMismatch [GOOD] >> Compression::WriteRAW [GOOD] >> Compression::WriteGZIP ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::SamePartitionCount [GOOD] Test command err: 2025-06-03T10:32:32.987400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669103442431871:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:32.987434Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ec8/r3tmp/tmp1HWoH7/pdisk_1.dat 2025-06-03T10:32:33.056871Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:28555 TServer::EnableGrpc on GrpcPort 11548, node 1 2025-06-03T10:32:33.089622Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:33.089667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) 
VolatileState: Disconnected -> Connecting 2025-06-03T10:32:33.089836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:33.089848Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:33.089850Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:33.089889Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:33.090713Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:33.129226Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:33.132205Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.133004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753232 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753176 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946753232 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-03T10:32:33.198786Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.198815Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.198817Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:33.198986Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:33.407538Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946753232, tx_id: 281474976715659 } } } 2025-06-03T10:32:33.407646Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:33.408109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.408441Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 
281474976715660} 2025-06-03T10:32:33.408444Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715660 TClient::Ls request: /Root/Replicated 2025-06-03T10:32:33.426166Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715660 2025-06-03T10:32:33.426181Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715660 CreateStep: 1748946753470 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "user@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-03T10:32:33.762120Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669109669556995:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:33.762987Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ec8/r3tmp/tmpI2xhVR/pdisk_1.dat 2025-06-03T10:32:33.793626Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:61534 TServer::EnableGrpc on GrpcPort 14334, node 2 2025-06-03T10:32:33.809662Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:33.809675Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:33.809677Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:33.809731Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61534 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:33.859294Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:33.859333Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:33.859892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.860297Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:33.863307Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946753918 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753904 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946753918 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... (TRUNCATED) 2025-06-03T10:32:33.891389Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.891479Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.891494Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:33.891714Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:34.187831Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946753918, tx_id: 281474976715658 } } } 2025-06-03T10:32:34.187964Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:34.188435Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:34.188663Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 
281474976715659} 2025-06-03T10:32:34.188671Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:32:34.196601Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-03T10:32:34.196620Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946753918 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946754240 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... 
(TRUNCATED) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnNameMismatch [GOOD] Test command err: 2025-06-03T10:32:33.130714Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669110031372046:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:33.130736Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ec1/r3tmp/tmpxascrH/pdisk_1.dat 2025-06-03T10:32:33.195610Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:3257 TServer::EnableGrpc on GrpcPort 6124, node 1 2025-06-03T10:32:33.231526Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:33.231549Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:33.231552Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:33.231616Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:33.232477Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:33.232522Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:33.234031Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:3257 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:33.282391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:33.286281Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:32:33.290296Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946753393 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946753330 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946753393 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-03T10:32:33.367941Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.367976Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:33.367979Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:33.368190Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:33.723054Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946753393, tx_id: 281474976710658 } } } 2025-06-03T10:32:33.723174Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:33.723669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.724120Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-03T10:32:33.724123Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-03T10:32:33.737191Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 2025-06-03T10:32:33.737208Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls request: /Root/Replicated TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1748946753785 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ec1/r3tmp/tmpoNjh59/pdisk_1.dat 2025-06-03T10:32:34.077425Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:34.081988Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:34.083768Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669111929387911:2079] 1748946754049259 != 1748946754049262 TClient is connected to server localhost:6843 TServer::EnableGrpc on GrpcPort 12762, node 2 2025-06-03T10:32:34.099599Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:34.099616Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:34.099618Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:34.099681Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6843 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:34.151952Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:34.151994Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:34.152549Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:34.153016Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:32:34.158602Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:34.172937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754198 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946754233 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754198 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946754233 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-03T10:32:34.190038Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.190085Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.190088Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:34.190304Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:34.465028Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946754219, tx_id: 281474976715658 } } } 2025-06-03T10:32:34.465122Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:34.465692Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:34.466071Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946754233 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:34.466116Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key column name mismatch: position: 0, expected: key, got: value >> DstCreator::Basic [GOOD] >> DstCreator::CannotFindColumn >> DstCreator::WithSyncIndexAndIntermediateDir >> DstCreator::KeyColumnsSizeMismatch [GOOD] >> DstCreator::WithIntermediateDir >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] >> DstCreator::CannotFindColumn [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::KeyColumnsSizeMismatch [GOOD] Test command err: 2025-06-03T10:32:34.080387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669114597395443:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.080724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ebc/r3tmp/tmpfD2nIL/pdisk_1.dat 2025-06-03T10:32:34.163966Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:22827 TServer::EnableGrpc on GrpcPort 16922, node 1 2025-06-03T10:32:34.202195Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:34.202220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:34.202222Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:34.202284Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:34.234626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:34.234659Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:34.235716Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:34.248351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754296 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754296 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version... 
(TRUNCATED) 2025-06-03T10:32:34.255504Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.255539Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.255542Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:34.255678Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:34.596204Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { status: SCHEME_ERROR, issues: } } 2025-06-03T10:32:34.596226Z node 1 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot describe table: status: SCHEME_ERROR, issue: 2025-06-03T10:32:34.796592Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669111055679750:2206];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.796663Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ebc/r3tmp/tmp5ZZnUH/pdisk_1.dat 2025-06-03T10:32:34.813947Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:13998 TServer::EnableGrpc on GrpcPort 15338, node 2 2025-06-03T10:32:34.841844Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:34.841863Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:34.841867Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:34.841926Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13998 WaitRootIsUp 'Root'... 
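[Editor's note] The KeyColumnsSizeMismatch trace above shows DstCreator aborting as soon as TEvDescribeTableResponse comes back with status SCHEME_ERROR ("Cannot describe table: ..."). A minimal sketch of that early guard, with hypothetical types standing in for the YDB internals (this is not the actual dst_creator.cpp code):

#include <iostream>
#include <string>

// Hypothetical stand-ins for the describe-result plumbing seen in the trace.
enum class EStatus { SUCCESS, SCHEME_ERROR };

struct TDescribeResult {
    EStatus Status;
    std::string Issues;
};

// Mirrors the guard traced above: a DescribeTable result that is not
// SUCCESS aborts destination creation before any scheme tx is proposed.
bool CheckDescribeResult(const TDescribeResult& result, std::string& error) {
    if (result.Status != EStatus::SUCCESS) {
        error = "Cannot describe table: status: SCHEME_ERROR, issue: " + result.Issues;
        return false;
    }
    return true;
}

int main() {
    std::string error;
    if (!CheckDescribeResult({EStatus::SCHEME_ERROR, ""}, error)) {
        std::cout << "Error: status# StatusSchemeError, reason# " << error << "\n";
    }
}
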
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:34.895785Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:34.895836Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:34.896966Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:34.905618Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:34.915091Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:34.916541Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:34.993174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754954 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755052 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754954 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755052 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) 2025-06-03T10:32:35.011912Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.011947Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.011950Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:35.012130Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:35.293402Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946755024, tx_id: 281474976715658 } } } 2025-06-03T10:32:35.293559Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:35.294115Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: 
ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:35.294723Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755052 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnNames: "value" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 
ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:35.294792Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Key columns size mismatch: expected: 1, got: 2 >> TVPatchTests::FindingPartsWhenPartsAreDontExist >> THiveTest::TestCreateTabletAndReassignGroupsWithReboots [GOOD] >> THiveTest::TestCreateExternalTablet >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] >> TVPatchTests::FindingPartsWhenPartsAreDontExist [GOOD] >> TVPatchTests::FindingPartsWhenOnlyOnePartExists ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::CannotFindColumn [GOOD] Test command err: 2025-06-03T10:32:34.610123Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669114925413279:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.610148Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eb8/r3tmp/tmpeznOgb/pdisk_1.dat 2025-06-03T10:32:34.676361Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:26939 TServer::EnableGrpc on GrpcPort 21130, node 1 2025-06-03T10:32:34.709891Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:34.709906Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:34.709908Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:34.709958Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:34.711405Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:34.711456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:34.712566Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: 
Connecting -> Connected TClient is connected to server localhost:26939 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:34.770356Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:34.774240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946754877 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946754821 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710658 CreateStep: 1748946754877 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-03T10:32:34.845779Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.845891Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:34.845897Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:34.846018Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:35.085896Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946754877, tx_id: 281474976710658 } } } 2025-06-03T10:32:35.086000Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:35.086447Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:35.086837Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976710659} 2025-06-03T10:32:35.086846Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976710659 2025-06-03T10:32:35.098441Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976710659 TClient::Ls request: /Root/Replicated 2025-06-03T10:32:35.098463Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 3] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710659 CreateStep: 1748946755143 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-03T10:32:35.307142Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669115572743229:2093];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:35.307475Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eb8/r3tmp/tmptc24Un/pdisk_1.dat 2025-06-03T10:32:35.330779Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:30122 TServer::EnableGrpc on GrpcPort 6805, node 2 2025-06-03T10:32:35.361331Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:35.361347Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:35.361349Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:35.361416Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30122 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:35.405736Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:35.405777Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:35.407099Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:35.411989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:35.413741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:35.415771Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:35.428002Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946755458 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755486 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946755458 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 7 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 7 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 5 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755486 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: "Src" PathId: 2 S... 
(TRUNCATED) 2025-06-03T10:32:35.444903Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.444951Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.444954Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:35.445135Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:35.838580Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Src, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946755472, tx_id: 281474976715658 } } } 2025-06-03T10:32:35.838691Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:35.839135Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAlreadyExists txid# 281474976715660 Reason# Check failed: path: '/Root/Dst', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 3], type: EPathTypeTable, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_table.cpp:484} 2025-06-03T10:32:35.839477Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dst" PathDescription { Self { Name: "Dst" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946755486 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Dst" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: 
"value2" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 
DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:35.839539Z node 2 :REPLICATION_CONTROLLER ERROR: dst_creator.cpp:594: [DstCreator][rid 1][tid 1] Error: status# StatusSchemeError, reason# Cannot find column: name: value >> DstCreator::WithIntermediateDir [GOOD] >> DstCreator::WithAsyncIndex >> IncrementalRestoreScan::Empty >> THiveTest::TestCreateExternalTablet [GOOD] >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] |69.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/security/ut/ydb-core-security-ut |69.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/security/ut/ydb-core-security-ut |69.7%| [LD] {RESULT} $(B)/ydb/core/security/ut/ydb-core-security-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithSyncIndexAndIntermediateDir [GOOD] Test command err: 2025-06-03T10:32:35.513400Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669116558175953:2069];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:35.513454Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eb6/r3tmp/tmp4XuhLK/pdisk_1.dat 2025-06-03T10:32:35.618183Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:35.618227Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:35.620517Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:35.629048Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64142 TServer::EnableGrpc on GrpcPort 11125, node 1 2025-06-03T10:32:35.664361Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:35.664376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:35.664379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:35.664433Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64142 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:35.726630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:35.731626Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946755843 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946755773 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946755843 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-03T10:32:35.817394Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.817437Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.817440Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:35.817628Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:35.965385Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946755843, tx_id: 281474976715658 } } } 2025-06-03T10:32:35.965513Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:35.966055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-06-03T10:32:35.966730Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-03T10:32:35.966738Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:32:35.973652Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 2025-06-03T10:32:35.973978Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:335: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/Dir/Replicated" PathDescription { Self { Name: "Replicated" PathId: 6 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756018 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 
PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 0 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } ... 
: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } PathId: 8 PathOwnerId: 72057594046644480 } 2025-06-03T10:32:35.981025Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 2] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 8] TClient::Ls request: /Root/Dir/Replicated/index_by_value TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "index_by_value" PathId: 7 SchemeshardId: 72057594046644480 PathType: EPathTypeTableIndex CreateFinished: 
true CreateTxId: 281474976715659 CreateStep: 1748946756018 ParentPathId: 6 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableIndexVersion: 1 } ChildrenExist: true } Children { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756018 ParentPathId: 7 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" PathSubType: EPathSubTypeSyncIndexImplTable Version { ... (TRUNCATED) TClient::Ls request: /Root/Dir/Replicated/index_by_value/indexImplTable TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756018 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } ... (TRUNCATED) Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "indexImplTable" PathId: 8 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756018 ParentPathId: 7 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeSyncIndexImplTable Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplTable" Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "value" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 
10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 SplitByLoadSettings { Enabled: false } } ColumnFamilies { Id: 0 StorageConfig { SysLog { PreferredPoolKind: "test" } Log { PreferredPoolKind: "test" } Data { PreferredPoolKind: "test" } } } } TableSchemaVersion: 1 IsBackup: false ReplicationConfig { Mode: REPLICATION_MODE_READ_ONLY ConsistencyLevel: CONSISTENCY_LEVEL_ROW } IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186224037905 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 7 PathsLimit: 10000 ShardsInside: 19 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } UserAttributes { Key: "__async_replica" Value: "true" } } Path: "/Root/Dir/Replicated/index_by_value/indexImplTable" >> TxUsage::WriteToTopic_Demo_32_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::FindingPartsWhenOnlyOnePartExists [GOOD] Test command err: Recv 65537 2025-06-03T10:32:36.172820Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-03T10:32:36.173221Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 
2025-06-03T10:32:36.173241Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts 2025-06-03T10:32:36.173256Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm Recv 65537 2025-06-03T10:32:36.381759Z node 2 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:10:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-03T10:32:36.381854Z node 2 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:10:0] Status# OK ResultSize# 1 2025-06-03T10:32:36.381866Z node 2 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:10:0] FoundParts# [1] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-03T10:32:36.381929Z node 2 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 XorReceiver# no ParityPart# no ForceEnd# yes 2025-06-03T10:32:36.381941Z node 2 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: received force end; OriginalBlobId# [1:2:3:4:6:10:0] PatchedBlobId# [1:3:3:4:6:10:0] OriginalPartId# 1 PatchedPartId# 1 Status# OK ErrorReason# Send NKikimr::TEvBlobStorage::TEvVPatchResult 2025-06-03T10:32:36.381954Z node 2 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvVPatchDyingConfirm >> DstCreator::WithAsyncIndex [GOOD] >> IncrementalRestoreScan::Empty [GOOD] >> IncrementalRestoreScan::ChangeSenderEmpty |69.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |69.7%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable |69.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel_unstable/ydb-core-cms-ut_sentinel_unstable ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::Empty [GOOD] Test command err: 2025-06-03T10:32:36.787178Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
<main>: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:36.787260Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:36.787284Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e18/r3tmp/tmpbPpycX/pdisk_1.dat 2025-06-03T10:32:36.897667Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:178: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:595:2520] Exhausted 2025-06-03T10:32:36.897711Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:127: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:595:2520] Handle TEvIncrementalRestoreScan::TEvFinished NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvFinished 2025-06-03T10:32:36.897721Z node 1 :CHANGE_EXCHANGE DEBUG: incr_restore_scan.cpp:191: [TIncrementalRestoreScan][1337][OwnerId: 1, LocalPathId: 2][OwnerId: 3, LocalPathId: 4][1:595:2520] Finish Done >> BasicUsage::MaxByteSizeEqualZero [GOOD] >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_dst_creator/unittest >> DstCreator::WithAsyncIndex [GOOD] Test command err: 2025-06-03T10:32:35.724527Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669116584246550:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:35.724561Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eaf/r3tmp/tmpcbAJFy/pdisk_1.dat 2025-06-03T10:32:35.788667Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:25346 TServer::EnableGrpc on GrpcPort 27339, node 1 2025-06-03T10:32:35.825538Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:35.825570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:35.826655Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:35.827059Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:35.827070Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:35.827072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:35.827119Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25346 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:35.874426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:35.878159Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946755976 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" Key... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946755920 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 3 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946755976 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: false } Children { Name: ".sys" PathId: 1844... 
(TRUNCATED) 2025-06-03T10:32:35.943202Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.943241Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:35.943243Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:35.943378Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:36.079848Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946755976, tx_id: 281474976715658 } } } 2025-06-03T10:32:36.079961Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:36.080501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-06-03T10:32:36.080721Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-03T10:32:36.080729Z node 1 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:32:36.087414Z node 1 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 TClient::Ls request: /Root/Dir/Replicated 2025-06-03T10:32:36.087428Z node 1 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 4] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 4 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756130 ParentPathId: 3 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "ke... (TRUNCATED) 2025-06-03T10:32:36.435566Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669119548160087:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:36.435596Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000eaf/r3tmp/tmpNoekws/pdisk_1.dat 2025-06-03T10:32:36.463349Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TClient is connected to server localhost:6441 TServer::EnableGrpc on GrpcPort 10603, node 2 2025-06-03T10:32:36.486112Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:36.486130Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:36.486132Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:36.486199Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6441 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:36.541855Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:36.541882Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:36.542398Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:36.542865Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:32:36.544762Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... TClient::Ls request: /Root/Table TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946756648 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: true } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyCo... (TRUNCATED) TClient::Ls request: /Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 1748946756592 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 4 SubDomainVersion: 1 SecurityStateVersion: 0 } ChildrenExist: true } Children { Name: "Table" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946756648 ParentPathId: 1 PathState: EPathStateCreate Owner: "root@builtin" ACL: "" ChildrenExist: true } Children { Name: ".sys" PathId: 18446... 
(TRUNCATED) 2025-06-03T10:32:36.617627Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:36.617699Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:56: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: [72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:32:36.617704Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:87: [DstCreator][rid 1][tid 1] Get table profiles 2025-06-03T10:32:36.617882Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:106: [DstCreator][rid 1][tid 1] Handle NKikimr::NConsole::TEvConfigsDispatcher::TEvGetConfigResponse 2025-06-03T10:32:36.851617Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:158: [DstCreator][rid 1][tid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribeTableResponse { Result: { name: Table, owner: root@builtin, type: Table, size_bytes: 0, created_at: { plan_step: 1748946756648, tx_id: 281474976715658 } } } 2025-06-03T10:32:36.851734Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:249: [DstCreator][rid 1][tid 1] Handle NKikimr::TEvTxUserProxy::TEvAllocateTxIdResult 2025-06-03T10:32:36.852283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:32:36.852545Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:279: [DstCreator][rid 1][tid 1] Handle {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715659} 2025-06-03T10:32:36.852555Z node 2 :REPLICATION_CONTROLLER DEBUG: dst_creator.cpp:306: [DstCreator][rid 1][tid 1] Subscribe tx: txId# 281474976715659 2025-06-03T10:32:36.861787Z node 2 :REPLICATION_CONTROLLER TRACE: dst_creator.cpp:311: [DstCreator][rid 1][tid 1] Handle NKikimrScheme.TEvNotifyTxCompletionResult TxId: 281474976715659 TClient::Ls request: /Root/Replicated 2025-06-03T10:32:36.861803Z node 2 :REPLICATION_CONTROLLER INFO: dst_creator.cpp:585: [DstCreator][rid 1][tid 1] Success: dstPathId# [OwnerId: 72057594046644480, LocalPathId: 5] TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Replicated" PathId: 5 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715659 CreateStep: 1748946756907 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" 
ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Replicated" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key... (TRUNCATED) ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/services/ydb/table_split_ut/unittest >> YdbTableSplit::MergeByNoLoadAfterSplit [GOOD] Test command err: 2025-06-03T10:31:11.465771Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668755887880194:2207];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:11.465828Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00223e/r3tmp/tmpr2NwSd/pdisk_1.dat 2025-06-03T10:31:11.720498Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20785, node 1 2025-06-03T10:31:11.775973Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:31:11.775987Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:31:11.775990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:31:11.776047Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16241 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:31:11.812128Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:11.812168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:11.814482Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:11.849328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... Triggering split by load TClient is connected to server localhost:16241 2025-06-03T10:31:12.195276Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848299:2334], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.195329Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.246467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:31:12.336015Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848462:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.336093Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.338401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946672340 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) Table has 1 shards TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946672340 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) 2025-06-03T10:31:12.385652Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848559:2378], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.385689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.385840Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848567:2383], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.385850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848568:2384], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.386575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848598:2390], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.386595Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848602:2392], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.386602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848609:2395], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.386610Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668760182848610:2396], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.386618Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:12.387093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata, operationId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:31:12.387181Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715661:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:12.387192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager, operationId: 281474976715661:1, at schemeshard: 72057594046644480 2025-06-03T10:31:12.387212Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715661:2, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:12.387236Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /Root/.metadata/workload_manager/pools, operationId: 281474976715661:2, at schemeshard: 72057594046644480 2025-06-03T10:31:12.387251Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715661:3, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:12.387262Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_create_resource_pool.cpp:148: [72057594046644480] TCreateResourcePool Propose: opId# 281474976715661:3, path# /Root/.metadata/workload_manager/pools/default 2025-06-03T10:31:12.387313Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976715661:3 1 -> 128 2025-06-03T10:31:12.387393Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976715661:4, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:31:12.387405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation p ... tPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 2 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... 
(TRUNCATED) 2025-06-03T10:32:32.525712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__table_stats.cpp:450: Propose merge request : Transaction { WorkingDir: "/Root" OperationType: ESchemeOpSplitMergeTablePartitions SplitMergeTablePartitions { TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 } Internal: true FailOnExist: false } TxId: 281474976710658 TabletId: 72057594046644480, reason: shard with tabletId: 72075186224037890 merge by load (shardLoad: 0.02), shardToMergeCount: 2, totalSize: 0, sizeToMerge: 0, totalLoad: 0.04, loadThreshold: 0.07 2025-06-03T10:32:32.525782Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:798: TSplitMerge Propose, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-03T10:32:32.526011Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_split_merge.cpp:1077: TSplitMerge Propose accepted, tableStr: /Root/Foo, tableId: , opId: 281474976710658:0, at schemeshard: 72057594046644480, op: SourceRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "\002\000\004\000\000\000\346\022\375~\000\000\000\200" TabletID: 72075186224037889 ShardIdx: 2 } SourceRanges { KeyRangeBegin: "\002\000\004\000\000\000\346\022\375~\000\000\000\200" KeyRangeEnd: "" TabletID: 72075186224037890 ShardIdx: 3 } DestinationRanges { KeyRangeBegin: "\002\000\000\000\000\200\000\000\000\200" KeyRangeEnd: "" ShardIdx: 4 }, request: TablePath: "/Root/Foo" SourceTabletId: 72075186224037889 SourceTabletId: 72075186224037890 SchemeshardId: 72057594046644480 2025-06-03T10:32:32.526021Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 281474976710658:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:32:32.530037Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 281474976710658:0 ProgressState, operation type: TxSplitTablePartition, at tablet# 72057594046644480 2025-06-03T10:32:32.550989Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:175: TCreateParts opId# 281474976710658:0 HandleReply TEvCreateTabletReply, at tabletId: 72057594046644480 2025-06-03T10:32:32.551057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710658:0 2 -> 3 2025-06-03T10:32:32.552291Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:83: TSplitMerge TConfigureDestination ProgressState, operationId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:32:32.557152Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037891 actor [1:7511669103781303284:5671] 2025-06-03T10:32:32.567550Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037891 2025-06-03T10:32:32.567632Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037891, state: WaitScheme 2025-06-03T10:32:32.567708Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-03T10:32:32.604571Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:37: TSplitMerge TConfigureDestination operationId# 281474976710658:0 HandleReply 
TEvInitSplitMergeDestinationAck, operationId: 281474976710658:0, at schemeshard: 72057594046644480 message# OperationCookie: 281474976710658 TabletId: 72075186224037891 2025-06-03T10:32:32.604605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710658:0 3 -> 131 2025-06-03T10:32:32.605517Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:328: TSplitMerge TTransferData operationId# 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:32:32.611263Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state Ready tabletId 72075186224037891 2025-06-03T10:32:32.611312Z node 1 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast Ready tabletId 72075186224037891 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:32:32.611335Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037891 2025-06-03T10:32:32.611348Z node 1 :TX_DATASHARD INFO: datashard.cpp:1293: Change sender activated: at tablet: 72075186224037891 2025-06-03T10:32:32.611469Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037891 TxInFly 0 2025-06-03T10:32:32.614392Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:206: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037889 2025-06-03T10:32:32.614515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:206: TSplitMerge TTransferData operationId# 281474976710658:0 HandleReply TEvSplitAck, at schemeshard: 72057594046644480, message: OperationCookie: 281474976710658 TabletId: 72075186224037890 2025-06-03T10:32:32.614655Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 281474976710658:0 131 -> 132 2025-06-03T10:32:32.615323Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:32:32.615428Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:32:32.615448Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:431: TSplitMerge TNotifySrc, operationId: 281474976710658:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:32:32.616026Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 2 Version: 6 PathOwnerId: 72057594046644480, cookie: 281474976710658 2025-06-03T10:32:32.616047Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:108: Operation in-flight, at schemeshard: 72057594046644480, txId: 281474976710658 2025-06-03T10:32:32.616054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046644480, txId: 281474976710658, pathId: [OwnerId: 72057594046644480, LocalPathId: 2], version: 6 2025-06-03T10:32:32.618023Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:386: TSplitMerge TNotifySrc, operationId: 281474976710658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037889, at schemeshard: 72057594046644480 2025-06-03T10:32:32.618036Z node 1 
:TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037889 Initiating switch from PreOffline to Offline state 2025-06-03T10:32:32.618043Z node 1 :TX_DATASHARD INFO: datashard_loans.cpp:177: 72075186224037890 Initiating switch from PreOffline to Offline state 2025-06-03T10:32:32.618107Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_split_merge.cpp:386: TSplitMerge TNotifySrc, operationId: 281474976710658:0 HandleReply TEvSplitPartitioningChangedAck, from datashard: 72075186224037890, at schemeshard: 72057594046644480 2025-06-03T10:32:32.618126Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-03T10:32:32.618131Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#281474976710658:0 progress is 1/1 2025-06-03T10:32:32.618139Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976710658:0 2025-06-03T10:32:32.619131Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037889 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:32.619135Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:260: Unable to activate 281474976710658:0 2025-06-03T10:32:32.619175Z node 1 :TX_DATASHARD INFO: datashard_impl.h:3306: 72075186224037890 Reporting state Offline to schemeshard 72057594046644480 2025-06-03T10:32:32.619284Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037889, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:32.619359Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186224037890, state: Offline, at schemeshard: 72057594046644480 2025-06-03T10:32:32.631650Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037889 reason = ReasonStop 2025-06-03T10:32:32.631681Z node 1 :TX_DATASHARD INFO: datashard.cpp:197: OnTabletStop: 72075186224037890 reason = ReasonStop 2025-06-03T10:32:32.637476Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037889 2025-06-03T10:32:32.637529Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037889 2025-06-03T10:32:32.637653Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037889 not found 2025-06-03T10:32:32.637658Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037890 not found 2025-06-03T10:32:32.638204Z node 1 :TX_DATASHARD INFO: datashard.cpp:257: OnTabletDead: 72075186224037890 2025-06-03T10:32:32.638233Z node 1 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037890 TClient::Ls request: /Root/Foo TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Foo" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748946672340 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 6 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 6 ACLVersion: 0 
EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 3 } ChildrenExist: false } Table { Name: "Foo" Columns { Name: "NameHash" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Name" Type: "Utf8" TypeId: 4608 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "Versio... (TRUNCATED) |69.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> PersQueueSdkReadSessionTest::ReadSessionWithExplicitlySpecifiedPartitions [GOOD] >> PersQueueSdkReadSessionTest::SettingsValidation >> TxUsage::WriteToTopic_Demo_5_Query [GOOD] >> TxUsage::WriteToTopic_Demo_16_Table [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartNo_Table [GOOD] >> TxUsage::WriteToTopic_Demo_11_Table [GOOD] >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query >> IncrementalRestoreScan::ChangeSenderSimple >> TxUsage::WriteToTopic_Demo_6_Table >> TxUsage::WriteToTopic_Demo_16_Query >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] >> TxUsage::WriteToTopic_Demo_11_Query >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] >> TVPatchTests::PatchPartFastXorDiffDisorder >> TCmsTenatsTest::TestClusterLimit >> TCmsTest::RequestReplaceDevices >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffWithEmptyDiffBuffer [GOOD] Test command err: Recv 65537 2025-06-03T10:32:38.133387Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-03T10:32:38.133854Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-03T10:32:38.133883Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-03T10:32:38.133980Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-03T10:32:38.134025Z node 1 :BS_VDISK_PATCH INFO: {BSVSP09@skeleton_vpatch_actor.cpp:577} [0:1:0:0:0] TEvVPatch: received diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 5 PatchedPartId# 5 XorReceiver# yes ParityPart# yes ForceEnd# no 2025-06-03T10:32:38.134036Z node 1 :BS_VDISK_PATCH INFO: {BSVSP05@skeleton_vpatch_actor.cpp:246} [0:1:0:0:0] TEvVPatch: send vGet for pulling part data; OriginalBlobId# [1:2:3:4:6:100:0] PullingPart# 5 Send NKikimr::TEvBlobStorage::TEvVGet >> TCmsTenatsTest::TestTenantRatioLimit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderEmpty [GOOD] Test command err: 2025-06-03T10:32:37.617830Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: 
[WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:37.617913Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:37.617940Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e15/r3tmp/tmpSN6DeZ/pdisk_1.dat 2025-06-03T10:32:37.748894Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-03T10:32:37.748991Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:32:37.749129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:32:37.749138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749415Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749437Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:32:37.749485Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749492Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:32:37.749497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:37.749501Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:37.749573Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749578Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749582Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:37.749619Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749623Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.749628Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:37.749633Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:37.750079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:37.750131Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:37.750177Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:32:37.750385Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:37.750390Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-03T10:32:37.750394Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:37.767390Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-03T10:32:37.767426Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:37.767577Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-03T10:32:37.767597Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:37.768872Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946757181103 != 1748946757181107 2025-06-03T10:32:37.811227Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:32:37.811471Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:32:37.811692Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:37.811725Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:37.822462Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:37.898164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:37.898259Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:32:37.898294Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:37.898407Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:37.898420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:37.898490Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:32:37.898519Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:32:37.898771Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:32:37.898781Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:32:37.898839Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:32:37.898846Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:571:2499], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-03T10:32:37.899091Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:37.899104Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-03T10:32:37.899121Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:32:37.899127Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:37.899134Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:32:37.899138Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:37.899146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:32:37.899154Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: 
TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:37.899161Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:32:37.899167Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:32:37.899182Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-03T10:32:37.899190Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-03T10:32:37.899198Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-06-03T10:32:37.899731Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-06-03T10:3 ... veACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "Table" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" 
BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 2 PathOwnerId: 72057594046644480 2025-06-03T10:32:38.371380Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-03T10:32:38.372861Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:819:2671] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-03T10:32:38.372984Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:819:2671] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:32:38.373000Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:819:2671] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" 2025-06-03T10:32:38.373258Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:819:2671] Handle TEvDescribeSchemeResult Forward to# [1:591:2517] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 
ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:38.373552Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 
DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:32:38.373587Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:32:38.373623Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:32:38.373644Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:821:2673] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> BasicUsage::WriteAndReadSomeMessagesWithAsyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/skeleton/ut/unittest >> TVPatchTests::PatchPartFastXorDiffDisorder [GOOD] Test command err: Recv 65537 2025-06-03T10:32:38.510817Z node 1 :BS_VDISK_PATCH INFO: {BSVSP03@skeleton_vpatch_actor.cpp:190} [0:1:0:0:0] TEvVPatch: bootstrapped; OriginalBlobId# [1:2:3:4:6:100:0] Deadline# 1970-01-01T00:00:01.000000Z Send NKikimr::TEvBlobStorage::TEvVGet Recv NKikimr::TEvBlobStorage::TEvVGetResult 2025-06-03T10:32:38.511313Z node 1 :BS_VDISK_PATCH INFO: {BSVSP06@skeleton_vpatch_actor.cpp:266} [0:1:0:0:0] TEvVPatch: received parts index; OriginalBlobId# [1:2:3:4:6:100:0] Status# OK ResultSize# 1 2025-06-03T10:32:38.511340Z node 1 :BS_VDISK_PATCH INFO: {BSVSP04@skeleton_vpatch_actor.cpp:226} [0:1:0:0:0] TEvVPatch: sended found parts; OriginalBlobId# [1:2:3:4:6:100:0] FoundParts# [5] Status# OK Send 
NKikimr::TEvBlobStorage::TEvVPatchFoundParts Recv NKikimr::TEvBlobStorage::TEvVPatchXorDiff 2025-06-03T10:32:38.511395Z node 1 :BS_VDISK_PATCH INFO: {BSVSP13@skeleton_vpatch_actor.cpp:674} [0:1:0:0:0] TEvVPatch: received xor diff; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] FromPart# 4 ToPart# 0 HasBuffer# no ReceivedXorDiffCount# 1/0 Send NKikimr::TEvBlobStorage::TEvVPatchXorDiffResult 2025-06-03T10:32:38.511414Z node 1 :BS_VDISK_PATCH DEBUG: {BSVSP17@skeleton_vpatch_actor.cpp:727} [0:1:0:0:0] NotifySkeletonAboutDying; Send NKikimr::TEvVPatchDyingRequest Recv NKikimr::TEvBlobStorage::TEvVPatchDiff 2025-06-03T10:32:38.511441Z node 1 :BS_VDISK_PATCH INFO: {BSVSP07@skeleton_vpatch_actor.cpp:315} [0:1:0:0:0] TEvVPatch: send patch result; OriginalBlobId# [1:2:3:4:6:100:0] PatchedBlobId# [1:3:3:4:6:100:0] OriginalPartId# 0 PatchedPartId# 0 Status# ERROR ErrorReason# [XorDiff from datapart] the start of the diff at index 0 righter than the start of the diff at index 1; PrevDiffStart# 2 DiffStart# 0 Send NKikimr::TEvBlobStorage::TEvVPatchResult Recv NKikimr::TEvVPatchDyingConfirm >> ReadIteratorExternalBlobs::NotExtBlobs [GOOD] >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled >> TxUsage::WriteToTopic_Demo_23_RestartBeforeCommit_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_read_iterator/unittest >> ReadIteratorExternalBlobs::NotExtBlobs [GOOD] Test command err: 2025-06-03T10:29:08.466521Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:29:08.466618Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:29:08.466652Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001445/r3tmp/tmpTYSxUe/pdisk_1.dat 2025-06-03T10:29:08.598774Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.620396Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:08.622475Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946547914647 != 1748946547914651 2025-06-03T10:29:08.666280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:29:08.666347Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:29:08.677694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:29:08.754180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:29:08.775894Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:29:08.776285Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:29:08.776437Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:29:08.776538Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:29:08.787253Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:29:08.787481Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:29:08.787512Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:29:08.787684Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:29:08.787692Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:29:08.787698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:29:08.787757Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:29:08.787780Z node 1 :TX_DATASHARD DEBUG: 
datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:29:08.787792Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:29:08.798192Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:29:08.803964Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:29:08.804095Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:29:08.804137Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:29:08.804145Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:29:08.804152Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:29:08.804160Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:29:08.804269Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:08.804279Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:29:08.804432Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:29:08.804468Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:29:08.804614Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:29:08.804625Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:29:08.804635Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:29:08.804644Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:29:08.804650Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:29:08.804656Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:29:08.804664Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:29:08.804681Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.804689Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.804699Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:29:08.804724Z node 1 :TX_DATASHARD TRACE: 
datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:29:08.804730Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:29:08.804758Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:29:08.804848Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:29:08.804863Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:29:08.804888Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:29:08.804899Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:29:08.804905Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:29:08.804912Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:29:08.804917Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:08.804981Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:29:08.804987Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:29:08.804992Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:29:08.805016Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:08.805033Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:29:08.805037Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:29:08.805042Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:29:08.805046Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:29:08.805053Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832: Operation [0:281474976715657] at 72075186224037888 is not ready to execute on unit WaitForPlan 2025-06-03T10:29:08.805437Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269746185, Sender [1:683:2579], Recipient [1:663:2568]: NKikimr::TEvTxProxySchemeCache::TEvWatchNotifyUpdated 2025-06-03T10:29:08.805453Z node 1 :TX_DATASHARD DEBUG: datashard_subdomain_path_id.cpp:129: Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at 
datashard 72075186224037888 2025-06-03T10:29:08.821746Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:29:08.821795Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:29:08.821806Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:29:08.821826Z node 1 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715657 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: PREPARED 2025-06-03T10:29:08.821845Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:467: 72075186224037888 not sending time cast registration request in state WaitScheme 2025-06-03T10:29:08.969889Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:699:2589], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:29:08.969927Z node 1 :TX_DATASHARD TRACE: datashard_impl. ... SHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:32:23.527606Z node 18 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [18:752:2627], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:32:23.568364Z node 18 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [18:823:2667] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:23.599111Z node 18 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnjw1d1wxv1nz4t2ztsz4f, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=18&id=ZTYyYTZiZGItMzdmNGI1OGYtODM4ODhmZmQtMTA4ODQwMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:24.267833Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [19:302:2346], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:24.267879Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:24.267891Z node 19 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001445/r3tmp/tmpvcjf64/pdisk_1.dat 2025-06-03T10:32:24.382831Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:24.401351Z node 19 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:24.401883Z node 19 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [19:32:2079] 1748946743850310 != 1748946743850314 2025-06-03T10:32:24.444693Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:24.444736Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:24.457794Z node 19 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(19, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:24.537715Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:24.792658Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:734:2616], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:24.792694Z node 19 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:24.792715Z node 19 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [19:745:2621], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:24.798990Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:32:24.959577Z node 19 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [19:748:2624], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:32:24.998503Z node 19 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [19:818:2663] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:25.665110Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715660. Ctx: { TraceId: 01jwtnjxer2g1gz09gd9w3z6mn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=MjI4YzQyMmMtZjA4NDU2NS1kMWIxY2RmMi05MDgzMzg0Yw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:26.450207Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnjyardgxc8nq2crf5gy2c, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NmU4MWE4NDktZTJlMjJjMmUtNjQ5OWJlZjctYTg1NDE1ZTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:27.256144Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715662. Ctx: { TraceId: 01jwtnjz368afzzr15n9pwfr6e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=MjUzYWNmNmMtNWYzMzdlYzUtMmE5ODZkMWUtMjVjZWNjMjU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:28.157171Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715663. Ctx: { TraceId: 01jwtnjzwcfr02rnm1jpag9d7e, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NmQ4MjdhNTEtNzI1YjBlNjAtYTc5YWQ1NzEtMmU5ZWE5OTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:28.864680Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtnk0rfbz9dmecs8dw0pyz9, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZDVlNDQzMDYtYWIwY2FhNzgtNzgzNGRiOTAtOTY3Mjg1MWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:29.471004Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715665. Ctx: { TraceId: 01jwtnk1em28yf8nqk4rxg75yw, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZmNhMjA4NmQtYTNjMTI1MmItNTJhNzVjMjctNWQyMzEyMTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:30.298351Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtnk21m24yhsjhn5c44rtar, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZDZmOTc2NTMtZDRjNjY3ZDItNmUzZGYxZDQtOTAxOGNkZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:31.038605Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnk2vh345st1a0c0fjf994, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NjZiNzFjMDAtMjRkNzg5ZjItNjkwNTNiNjgtYzYwNjMwMGE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:31.814889Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715668. 
Ctx: { TraceId: 01jwtnk3jp13acet3f6mbdgw3n, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=ZTk2MDAwNWUtNDE1MTgzMzQtYTA0Mzk2NTItMWRlNWM1MGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:32:32.631774Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715669. Ctx: { TraceId: 01jwtnk4atexsdwp1tgtvqz7nr, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=NjVhOWY0YjYtNjI0ZGY1YjEtYmNjMmQ0MjctODk0MzZlM2M=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root ... waiting for stats after upsert 2025-06-03T10:32:34.714078Z node 19 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:32:34.714111Z node 19 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 0 TableStats { DataSize: 10487312 RowCount: 10 IndexSize: 0 InMemSize: 10487312 LastAccessTime: 1544 LastUpdateTime: 1544 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 0 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 765 Memory: 17425320 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 ... waiting for stats after compaction Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 1 TableStats { DataSize: 10487312 RowCount: 10 IndexSize: 0 InMemSize: 10487312 LastAccessTime: 1544 LastUpdateTime: 1544 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 0 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 681 Memory: 124804 Storage: 10486554 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 Captured TEvDataShard::TEvPeriodicTableStats DatashardId: 72075186224037888 TableLocalId: 2 Generation: 1 Round: 2 TableStats { DataSize: 10486220 RowCount: 10 IndexSize: 0 InMemSize: 0 LastAccessTime: 1544 LastUpdateTime: 1544 ImmediateTxCompleted: 10 PlannedTxCompleted: 1 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 10 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 SearchHeight: 1 LastFullCompactionTs: 20 HasLoanedParts: false Channels { Channel: 1 DataSize: 10486220 IndexSize: 0 } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 681 Memory: 124804 Storage: 10486554 } ShardState: 2 UserTablePartOwners: 72075186224037888 NodeId: 19 StartTime: 450 TableOwnerId: 72057594046644480 FollowerId: 0 2025-06-03T10:32:38.848246Z node 19 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715670. 
Ctx: { TraceId: 01jwtnkb57aakq1y11mytjqpv2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=19&id=OTFkNjczYTEtN2VhYjQxNTEtNzczZjhmYTMtOTJiYzY0NWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> TDowntimeTest::AddDowntime [GOOD] >> TDowntimeTest::HasUpcomingDowntime [GOOD] >> TDowntimeTest::CleanupOldSegments [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_incremental_restore_scan/unittest >> IncrementalRestoreScan::ChangeSenderSimple [GOOD] Test command err: 2025-06-03T10:32:38.633212Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:38.633342Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:38.633376Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000e0f/r3tmp/tmp0sFTzw/pdisk_1.dat 2025-06-03T10:32:38.769496Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "Root" StoragePools { Name: "/Root:test" Kind: "test" } } } TxId: 1 TabletId: 72057594046644480 , at schemeshard: 72057594046644480 2025-06-03T10:32:38.769601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //Root, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.769686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 0 2025-06-03T10:32:38.769767Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046644480 2025-06-03T10:32:38.769776Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770150Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046644480 PathId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770179Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //Root 2025-06-03T10:32:38.770240Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770250Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046644480 2025-06-03T10:32:38.770257Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:38.770262Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:38.770363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770377Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:38.770430Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770436Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.770442Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:38.770450Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:38.770970Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046644480 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:38.771051Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046644480 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:38.771107Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 2025-06-03T10:32:38.771343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__notify.cpp:30: NotifyTxCompletion operation in-flight, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:38.771350Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 0/1, is published: true 2025-06-03T10:32:38.771354Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__notify.cpp:131: NotifyTxCompletion transaction is registered, txId: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:38.787852Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AvailableExternalDataSources: "ObjectStorage" } 2025-06-03T10:32:38.787905Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:38.788053Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# ObjectStorage 2025-06-03T10:32:38.788076Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:38.789387Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946758148040 != 1748946758148044 2025-06-03T10:32:38.831526Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:32:38.831765Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:32:38.832553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:38.832586Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:38.843280Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:38.915653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 500, transactions count in step: 1, at schemeshard: 72057594046644480 2025-06-03T10:32:38.915734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 AckTo { RawX1: 0 RawX2: 0 } } Step: 500 MediatorID: 72057594046382081 TabletID: 72057594046644480, at schemeshard: 72057594046644480 2025-06-03T10:32:38.915761Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:38.915867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:38.915880Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046644480 2025-06-03T10:32:38.915927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 1 2025-06-03T10:32:38.915958Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046644480, LocalPathId: 1], at schemeshard: 72057594046644480 2025-06-03T10:32:38.916156Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046644480 2025-06-03T10:32:38.916163Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046644480, txId: 1, path id: [OwnerId: 72057594046644480, LocalPathId: 1] 2025-06-03T10:32:38.916217Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046644480 2025-06-03T10:32:38.916224Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [1:571:2499], at schemeshard: 72057594046644480, txId: 1, path id: 1 2025-06-03T10:32:38.916431Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:38.916441Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046644480] TDone opId# 1:0 ProgressState 2025-06-03T10:32:38.916458Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:32:38.916462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:38.916468Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1:0 progress is 1/1 2025-06-03T10:32:38.916471Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:38.916477Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1, ready parts: 1/1, is published: false 2025-06-03T10:32:38.916484Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: 
TOperation IsReadyToDone TxId: 1 ready parts: 1/1 2025-06-03T10:32:38.916490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1:0 2025-06-03T10:32:38.916494Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1:0 2025-06-03T10:32:38.916508Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 1] was 2 2025-06-03T10:32:38.916516Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1, publications: 1, subscribers: 1 2025-06-03T10:32:38.916523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1, [OwnerId: 72057594046644480, LocalPathId: 1], 3 2025-06-03T10:32:38.916961Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046644480, msg: Owner: 72057594046644480 Generation: 2 LocalPathId: 1 Version: 3 PathOwnerId: 72057594046644480, cookie: 1 2025-06-03T10:3 ... -03T10:32:39.353606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 281474976715658 ready parts: 1/1 2025-06-03T10:32:39.353609Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 281474976715658:0 2025-06-03T10:32:39.353612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 281474976715658:0 2025-06-03T10:32:39.353630Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046644480, LocalPathId: 3] was 3 2025-06-03T10:32:39.353672Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 281474976715658 datashard 72075186224037889 state Ready 2025-06-03T10:32:39.353679Z node 1 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186224037889 Got TEvSchemaChangedResult from SS at 72075186224037889 2025-06-03T10:32:39.353770Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:403: actor# [1:59:2106] Handle TEvNavigate describe path /Root/IncrBackupTable 2025-06-03T10:32:39.354976Z node 1 :TX_PROXY DEBUG: describe.cpp:272: Actor# [1:807:2665] HANDLE EvNavigateScheme /Root/IncrBackupTable 2025-06-03T10:32:39.355397Z node 1 :TX_PROXY DEBUG: describe.cpp:356: Actor# [1:807:2665] HANDLE EvNavigateKeySetResult TDescribeReq marker# P5 ErrorCount# 0 2025-06-03T10:32:39.355429Z node 1 :TX_PROXY DEBUG: describe.cpp:435: Actor# [1:807:2665] SEND to# 72057594046644480 shardToRequest NKikimrSchemeOp.TDescribePath Path: "/Root/IncrBackupTable" Options { ShowPrivateTable: true } 2025-06-03T10:32:39.355655Z node 1 :TX_PROXY DEBUG: describe.cpp:448: Actor# [1:807:2665] Handle TEvDescribeSchemeResult Forward to# [1:591:2517] Cookie: 0 TEvDescribeSchemeResult: NKikimrScheme.TEvDescribeSchemeResult PreSerializedData size# 20 Record# Status: StatusSuccess Path: "/Root/IncrBackupTable" PathDescription { Self { Name: "IncrBackupTable" PathId: 3 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1500 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 
ChildrenVersion: 0 TableSchemaVersion: 1 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "IncrBackupTable" Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "__ydb_incrBackupImpl_deleted" Type: "Bool" TypeId: 6 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "key" KeyColumnIds: 1 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { MinPartitionsCount: 1 } ColumnFamilies { Id: 0 Name: "default" } } TableSchemaVersion: 1 IsBackup: false IsRestore: false } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } 
DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046644480 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 3 PathOwnerId: 72057594046644480 2025-06-03T10:32:39.355963Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037889, clientId# [1:817:2669], serverId# [1:818:2670], sessionId# [0:0:0] 2025-06-03T10:32:39.356081Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:65: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleUserTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/IncrBackupTable TableId: [72057594046644480:3:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:32:39.356105Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:131: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleTargetTable TEvTxProxySchemeCache::TEvNavigateKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 Instant: 0 ResultSet [{ Path: Root/Table TableId: [72057594046644480:2:1] RequestType: ByTableId Operation: OpTable RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: Ok Kind: KindTable DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } }] } 2025-06-03T10:32:39.356139Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.h:227: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] HandleKeys TEvTxProxySchemeCache::TEvResolveKeySetResult: result# { ErrorCount: 0 DatabaseName: DomainOwnerId: 0 ResultSet [{ TableId: [OwnerId: 72057594046644480, LocalPathId: 2] Access: 0 SyncVersion: false Status: OkData Kind: KindRegularTable PartitionsCount: 1 DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } From: (Uint32 : NULL) IncFrom: 1 To: () IncTo: 0 }] } 2025-06-03T10:32:39.356172Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:139: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvEnqueueRecords { Records [{ Order: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] BodySize: 18 }] } 2025-06-03T10:32:39.356192Z node 1 :CHANGE_EXCHANGE 
DEBUG: change_sender_incr_restore.cpp:144: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:32:39.356217Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:392: actor# [1:59:2106] Handle TEvGetProxyServicesRequest 2025-06-03T10:32:39.356227Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:40: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handle NKikimr::TEvTxUserProxy::TEvGetProxyServicesResponse 2025-06-03T10:32:39.356274Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:824:2675], serverId# [1:825:2676], sessionId# [0:0:0] 2025-06-03T10:32:39.397590Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:78: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handshake NKikimrChangeExchange.TEvStatus Status: STATUS_OK LastRecordOrder: 0 2025-06-03T10:32:39.397647Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:32:39.397683Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_table_base.cpp:123: [TableChangeSenderShard][0:0][72075186224037888][1:823:2671] Handle NKikimr::NChangeExchange::TEvChangeExchange::TEvRecords { Records [{ Order: 0 Group: 0 Step: 0 TxId: 0 PathId: [OwnerId: 72057594046644480, LocalPathId: 2] Kind: IncrementalRestore Source: InitialScan Body: 18b TableId: [OwnerId: 72057594046644480, LocalPathId: 3] SchemaVersion: 0 LockId: 0 LockOffset: 0 }] } 2025-06-03T10:32:39.397697Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:154: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NChangeExchange::TEvChangeExchangePrivate::TEvReady { PartitionId: 72075186224037888 } 2025-06-03T10:32:39.397742Z node 1 :CHANGE_EXCHANGE DEBUG: change_sender_incr_restore.cpp:176: [IncrRestoreChangeSenderMain][[OwnerId: 72057594046644480, LocalPathId: 2]][1:819:2671] Handle NKikimr::NDataShard::TEvIncrementalRestoreScan::TEvNoMoreData >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction |69.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TDowntimeTest::CleanupOldSegments [GOOD] |69.8%| [TA] $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTest::StateRequestNode >> TCmsTenatsTest::TestClusterLimit [GOOD] >> TCmsTenatsTest::RequestShutdownHost >> TTicketParserTest::AuthenticationWithUserAccount >> TCmsTest::RequestReplaceDevices [GOOD] >> TCmsTest::RequestReplaceDevicePDisk >> TCmsTenatsTest::TestTenantRatioLimit [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode >> TTicketParserTest::LoginRefreshGroupsWithError >> TTicketParserTest::AuthenticationWithUserAccount [GOOD] >> TTicketParserTest::AuthenticationUnsupported >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Block [GOOD] >> TCmsTest::TestKeepAvailableModeScheduled [GOOD] >> TCmsTest::TestKeepAvailableModeScheduledDisconnects ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure4Plus2Block [GOOD] Test command err: 2025-06-03T10:32:34.174692Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:74:2120] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:34.174771Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174780Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 1 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174786Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 2 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174791Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 3 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174796Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 4 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174801Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 5 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174807Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174812Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174817Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174822Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174827Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 4 situation# ESituation::Unknown Marker# 
BPG51 2025-06-03T10:32:34.174832Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 6 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174838Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174843Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 1 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174848Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 2 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174853Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 3 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174858Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 4 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174866Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [7e4afa7ea38a37be] Id# [72075186224047637:1:863:1:24576:786:0] restore disk# 7 part# 5 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:34.174874Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [7e4afa7ea38a37be] restore Id# [72075186224047637:1:863:1:24576:786:0] optimisticReplicas# 6 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:34.174890Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:32:34.174897Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:32:34.174905Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:32:34.174910Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:32:34.174916Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:32:34.174922Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:32:34.174928Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG33 2025-06-03T10:32:34.174934Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 3 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:4] Marker# BPG32 2025-06-03T10:32:34.174940Z node 3 :BS_PROXY_PUT DEBUG: 
dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG33 2025-06-03T10:32:34.174945Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 4 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:5] Marker# BPG32 2025-06-03T10:32:34.174952Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG33 2025-06-03T10:32:34.174957Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 5 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:6] Marker# BPG32 2025-06-03T10:32:34.179996Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:34.180078Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:32:34.180095Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:32:34.180109Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:4] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:3:0] Marker# BPP01 2025-06-03T10:32:34.180134Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:5] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:4:0] Marker# BPP01 2025-06-03T10:32:34.180147Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:6] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:5:0] Marker# BPP01 2025-06-03T10:32:34.180164Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:32:34.180175Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:34.180239Z node 3 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.351 sample PartId# [72075186224047637:1:863:1:24576:786:6] QueryCount# 1 VDiskId# [0:1:0:5:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.352 sample PartId# [72075186224047637:1:863:1:24576:786:5] QueryCount# 1 VDiskId# [0:1:0:4:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.352 sample PartId# [72075186224047637:1:863:1:24576:786:4] QueryCount# 1 VDiskId# [0:1:0:3:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.352 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 
3 } TEvVPut{ TimestampMs# 0.352 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.352 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.393 VDiskId# [0:1:0:0:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.437 VDiskId# [0:1:0:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.453 VDiskId# [0:1:0:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.468 VDiskId# [0:1:0:3:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.491 VDiskId# [0:1:0:4:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.505 VDiskId# [0:1:0:5:0] NodeId# 3 Status# OK } ] } >> TMaintenanceApiTest::ManyActionGroupsWithSingleAction [GOOD] >> TMaintenanceApiTest::CreateTime >> TTicketParserTest::AuthenticationUnsupported [GOOD] >> TTicketParserTest::AuthenticationUnknown >> TTicketParserTest::AuthenticationUnknown [GOOD] >> TTicketParserTest::Authorization >> TxUsage::WriteToTopic_Demo_32_Query [GOOD] >> TxUsage::WriteToTopic_Demo_33_Table >> Compression::WriteGZIP [GOOD] >> Compression::WriteZSTD >> LocalPartition::WithoutPartitionUnknownEndpoint [GOOD] >> LocalPartition::WithoutPartitionDeadNode >> TTicketParserTest::Authorization [GOOD] >> TTicketParserTest::AuthorizationModify >> TCmsTest::StateRequestNode [GOOD] >> TCmsTest::StateRequestUnknownNode >> TCmsTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestReplaceDevicePDiskByPath >> TTicketParserTest::AuthorizationRetryError >> TTicketParserTest::AuthorizationModify [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationModify [GOOD] Test command err: 2025-06-03T10:32:40.962190Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669140882573148:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:40.962504Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f95/r3tmp/tmp471Usy/pdisk_1.dat 2025-06-03T10:32:41.016937Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669140882573125:2079] 1748946760961977 != 1748946760961980 2025-06-03T10:32:41.019369Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 10467, node 1 2025-06-03T10:32:41.031585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:41.031601Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:41.031604Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:41.031660Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11174 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:41.093215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:41.093241Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:41.094180Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:41.094688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:41.097791Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:41.097809Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:41.097812Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:41.098135Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:41.098154Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [114dbe5f0330] Connect to grpc://localhost:32391 2025-06-03T10:32:41.098836Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f0330] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-03T10:32:41.101268Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbe5f0330] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:41.101343Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-03T10:32:41.101628Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [114dbe5f08b0] Connect to grpc://localhost:8222 2025-06-03T10:32:41.101770Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f08b0] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-03T10:32:41.103531Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbe5f08b0] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-03T10:32:41.103623Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-03T10:32:41.407213Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669141720565922:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:41.407237Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f95/r3tmp/tmpLcSbM7/pdisk_1.dat 2025-06-03T10:32:41.420216Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:41.420651Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669141720565897:2079] 1748946761407061 != 1748946761407064 TServer::EnableGrpc on GrpcPort 9068, node 2 2025-06-03T10:32:41.433507Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:41.433523Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:41.433526Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:41.433587Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7827 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:41.511447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:41.511484Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:41.511909Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:41.512403Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:41.514008Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (8E120919): Token is not supported 2025-06-03T10:32:41.858308Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669141714751237:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:41.858446Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f95/r3tmp/tmpps1840/pdisk_1.dat 2025-06-03T10:32:41.869915Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:41.870142Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669141714751209:2079] 1748946761858085 != 1748946761858088 TServer::EnableGrpc on GrpcPort 16743, node 3 2025-06-03T10:32:41.880483Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:41.880515Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:41.880517Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:41.880581Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5485 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:41.961451Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:41.961480Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:41.961821Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:41.962428Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:41.962719Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, ... nitLoginToken, database /Root, A6 error 2025-06-03T10:32:42.356948Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.356990Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f0330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-03T10:32:42.357496Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [114dbe5f0330] Status 16 Access Denied 2025-06-03T10:32:42.357537Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-03T10:32:42.357548Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-03T10:32:42.357661Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:42.357668Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:42.357681Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:42.357691Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.357723Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f0330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "XXXXXXXX" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.358164Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbe5f0330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.358209Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:42.358239Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:42.358351Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:42.358358Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:42.358359Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:42.358362Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.358391Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f0330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "XXXXXXXX" type: "resource-manager.folder" } } 2025-06-03T10:32:42.358706Z node 4 
:GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbe5f0330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.358737Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:42.358750Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:42.358838Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:42.358843Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:42.358844Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:42.358846Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(monitoring.view) 2025-06-03T10:32:42.358859Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbe5f0330] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "monitoring.view" resource_path { id: "gizmo" type: "iam.gizmo" } } 2025-06-03T10:32:42.359062Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbe5f0330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.359091Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission monitoring.view now has a valid subject "user1@as" 2025-06-03T10:32:42.359101Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:42.646820Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669147423259915:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:42.646856Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f95/r3tmp/tmpBldkXq/pdisk_1.dat 2025-06-03T10:32:42.662480Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:42.663007Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669147423259897:2079] 1748946762646675 != 1748946762646678 TServer::EnableGrpc on GrpcPort 4705, node 5 2025-06-03T10:32:42.678131Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:42.678146Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:42.678148Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:42.678213Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5047 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:42.751501Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:42.751543Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:42.751893Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:42.752531Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:42.754049Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:42.754063Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:42.754066Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:42.754080Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.754102Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [114dbd01d3b0] Connect to grpc://localhost:20807 2025-06-03T10:32:42.754327Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbd01d3b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.756131Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbd01d3b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.756215Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:42.756251Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:42.756440Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:42.756451Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:42.756454Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 
2025-06-03T10:32:42.756459Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.756467Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-03T10:32:42.756501Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbd01d3b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.756678Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [114dbd01d3b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.756965Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbd01d3b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.757005Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:42.757322Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [114dbd01d3b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:42.757357Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-03T10:32:42.757381Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as >> TMaintenanceApiTest::CreateTime [GOOD] >> TMaintenanceApiTest::LastRefreshTime >> TCmsTest::TestKeepAvailableModeScheduledDisconnects [GOOD] >> TCmsTest::TestLoadLog >> TCmsTest::StateRequestUnknownNode [GOOD] >> TCmsTest::StateRequestUnknownMultipleNodes >> TTicketParserTest::LoginGood >> TCmsTest::TestLoadLog [GOOD] >> TCmsTest::TestLogOperationsRollback >> TCmsTest::RequestReplaceDevicePDiskByPath [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode >> TCmsTenatsTest::RequestShutdownHost [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy >> TTicketParserTest::LoginGood [GOOD] >> TTicketParserTest::LoginGoodWithGroups >> BasicUsage::TSimpleWriteSession_AutoSeqNo_BasicUsage [GOOD] >> BasicUsage::TWriteSession_AutoBatching [GOOD] >> BasicUsage::TWriteSession_BatchingProducesContinueTokens [GOOD] >> BasicUsage::BrokenCredentialsProvider >> TTicketParserTest::LoginGoodWithGroups [GOOD] >> TTicketParserTest::LoginRefreshGroupsGood >> TCmsTenatsTest::TestTenantRatioLimitForceRestartModeScheduled [GOOD] >> TCmsTest::ActionIssue >> TMaintenanceApiTest::LastRefreshTime [GOOD] |69.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |69.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard |69.8%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_incremental_restore_scan/test-results/unittest/{meta.json ... 
results_accumulator.log} |69.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_testshard/ydb-core-blobstorage-ut_testshard >> DirectReadWithClient::ManyMessages [GOOD] >> DirectReadWithControlSession::Init [GOOD] >> DirectReadWithControlSession::StopPartitionSessionGracefully [GOOD] >> DirectReadWithControlSession::StopPartitionSession [GOOD] >> DirectReadWithControlSession::EmptyDirectReadResponse [GOOD] >> DirectReadWithServer::KillPQTablet >> TCmsTest::StateRequestUnknownMultipleNodes [GOOD] >> TCmsTest::StateStorageAvailabilityMode >> PersQueueSdkReadSessionTest::SettingsValidation [GOOD] >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::LastRefreshTime [GOOD] Test command err: 2025-06-03T10:32:40.097820Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:40.098798Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:40.101344Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:40.101390Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:40.101444Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:40.101530Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:40.101973Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:40.102042Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:40.102459Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:40.102489Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:40.104096Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:40.104119Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:40.104150Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:40.104225Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:40.136433Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:40.169051Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:40.169185Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:40.170637Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:40.170787Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:40.170794Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:40.170802Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:40.170805Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:40.170821Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:40.170841Z node 1 :CMS DEBUG: 
sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:40.170862Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:40.172521Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } 
GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:40.214955Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:40.215027Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:40.274026Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:40.274069Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:40.274152Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:40.274437Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 
} Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... 
erconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 19 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 20 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 21 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 22 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 125527512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 125527512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 125527512 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 125527512 } Timestamp: 125527512 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } 
Timestamp: 125527512 } } 2025-06-03T10:32:43.923394Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "task-1" 2025-06-03T10:32:43.923402Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 2025-06-03T10:32:43.923412Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 17, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:43.923445Z node 17 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:32:43.923447Z node 17 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:43.923449Z node 17 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:43.923452Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:43.923459Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 2025-06-03T10:32:43.923462Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:43.923479Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: ) 2025-06-03T10:32:43.923495Z node 17 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# test-user-p-1, requestId# test-user-r-1, owner# test-user 2025-06-03T10:32:43.923502Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (17) (permission test-user-p-1 until 1970-01-01T00:12:05Z) 2025-06-03T10:32:43.923511Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:43.923560Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-1, validity# 1970-01-01T00:12:05.527512Z, action# Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 2025-06-03T10:32:43.923590Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# test-user-r-1, owner# test-user, order# 1, priority# 0, body# User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:43.964508Z node 17 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:44.006283Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:44.006375Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "test-user-r-1" Permissions { Id: "test-user-p-1" Action { Type: SHUTDOWN_HOST Host: "17" Duration: 600000000 } Deadline: 725527512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 17 InterconnectPort: 12001 } } } } 2025-06-03T10:32:44.006384Z node 17 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:32:05.527512Z 2025-06-03T10:32:44.007537Z node 17 :CMS INFO: cms.cpp:1366: Get selected requests for test-user 2025-06-03T10:32:44.007551Z node 17 :CMS DEBUG: cms.cpp:1392: Resulting status: OK 2025-06-03T10:32:44.007584Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "test-user" Command: GET RequestId: "test-user-r-1" }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } Requests { RequestId: "test-user-r-1" Owner: "test-user" Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY Priority: 0 } } 2025-06-03T10:32:44.111777Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (17) (permission test-user-p-1 until 1970-01-01T00:12:05Z) 2025-06-03T10:32:44.111886Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:44.111907Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:44.111929Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:11Z 2025-06-03T10:32:44.112109Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:44.112123Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. 
Down: " } 2025-06-03T10:32:44.112134Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:44.112166Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (17) has planned shutdown (permission test-user-p-1 owned by test-user), VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: ) 2025-06-03T10:32:44.112189Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:44.112249Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# test-user-r-1, owner# test-user, order# 1, priority# 0, body# User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has planned shutdown (permission test-user-p-1 owned by test-user), VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:44.123241Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:44.123318Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "test-user" RequestId: "test-user-r-1" AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has planned shutdown (permission test-user-p-1 owned by test-user), VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. 
Down: " } RequestId: "test-user-r-1" Deadline: 431130024 } >> TxUsage::WriteToTopic_Demo_20_RestartNo_Query [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithSyncCompression [GOOD] >> BasicUsage::WriteAndReadSomeMessagesWithNoCompression >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table >> TTicketParserTest::TicketFromCertificateCheckIssuerGood >> TTicketParserTest::TicketFromCertificateCheckIssuerGood [GOOD] >> TTicketParserTest::TicketFromCertificateCheckIssuerBad >> TxUsage::WriteToTopic_Demo_16_Query [GOOD] >> TTicketParserTest::TicketFromCertificateCheckIssuerBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationBad >> TxUsage::WriteToTopic_Demo_17_Table >> TCmsTest::StateStorageAvailabilityMode [GOOD] >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] >> TxUsage::WriteToTopic_Demo_33_Table [GOOD] >> TxUsage::WriteToTopic_Demo_6_Table [GOOD] >> TCmsTenatsTest::RequestShutdownHostWithTenantPolicy [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartMode >> TxUsage::WriteToTopic_Demo_33_Query >> TxUsage::WriteToTopic_Demo_6_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::StateStorageAvailabilityMode [GOOD] Test command err: 2025-06-03T10:32:40.850644Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:40.851516Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:40.853197Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:40.853260Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:40.853330Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:40.853405Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:40.853779Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:40.853829Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:40.854108Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:40.854202Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:40.855658Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:40.855682Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:40.855715Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:40.855782Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:40.886694Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:40.919488Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:40.919598Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:40.921314Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:40.921465Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:40.921473Z node 1 :CMS DEBUG: 
sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:40.921483Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:40.921488Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:40.921507Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:40.921535Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:40.921600Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:40.923616Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:40.965861Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:40.965926Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:41.015668Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:41.015716Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:41.015794Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:41.015950Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 8 InterconnectPort: 12008 
Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-03T10:32:41.015998Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { Hosts: "1" }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-03T10:32:41.056786Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:41.122183Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:41.122233Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:41.122275Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:41.122356Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { Hosts: "2" }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120129000 } Timestamp: 120129000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Timestamp: 120129000 } } 2025-06-03T10:32:42.700554Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:42.701317Z node 9 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:42.703278Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:42.703343Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:42.703593Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:42.703615Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:42.704177Z node 9 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:42.704213Z node 9 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:42.704244Z node 9 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:42.704300Z node 9 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:42.704995Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:42.705033Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:42.705055Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:42.705071Z node 9 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:42.727233Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:42.738615Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:42.738765Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:42.738788Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:42.738871Z node 9 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:42.738877Z node 9 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:42.738886Z node 9 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:42.738891Z node 9 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:42.738898Z node 9 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:42.738914Z node 9 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:42.739541Z node 9 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 9 PDiskId: 9 Path: "/9/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 10 Path: "/10/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 11 Path: "/11/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 12 Path: "/12/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 13 Path: "/13/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 14 Path: "/14/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 15 Path: "/15/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 16 Path: "/16/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSl ... 
} Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120028512 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120028512 } Timestamp: 120028512 NodeId: 21 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028512 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120028512 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120028512 } Timestamp: 120028512 NodeId: 22 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028512 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120028512 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120028512 } Timestamp: 120028512 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028512 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120028512 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120028512 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120028512 } Timestamp: 120028512 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120028512 } } 2025-06-03T10:32:44.457130Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { Hosts: "ghrun-pyvh3niaay.auto.internal.com" }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: NO_SUCH_HOST Reason: "Unknown host ghrun-pyvh3niaay.auto.internal.com" } } 2025-06-03T10:32:45.987956Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:45.988457Z node 25 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:45.989811Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:45.989888Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:45.990260Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:45.990341Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:45.990935Z node 25 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:45.991011Z node 25 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:45.991046Z node 25 :CMS 
DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:45.991134Z node 25 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:45.991870Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:45.992058Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:45.992102Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:45.992127Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:46.004755Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:46.037124Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:46.037227Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:46.037264Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:46.037359Z node 25 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:46.037365Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:46.037371Z node 25 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:46.037374Z node 25 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:46.037385Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:46.037433Z node 25 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:46.037445Z node 25 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:46.037518Z node 25 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:46.079498Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:46.079560Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:46.079844Z node 25 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-03T10:32:46.079851Z node 25 :CMS DEBUG: cms.cpp:1209: TCms::Cleanup 2025-06-03T10:32:46.081264Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:46.081875Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:46.081917Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:46.082264Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:46.082345Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:46.082483Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:46.082570Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:46.082591Z node 
25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:46.082674Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:46.082717Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:32:46.197418Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:46.242610Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:46.242684Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:46.242756Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:46.242764Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 2025-06-03T10:32:46.242776Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 28, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:46.242785Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Restart 2025-06-03T10:32:46.242788Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:46.242790Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:46.242792Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 3; State: Ok 2025-06-03T10:32:46.242794Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 4; State: Ok 2025-06-03T10:32:46.242797Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:46.242834Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 } Deadline: 180135000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 28 InterconnectPort: 12004 } } } } 2025-06-03T10:32:46.253753Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:46.275465Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:46.275556Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:46.275612Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:46.275625Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 2025-06-03T10:32:46.275640Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 28, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down 
nodes: 1 2025-06-03T10:32:46.275650Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Restart 2025-06-03T10:32:46.275652Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:46.275654Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:46.275657Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 3; State: Ok 2025-06-03T10:32:46.275659Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 4; State: Ok 2025-06-03T10:32:46.275666Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Too many unavailable state storage rings. Restarting rings: 1. Temporary (for a 2 minutes) locked rings: 0. Maximum allowed number of unavailable rings for this mode: 1) 2025-06-03T10:32:46.275691Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "28" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Too many unavailable state storage rings. Restarting rings: 1. Temporary (for a 2 minutes) locked rings: 0. Maximum allowed number of unavailable rings for this mode: 1" } Deadline: 420235000 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplaceManyDevicesOnOneNode [GOOD] Test command err: 2025-06-03T10:32:38.793207Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:38.796120Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:38.799434Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:38.799534Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:38.799960Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:38.799986Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:38.800920Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:38.801028Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:38.801080Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
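The ALLOW / DISALLOW_TEMP pair that closes the previous test shows how the availability mode changes the state storage ring check: with ring 0 already in state Restart, MODE_KEEP_AVAILABLE still grants the restart while MODE_MAX_AVAILABILITY rejects it ("Maximum allowed number of unavailable rings for this mode: 1"). Below is a sketch of a decision rule consistent with those two responses, assuming five rings and a majority quorum of three; both the ring count and the exact comparison are inferred from this log, not taken from the CMS sources:

#include <cassert>

enum class EAvailabilityMode { KeepAvailable, MaxAvailability };

// Returns whether locking one more ring keeps the cluster within the
// mode-dependent budget of unavailable state storage rings (assumed rule).
bool AllowRingRestart(int totalRings, int ringsAlreadyUnavailable, EAvailabilityMode mode) {
    const int quorum = totalRings / 2 + 1;
    const int limit = (mode == EAvailabilityMode::MaxAvailability)
        ? 1                      // "Maximum allowed number of unavailable rings for this mode: 1"
        : totalRings - quorum;   // MODE_KEEP_AVAILABLE: keep a majority of rings up
    return ringsAlreadyUnavailable + 1 <= limit; // +1 for the ring this request locks
}

int main() {
    // Mirrors the log: rings 0..4, ring 0 already restarting.
    assert(AllowRingRestart(5, 1, EAvailabilityMode::KeepAvailable));     // ALLOW
    assert(!AllowRingRestart(5, 1, EAvailabilityMode::MaxAvailability));  // DISALLOW_TEMP
}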
2025-06-03T10:32:38.801156Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:38.801976Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:38.802014Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:38.802057Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:38.802125Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:38.836608Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:38.848068Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:38.848206Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:38.850084Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:38.850231Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:38.850240Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:38.850251Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:38.850256Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:38.850264Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:38.850292Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:38.853066Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 
GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:38.863450Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:38.894810Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:38.894863Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:38.937597Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:38.937632Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:38.937729Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:38.938134Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120026512 } Timestamp: 120026512 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120026512 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120026512 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 
120026512 } Timestamp: 120026512 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... ime: 0 ChangeTime: 0 Path: "/28/pdisk-86.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046300Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 87 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-87.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 88 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-88.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 89 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-89.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046311Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 90 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-90.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 91 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-91.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 92 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-92.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046329Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 96 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-96.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 97 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-97.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 98 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-98.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046341Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 33, response# PDiskStateInfo { PDiskId: 99 CreateTime: 0 ChangeTime: 0 Path: "/33/pdisk-99.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 100 CreateTime: 0 ChangeTime: 0 Path: "/33/pdisk-100.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 101 CreateTime: 0 ChangeTime: 0 Path: "/33/pdisk-101.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046353Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 34, response# PDiskStateInfo { PDiskId: 102 CreateTime: 0 ChangeTime: 0 Path: "/34/pdisk-102.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 103 CreateTime: 0 ChangeTime: 0 Path: "/34/pdisk-103.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 104 CreateTime: 0 
ChangeTime: 0 Path: "/34/pdisk-104.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046365Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 35, response# PDiskStateInfo { PDiskId: 105 CreateTime: 0 ChangeTime: 0 Path: "/35/pdisk-105.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 106 CreateTime: 0 ChangeTime: 0 Path: "/35/pdisk-106.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 107 CreateTime: 0 ChangeTime: 0 Path: "/35/pdisk-107.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046378Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 36, response# PDiskStateInfo { PDiskId: 108 CreateTime: 0 ChangeTime: 0 Path: "/36/pdisk-108.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 109 CreateTime: 0 ChangeTime: 0 Path: "/36/pdisk-109.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 110 CreateTime: 0 ChangeTime: 0 Path: "/36/pdisk-110.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046389Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 37, response# PDiskStateInfo { PDiskId: 111 CreateTime: 0 ChangeTime: 0 Path: "/37/pdisk-111.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 112 CreateTime: 0 ChangeTime: 0 Path: "/37/pdisk-112.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 113 CreateTime: 0 ChangeTime: 0 Path: "/37/pdisk-113.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046402Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 38, response# PDiskStateInfo { PDiskId: 114 CreateTime: 0 ChangeTime: 0 Path: "/38/pdisk-114.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 115 CreateTime: 0 ChangeTime: 0 Path: "/38/pdisk-115.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 116 CreateTime: 0 ChangeTime: 0 Path: "/38/pdisk-116.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046414Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 39, response# PDiskStateInfo { PDiskId: 117 CreateTime: 0 ChangeTime: 0 Path: "/39/pdisk-117.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 118 CreateTime: 0 ChangeTime: 0 Path: "/39/pdisk-118.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 119 CreateTime: 0 ChangeTime: 0 Path: "/39/pdisk-119.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046426Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle 
TEvWhiteboard::TEvPDiskStateResponse: nodeId# 40, response# PDiskStateInfo { PDiskId: 120 CreateTime: 0 ChangeTime: 0 Path: "/40/pdisk-120.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 121 CreateTime: 0 ChangeTime: 0 Path: "/40/pdisk-121.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 122 CreateTime: 0 ChangeTime: 0 Path: "/40/pdisk-122.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046438Z node 25 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 93 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-93.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 94 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-94.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } PDiskStateInfo { PDiskId: 95 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-95.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120126 2025-06-03T10:32:45.046446Z node 25 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-03T10:32:45.057455Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:45.057548Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "25" Devices: "pdisk-25-75" Devices: "pdisk-25-76" Devices: "pdisk-25-77" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: REPLACE_DEVICES Host: "25" Devices: "pdisk-25-75" Devices: "pdisk-25-76" Devices: "pdisk-25-77" Duration: 60000000 } Deadline: 180126512 } } 2025-06-03T10:32:45.057562Z node 25 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.126512Z 2025-06-03T10:32:45.091081Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 25:77 (::1:/25/pdisk-77.data) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.091109Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 25:75 (::1:/25/pdisk-75.data) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.091113Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 25:76 (::1:/25/pdisk-76.data) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.091273Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:45.091295Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:45.091308Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:45.091764Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:45.091774Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 2025-06-03T10:32:45.091785Z node 25 :CMS DEBUG: 
node_checkers.cpp:101: [Nodes Counter] Checking Node: 34, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:45.091856Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:45.091877Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:32:45.091882Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12010 (34) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.091892Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:45.091933Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.228024Z, action# Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 2025-06-03T10:32:45.103078Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:45.103248Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Deadline: 180228024 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 34 InterconnectPort: 12010 } } } } >> TTicketParserTest::TicketFromCertificateWithValidationBad [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions >> TCmsTest::ActionIssue [GOOD] >> TTicketParserTest::NebiusAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::NebiusAuthorizationUnavailable >> TTicketParserTest::BulkAuthorizationRetryError ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::ActionIssue [GOOD] Test command err: 2025-06-03T10:32:38.932354Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:38.933089Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:38.936475Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:38.936581Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:38.937140Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:38.937219Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:38.937690Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:38.937747Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:38.937812Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
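The grants above also show two recurring pieces of bookkeeping. First, the "[Nodes Counter]" lines check how many nodes are already locked or down against a per-request limit and a ratio limit (a value of 0 apparently meaning "no limit"). Second, every permission's Deadline is the current cluster timestamp plus the action's Duration, with all three values in microseconds (120129000 + 60000000 = 180129000 in the grant above). A sketch of both rules follows; the exact inequalities are inferred from the ALLOW outcomes visible here, since the rejecting cases are partly elided in this report:

#include <cstdint>
#include <cstdio>

// Disallow when the nodes already locked or down would breach either limit.
// A limit of 0 is treated as "unlimited", matching the "with limit: 0" grants above.
bool AllowNodeAction(int totalNodes, int lockedNodes, int downNodes,
                     int limit, int ratioLimitPercent) {
    const int unavailable = lockedNodes + downNodes;
    if (limit > 0 && unavailable >= limit)
        return false;
    if (ratioLimitPercent > 0 && unavailable * 100 >= totalNodes * ratioLimitPercent)
        return false;
    return true;
}

// Deadline arithmetic: timestamps and durations are microseconds.
int64_t PermissionDeadlineUs(int64_t nowUs, int64_t durationUs) {
    return nowUs + durationUs;
}

int main() {
    // "limit: 3 ... locked nodes: 1, down nodes: 0" -> ALLOW (grant for host 34 above)
    std::printf("%d\n", AllowNodeAction(16, 1, 0, 3, 0));   // 1
    // one locked node out of 8 already reaches a 10% ratio limit (1/8 = 12.5%)
    std::printf("%d\n", AllowNodeAction(8, 1, 0, 0, 10));   // 0
    std::printf("%lld\n", (long long)PermissionDeadlineUs(120129000, 60000000)); // 180129000
}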
2025-06-03T10:32:38.937933Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:38.940103Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:38.940141Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:38.940187Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:38.940240Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:38.971471Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:39.004611Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:39.004727Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:39.005992Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:39.006143Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:39.006149Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:39.006157Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:39.006160Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:39.006174Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:39.006223Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:39.006254Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:39.007530Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:39.049696Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:39.049766Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:39.049928Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:32:39.049976Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:39.108628Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.108738Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:39.108870Z node 
1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120028000 } } 2025-06-03T10:32:39.149592Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:39.191020Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:39.191102Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 10 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-03T10:32:39.191115Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.227080Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.227118Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.227136Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:39.227213Z 
node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:39.227225Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:39.227239Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:39.227246Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 10, locked nodes: 0, down nodes: 0 2025-06-03T10:32:39.227257Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:32:39.227261Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:39.227266Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:39.227272Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:39.227292Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:32:39.227302Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:39.227314Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:39.227354Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.129000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:39.238297Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:39.238380Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-03T10:32:39.238389Z node 1 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.129000Z 2025-06-03T10:32:39.259722Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:39.259797Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.259823Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.259843Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:39.259934Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:39.259945Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-03T10:32:39.259957Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 
0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:39.259963Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 10, locked nodes: 1, down nodes: 0 2025-06-03T10:32:3 ... { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } RequestId: "user-r-2" Deadline: 420131512 } 2025-06-03T10:32:45.796561Z node 25 :CMS INFO: cms.cpp:1366: Get selected requests for user 2025-06-03T10:32:45.796573Z node 25 :CMS DEBUG: cms.cpp:1392: Resulting status: OK 2025-06-03T10:32:45.796596Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: GET RequestId: "user-r-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } Requests { RequestId: "user-r-2" Owner: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY Priority: 0 } } 2025-06-03T10:32:45.839661Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.839771Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:45.839789Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:45.839800Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:45.839974Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:45.839981Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 2025-06-03T10:32:45.839989Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 34, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:45.840017Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:45.840026Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } 2025-06-03T10:32:45.840029Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:32:45.840043Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: ) 2025-06-03T10:32:45.840065Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:45.840107Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:45.851015Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:45.851094Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } RequestId: "user-r-2" Deadline: 420233024 } 2025-06-03T10:32:45.851209Z node 25 :CMS INFO: cms.cpp:1366: Get selected requests for user 2025-06-03T10:32:45.851225Z node 25 :CMS DEBUG: cms.cpp:1392: Resulting status: OK 2025-06-03T10:32:45.851252Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: GET RequestId: "user-r-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } Requests { RequestId: "user-r-2" Owner: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY Priority: 0 } } 2025-06-03T10:32:45.851301Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:32:45.851305Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:32:45.851316Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:32:45.851337Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:32:45.862303Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:32:45.862377Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:32:45.947808Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:45.947875Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:45.947896Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:45.948156Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:45.948168Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 2025-06-03T10:32:45.948180Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 34, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:45.948223Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:45.948240Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } 2025-06-03T10:32:45.948245Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:45.948271Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:45.948292Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:32:45.948301Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12010 (34) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.948308Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-2, owner# user 2025-06-03T10:32:45.948313Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:45.948324Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:45.948369Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.336048Z, action# Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 2025-06-03T10:32:45.948380Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.336048Z, action# Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 2025-06-03T10:32:45.948390Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, owner# user 2025-06-03T10:32:45.959453Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:45.959559Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "34" Duration: 60000000 } Deadline: 180336048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 34 InterconnectPort: 12010 } } } Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Deadline: 180336048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:32:45.959725Z node 25 :CMS INFO: cms.cpp:1366: Get selected requests for user 2025-06-03T10:32:45.959737Z node 25 :CMS DEBUG: cms.cpp:1392: Resulting status: WRONG_REQUEST Unknown request user-r-2 2025-06-03T10:32:45.959759Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user" Command: GET RequestId: "user-r-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: WRONG_REQUEST Reason: "Unknown request user-r-2" } }
>> TCmsTest::TestLogOperationsRollback [GOOD]
>> TTicketParserTest::NebiusAuthenticationUnavailable
>> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD]
>> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::TestLogOperationsRollback [GOOD]
Test command err: 2025-06-03T10:32:39.829534Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:39.830554Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type:
10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:39.833133Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:39.833185Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:39.833246Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:39.833363Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:39.833850Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:39.833928Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:39.834369Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:39.834399Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:39.836010Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:39.836037Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:39.836084Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:39.836150Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:39.866349Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:39.899208Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:39.899320Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:39.900955Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:39.901135Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:39.901145Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:39.901156Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:39.901161Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:39.901182Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:39.901213Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:39.901237Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:39.903553Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: 
"/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 
} VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:39.946101Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:39.946184Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:39.983665Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.983713Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.983809Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:05:00Z 2025-06-03T10:32:39.984123Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 
300029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... ser: "user" Actions { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "38" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:44.939450Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939453Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 22, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:44.939470Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939474Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939477Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:44.939493Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939498Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "38" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939500Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 38, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:32:44.939516Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939551Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "38" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "22" Services: "storage" Duration: 60000000 } Deadline: 180127512 
Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 22 InterconnectPort: 12005 } } } Permissions { Action { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 30 InterconnectPort: 12013 } } } Permissions { Action { Type: RESTART_SERVICES Host: "38" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 38 InterconnectPort: 12021 } } } } 2025-06-03T10:32:44.939577Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "31" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "39" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:44.939583Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939588Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 23, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:44.939608Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939613Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "31" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939616Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 31, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:44.939632Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939637Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "39" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939639Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 39, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:32:44.939656Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939679Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "31" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "39" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "23" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 23 InterconnectPort: 12006 } } } Permissions { Action { Type: RESTART_SERVICES Host: "31" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 31 InterconnectPort: 12014 } } } Permissions { Action { Type: RESTART_SERVICES Host: "39" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 39 InterconnectPort: 12022 } } } } 
2025-06-03T10:32:44.939695Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:44.939698Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939701Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 24, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:44.939718Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939723Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939725Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:44.939741Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939746Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939749Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 40, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:32:44.939764Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939787Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 24 InterconnectPort: 12007 } } } Permissions { Action { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 32 InterconnectPort: 12015 } } } Permissions { Action { Type: RESTART_SERVICES Host: "40" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 40 InterconnectPort: 12023 } } } } 2025-06-03T10:32:44.939803Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "33" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:44.939807Z node 18 :CMS DEBUG: 
cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939809Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:44.939841Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939847Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "33" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939849Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 33, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:44.939866Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939871Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 2025-06-03T10:32:44.939874Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 41, with state: Up, with limit: 3, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:32:44.939889Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:44.939912Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "33" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12008 } } } Permissions { Action { Type: RESTART_SERVICES Host: "33" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 33 InterconnectPort: 12016 } } } Permissions { Action { Type: RESTART_SERVICES Host: "41" Services: "storage" Duration: 60000000 } Deadline: 180127512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 41 InterconnectPort: 12024 } } } }
>> TTicketParserTest::NebiusAuthenticationUnavailable [GOOD]
>> TTicketParserTest::NebiusAuthorizationRetryError
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationUnavailable [GOOD]
Test command err: 2025-06-03T10:32:46.904450Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669166156108902:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:46.904683Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f79/r3tmp/tmpfsFPKT/pdisk_1.dat 2025-06-03T10:32:46.955509Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669166156108883:2079] 1748946766904255 != 1748946766904258 2025-06-03T10:32:46.955543Z node
1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14074, node 1 2025-06-03T10:32:46.966363Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:46.966377Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:46.966379Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:46.966420Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13277 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:47.007242Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:47.007272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:47.008343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:47.034186Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:47.037206Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket BEF525231B33DD72292A5377DCC8862BAD00C272CE6382C42090A61ADC318A41 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-03T10:32:47.559472Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669168817125580:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:47.559486Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f79/r3tmp/tmpBMvXP6/pdisk_1.dat TServer::EnableGrpc on GrpcPort 9778, node 2 2025-06-03T10:32:47.574322Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:47.574617Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669168817125560:2079] 1748946767559396 != 1748946767559399 2025-06-03T10:32:47.578710Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:47.578718Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:47.578720Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:47.578754Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16530 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:47.663082Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:47.663107Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:47.663438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:47.664011Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:47.664625Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 4CA524484BA4A8142FB94DD410FC168C54AE91D023C72EA1649C89F5CFC7BF11 () has now permanent error message 'Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers' 2025-06-03T10:32:47.664672Z node 2 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 4CA524484BA4A8142FB94DD410FC168C54AE91D023C72EA1649C89F5CFC7BF11: Cannot create token from certificate. Client`s certificate and server`s certificate have different issuers 2025-06-03T10:32:48.379605Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669171248485398:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:48.379653Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f79/r3tmp/tmpKtVkMd/pdisk_1.dat 2025-06-03T10:32:48.394521Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:48.394740Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669171248485378:2079] 1748946768379494 != 1748946768379497 TServer::EnableGrpc on GrpcPort 29432, node 3 2025-06-03T10:32:48.404804Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:48.404816Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:48.404817Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:48.404859Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24900 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:48.481235Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:48.481263Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:48.482356Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:48.485910Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:48.487669Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket D333039493A4FC1D559615C81028DF4FB65EB7DD41DD53ACACEEDECCEAB66AC6 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-03T10:32:48.487723Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket D333039493A4FC1D559615C81028DF4FB65EB7DD41DD53ACACEEDECCEAB66AC6: Cannot create token from certificate. Client certificate failed verification 2025-06-03T10:32:48.798170Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511669173568918825:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:48.798194Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f79/r3tmp/tmpLhQDvl/pdisk_1.dat 2025-06-03T10:32:48.814276Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:48.814902Z node 4 :CONFIGS_DISPATCHER ERROR: configs_disp ... terSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:48.904125Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:48.905763Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:48.905785Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:48.905788Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:48.906014Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-03T10:32:48.906032Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [338a7d1f9bf0] Connect to grpc://localhost:12559 2025-06-03T10:32:48.906648Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [338a7d1f9bf0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-03T10:32:48.908892Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [338a7d1f9bf0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-03T10:32:48.908962Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-03T10:32:48.908998Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:48.909230Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:48.909237Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:48.909240Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:48.909248Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-03T10:32:48.909327Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [338a7d1f9bf0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { 
name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } 0: "OK" 2025-06-03T10:32:48.910166Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [338a7d1f9bf0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { resultCode: PERMISSION_DENIED } } } 2025-06-03T10:32:48.910278Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1219: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-03T10:32:48.910304Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for aaaa1234 bbbb4554 - PERMISSION_DENIED' 2025-06-03T10:32:49.201861Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669179076714378:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:49.202051Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f79/r3tmp/tmp187eDc/pdisk_1.dat 2025-06-03T10:32:49.214919Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:49.215213Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669179076714353:2079] 1748946769201620 != 1748946769201623 TServer::EnableGrpc on GrpcPort 6345, node 5 2025-06-03T10:32:49.226284Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:49.226316Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:49.226320Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:49.226406Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24074 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:49.305611Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:49.305648Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:49.305977Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:49.306548Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:49.307784Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:49.307810Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:49.307812Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:49.307832Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-03T10:32:49.307844Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [338a7d01a8b0] Connect to grpc://localhost:26140 2025-06-03T10:32:49.308079Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [338a7d01a8b0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-03T10:32:49.310020Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [338a7d01a8b0] Status 14 Service Unavailable 2025-06-03T10:32:49.310101Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:49.310120Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:49.310125Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:49.310142Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-03T10:32:49.310255Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [338a7d01a8b0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { 
path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } 2025-06-03T10:32:49.310836Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [338a7d01a8b0] Status 1 CANCELLED 2025-06-03T10:32:49.310880Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-03T10:32:49.310882Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-03T10:32:49.310884Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED'
>> TTicketParserTest::TicketFromCertificateWithValidationGood
>> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD]
>> Compression::WriteZSTD [GOOD]
>> Compression::WriteWithMixedCodecs
>> TTicketParserTest::LoginBad
>> TTicketParserTest::AccessServiceAuthenticationOk
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterLimitForceRestartMode [GOOD]
Test command err: 2025-06-03T10:32:38.724845Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:38.729215Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:38.729409Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:38.730164Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:38.730721Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:38.731057Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:38.735907Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:38.735998Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:38.736094Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:38.736163Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:38.736265Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:38.736320Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:38.736367Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-03T10:32:38.736452Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:38.773148Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:38.784822Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:38.784941Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:38.787019Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:38.787207Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:38.787217Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:38.787229Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:38.787234Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:38.787245Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:38.787274Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:38.789846Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:38.800276Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:38.842803Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:38.842873Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:38.843105Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:32:38.843172Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:38.906343Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:38.906486Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:38.906697Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 1 InterconnectPort: 12001 
Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120027000 } Timestamp: 120027000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120027000 } } 2025-06-03T10:32:38.937434Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:38.981218Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:38.981327Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesLimit: 2 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-03T10:32:38.981347Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.028509Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.028548Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.028563Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:39.028638Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:39.028647Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" 
Duration: 60000000 2025-06-03T10:32:39.028657Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 2, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:39.028665Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:32:39.028668Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:39.028670Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:39.028673Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:39.028688Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:32:39.028696Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:39.028705Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:39.028742Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.128000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:39.039889Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:39.039999Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180128000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-03T10:32:39.040013Z node 1 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.128000Z 2025-06-03T10:32:39.061687Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:39.061756Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:39.061773Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:39.061786Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:39.061846Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:39.061857Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-03T10:32:39.061870Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 2, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:39.061877Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:39.061895Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:32:39.061901Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (2) (permission user-p-2 until 1970 ... 
10:32:48.731345Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:48.746343Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:48.746425Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'42\': too many unavailable nodes. Locked: 1, down: 0, limit: 1" } RequestId: "user-r-2" Deadline: 420230512 } 2025-06-03T10:32:48.830579Z node 41 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (41) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:48.830648Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:48.830668Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:48.830679Z node 41 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:48.830726Z node 41 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:48.830733Z node 41 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:48.830743Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:48.830750Z node 41 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '42': too many unavailable nodes. Locked: 1, down: 0, limit: 1) 2025-06-03T10:32:48.830762Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:48.841890Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:48.841989Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'42\': too many unavailable nodes. 
Locked: 1, down: 0, limit: 1" } RequestId: "user-r-3" Deadline: 420332024 } 2025-06-03T10:32:48.853400Z node 41 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (41) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:48.853478Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:48.853501Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:48.853518Z node 41 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:48.853571Z node 41 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:32:48.853582Z node 41 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:48.853593Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:48.853604Z node 41 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '42': too many unavailable nodes. Locked: 1, down: 0, limit: 1) 2025-06-03T10:32:48.853619Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:48.864593Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:48.864680Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'42\': too many unavailable nodes. 
Locked: 1, down: 0, limit: 1" } RequestId: "user-r-4" Deadline: 420433536 } 2025-06-03T10:32:48.864816Z node 41 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:32:48.864831Z node 41 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:32:48.864850Z node 41 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:32:48.864877Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:32:48.875896Z node 41 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:32:48.875973Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:32:48.887567Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:48.887682Z node 41 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:48.887744Z node 41 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:48.887758Z node 41 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:48.887773Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:48.887784Z node 41 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '42': too many unavailable nodes. Locked: 0, down: 1, limit: 1) 2025-06-03T10:32:48.887815Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:48.899073Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:48.899103Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:48.899168Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'42\': too many unavailable nodes. 
Locked: 0, down: 1, limit: 1" } RequestId: "user-r-5" Deadline: 420536560 } 2025-06-03T10:32:48.920909Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:48.921001Z node 41 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:48.921055Z node 41 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:48.921068Z node 41 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:48.921082Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:48.921095Z node 41 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '42': too many unavailable nodes. Locked: 0, down: 1, limit: 1) 2025-06-03T10:32:48.921112Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:48.932055Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:48.932084Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:48.932140Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'42\': too many unavailable nodes. 
Locked: 0, down: 1, limit: 1" } RequestId: "user-r-6" Deadline: 420638072 } 2025-06-03T10:32:49.005666Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:49.005769Z node 41 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:49.005833Z node 41 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:32:49.005848Z node 41 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:49.005861Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:49.005866Z node 41 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 42, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:49.005873Z node 41 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:49.005894Z node 41 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-7, owner# user 2025-06-03T10:32:49.005904Z node 41 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (42) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:49.005917Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:49.005949Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.739584Z, action# Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 2025-06-03T10:32:49.016916Z node 41 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:49.016942Z node 41 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:49.017011Z node 41 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-7" Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "42" Duration: 60000000 } Deadline: 180739584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 42 InterconnectPort: 12002 } } } } >> TTicketParserTest::TicketFromCertificateWithValidationGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood >> TTicketParserTest::LoginBad [GOOD] >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions >> TTicketParserTest::AccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk >> TTicketParserTest::BulkAuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount >> TTicketParserTest::LoginRefreshGroupsWithError [GOOD] >> TTicketParserTest::NebiusAccessServiceAuthenticationOk >> BasicUsage::BrokenCredentialsProvider [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad >> TTicketParserTest::AccessServiceAuthenticationApiKeyOk [GOOD] >> 
TTicketParserTest::AuthenticationUnavailable >> TCmsTenatsTest::TestTenantLimit >> TTicketParserTest::BulkAuthorizationWithUserAccount [GOOD] >> TTicketParserTest::BulkAuthorizationWithUserAccount2 >> TTicketParserTest::NebiusAccessServiceAuthenticationOk [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryError ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> BasicUsage::BrokenCredentialsProvider [GOOD] Test command err: 2025-06-03T10:32:28.933638Z :MaxByteSizeEqualZero INFO: Random seed for debugging is 1748946748933630 2025-06-03T10:32:29.063576Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669091109014469:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.063616Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.068230Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669093000232590:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.068252Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.103248Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00250e/r3tmp/tmppGf9K9/pdisk_1.dat 2025-06-03T10:32:29.105749Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:29.154952Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24873, node 1 2025-06-03T10:32:29.164102Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/00250e/r3tmp/yandexEinXdb.tmp 2025-06-03T10:32:29.164120Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/00250e/r3tmp/yandexEinXdb.tmp 2025-06-03T10:32:29.164208Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/00250e/r3tmp/yandexEinXdb.tmp 2025-06-03T10:32:29.164269Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:29.165671Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.165704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.170023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:29.171082Z INFO: TTestServer started on Port 24159 GrpcPort 24873 TClient is connected to server localhost:24159 PQClient connected to localhost:24873 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:29.207471Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.207508Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.208551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:29.209070Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:32:29.209392Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... waiting... waiting... waiting... 2025-06-03T10:32:29.485212Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669093000232854:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:29.485247Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:29.485509Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669093000232904:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:29.487419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:29.509624Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669093000232906:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-03T10:32:29.597389Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669093000232934:2131] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:29.604707Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669091109015457:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:29.605607Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YTJhOGJmYjYtNWFiNzBjMjEtZjViOWQ0NWYtODVkMGE4Yzk=, ActorId: [1:7511669091109015431:2333], ActorState: ExecuteState, TraceId: 01jwtnk23c38g54xxwme7prq7n, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:29.606190Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:29.609253Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669093000232949:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:29.609418Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=YjNlYmJmOS1jZGM4ODE3ZC1hZmM2YzYzMC0xNDJlNDFkNg==, ActorId: [2:7511669093000232852:2305], ActorState: ExecuteState, TraceId: 01jwtnk21b2x2nrtsx0ta45cvc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:29.609646Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:29.611644Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480 2025-06-03T10:32:29.691837Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:29.784829Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:24873", true, true, 1000); 2025-06-03T10:32:29.912018Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720664. Ctx: { TraceId: 01jwtnk2ddf7vnbvnm4x8sskt2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmFjZGUyNWYtYmQ0NWYyYWUtZTdmMWU3ZTktZjA2NWRkMzQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511669091109015853:2931] 2025-06-03T10:32:34.065901Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669091109014469:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.065946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:34.068501Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511669093000232590:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.068551Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;er ... 
userAgent="pqv1 server" ip=ipv6:[::1]:37540 proto=v1 topic=test-topic durationSec=0 2025-06-03T10:32:51.388556Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:32:51.388896Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-03T10:32:51.388932Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint32; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-03T10:32:51.388938Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:32:51.388940Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-03T10:32:51.388945Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:32:51.389499Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:32:51.410384Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-03T10:32:51.410499Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [5:7511669185643542303:2485] connected; active server actors: 1 2025-06-03T10:32:51.410519Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-03T10:32:51.410523Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-03T10:32:51.410575Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [5:7511669185643542303:2485] disconnected; active server actors: 1 2025-06-03T10:32:51.410585Z node 5 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [5:7511669185643542303:2485] disconnected no session 2025-06-03T10:32:51.424226Z node 5 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-03T10:32:51.424243Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-03T10:32:51.424246Z node 5 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [5:7511669185643542268:2485] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-03T10:32:51.424252Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:32:51.424463Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [5:7511669185643542326:2485], now have 1 active actors on pipe 2025-06-03T10:32:51.424524Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 6, Generation: 1 2025-06-03T10:32:51.424592Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:32:51.424607Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:32:51.424635Z node 6 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|431d0d4e-d60aa096-60733254-b4ac4a37_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-03T10:32:51.424679Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-03T10:32:51.424703Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:32:51.424957Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:32:51.424965Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:32:51.424979Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:32:51.425537Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|431d0d4e-d60aa096-60733254-b4ac4a37_0 2025-06-03T10:32:51.425955Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946771425 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:32:51.425994Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|431d0d4e-d60aa096-60733254-b4ac4a37_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-03T10:32:51.426097Z :INFO: [] MessageGroupId [src] SessionId [src|431d0d4e-d60aa096-60733254-b4ac4a37_0] Write session: close. 
Timeout = 0 ms 2025-06-03T10:32:51.426108Z :INFO: [] MessageGroupId [src] SessionId [src|431d0d4e-d60aa096-60733254-b4ac4a37_0] Write session will now close 2025-06-03T10:32:51.426113Z :DEBUG: [] MessageGroupId [src] SessionId [src|431d0d4e-d60aa096-60733254-b4ac4a37_0] Write session: aborting 2025-06-03T10:32:51.426274Z :INFO: [] MessageGroupId [src] SessionId [src|431d0d4e-d60aa096-60733254-b4ac4a37_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:32:51.426279Z :DEBUG: [] MessageGroupId [src] SessionId [src|431d0d4e-d60aa096-60733254-b4ac4a37_0] Write session: destroy 2025-06-03T10:32:51.426516Z node 5 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|431d0d4e-d60aa096-60733254-b4ac4a37_0 grpc read done: success: 0 data: 2025-06-03T10:32:51.426528Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|431d0d4e-d60aa096-60733254-b4ac4a37_0 grpc read failed 2025-06-03T10:32:51.426534Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|431d0d4e-d60aa096-60733254-b4ac4a37_0 grpc closed 2025-06-03T10:32:51.426538Z node 5 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|431d0d4e-d60aa096-60733254-b4ac4a37_0 is DEAD 2025-06-03T10:32:51.426764Z node 5 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:32:51.426905Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [5:7511669185643542326:2485] destroyed 2025-06-03T10:32:51.426938Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:32:51.428352Z :INFO: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Starting read session 2025-06-03T10:32:51.428367Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Starting session to cluster null (localhost:13251) 2025-06-03T10:32:51.428608Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:51.428613Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:51.428617Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] Reconnecting session to cluster null in 0.000000s 2025-06-03T10:32:51.428680Z :ERROR: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] Got error. Status: CLIENT_UNAUTHENTICATED. Description:
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation 2025-06-03T10:32:51.428686Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:51.428688Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:51.428701Z :INFO: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] Closing session to cluster: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " } Get event on client 2025-06-03T10:32:51.428736Z :NOTICE: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:32:51.428740Z :DEBUG: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] [null] Abort session to cluster Got close event: SessionClosed { Status: CLIENT_UNAUTHENTICATED Issues: "
: Error: Failed to establish connection to server "" ( cluster null). Attempts done: 1
: Error: Can't get Authentication info from CredentialsProvider. ydb/public/sdk/cpp/src/client/persqueue_public/ut/basic_usage_ut.cpp:451: exception during creation " }2025-06-03T10:32:51.428749Z :INFO: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Closing read session. Close timeout: 0.000000s 2025-06-03T10:32:51.428754Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): 2025-06-03T10:32:51.428759Z :INFO: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Counters: { Errors: 1 CurrentSessionLifetimeMs: 0 BytesRead: 0 MessagesRead: 0 BytesReadCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:32:51.428767Z :NOTICE: [/Root] [/Root] [b27bcd34-5ad2abff-f9e3f3a5-1a9ef4f1] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } >> TTicketParserTest::AuthenticationUnavailable [GOOD] >> TTicketParserTest::AuthenticationRetryError >> TTicketParserTest::BulkAuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationUnavailable >> TTicketParserTest::TicketFromCertificateWithValidationDifferentIssuersBad [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood >> DirectReadWithServer::KillPQTablet [GOOD] >> DirectReadWithServer::KillPQRBTablet [GOOD] >> DirectReadWithServer::Devslice [GOOD] >> LocalPartition::Basic >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] >> TMaintenanceApiTest::SingleCompositeActionGroup >> PersQueueSdkReadSessionTest::SpecifyClustersExplicitly [GOOD] >> PersQueueSdkReadSessionTest::StopResumeReadingData >> TTicketParserTest::TicketFromCertificateWithValidationDefaultGroupGood [GOOD] >> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationUnavailable [GOOD] Test command err: 2025-06-03T10:32:50.769652Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669179737753750:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:50.769684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f72/r3tmp/tmpingpJc/pdisk_1.dat 2025-06-03T10:32:50.819434Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669179737753727:2079] 1748946770769492 != 1748946770769495 2025-06-03T10:32:50.824906Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 28122, node 1 2025-06-03T10:32:50.835692Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:50.835706Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:50.835708Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:50.835753Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14677 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:50.900704Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:50.900731Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:50.901461Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:50.901842Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:51.021987Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:51.022157Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:51.022168Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:51.022494Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (5DAB89DE) () has now permanent error message 'Token is not in correct format' 2025-06-03T10:32:51.022508Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Token is not in correct format 2025-06-03T10:32:51.022514Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket **** (5DAB89DE): Token is not in correct format 2025-06-03T10:32:51.230936Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669185123026398:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.230955Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f72/r3tmp/tmpyo1xhi/pdisk_1.dat 2025-06-03T10:32:51.240965Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.241176Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669185123026379:2079] 1748946771230833 != 1748946771230836 TServer::EnableGrpc on GrpcPort 17990, node 2 2025-06-03T10:32:51.251686Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.251699Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.251701Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.251746Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25507 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:51.334845Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.334878Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.335271Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:51.335946Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:51.337518Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:51.337549Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:51.337554Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:51.337586Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-03T10:32:51.337617Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [715d7d01b670] Connect to grpc://localhost:11020 2025-06-03T10:32:51.338477Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d01b670] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-03T10:32:51.340933Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [715d7d01b670] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-03T10:32:51.341021Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-03T10:32:51.341067Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** 
(8E120919) () has now valid token of user1@as 2025-06-03T10:32:51.341306Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:51.341322Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:51.341325Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:51.341338Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-03T10:32:51.341394Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d01b670] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-03T10:32:51.342120Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [715d7d01b670] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } } } 2025-06-03T10:32:51.342185Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.write access denied for subject "user1@as" 2025-06-03T10:32:51.342206Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'something.write for folder_id aaaa1234 - Access Denied' 2025-06-03T10:32:51.601146Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669185123652706:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.601190Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f72/r3tmp/tmpHuFub5/pdisk_1.dat 2025-06-03T10:32:51.616425Z node 3 :IMPORT WARN: schemeshard_impo ... onfiguration TClient is connected to server localhost:21036 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... 
(TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:52.110135Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:52.110164Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:52.110923Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:52.111130Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:52.112978Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:52.112993Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:52.112995Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:52.113021Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read somewhere.sleep something.list something.write something.eat) 2025-06-03T10:32:52.113035Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [715d7d00fbf0] Connect to grpc://localhost:14406 2025-06-03T10:32:52.113238Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d00fbf0] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "somewhere.sleep" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.list" ...(truncated) } 2025-06-03T10:32:52.114919Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [715d7d00fbf0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } results { items { permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission_denied_error { message: "Access Denied" } } items { permission: "something.list" r...(truncated) } 2025-06-03T10:32:52.114983Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.read access denied for subject "user1@as" 2025-06-03T10:32:52.114999Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission somewhere.sleep access denied for subject "user1@as" 2025-06-03T10:32:52.115002Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.list access denied for subject "user1@as" 2025-06-03T10:32:52.115004Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1323: Ticket **** (8E120919) permission something.eat access denied for subject "user1@as" 
2025-06-03T10:32:52.115007Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-03T10:32:52.115047Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [715d7d014070] Connect to grpc://localhost:65427 2025-06-03T10:32:52.115144Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d014070] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-03T10:32:52.116719Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [715d7d014070] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-03T10:32:52.116819Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-03T10:32:52.429657Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669192042078464:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:52.429679Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f72/r3tmp/tmpPZCxSw/pdisk_1.dat 2025-06-03T10:32:52.441160Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:52.441326Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669192042078441:2079] 1748946772429529 != 1748946772429532 TServer::EnableGrpc on GrpcPort 5144, node 5 2025-06-03T10:32:52.452708Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:52.452723Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:52.452725Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:52.452775Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27896 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:52.532953Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:52.532986Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:52.533421Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:52.534079Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:52.535641Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:52.535659Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:52.535662Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:52.535684Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-03T10:32:52.535703Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [715d7d112e30] Connect to grpc://localhost:11664 2025-06-03T10:32:52.535945Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d112e30] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-03T10:32:52.537836Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [715d7d112e30] Status 14 Service Unavailable 2025-06-03T10:32:52.537908Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:52.537920Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:52.537924Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:52.537936Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-03T10:32:52.537994Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [715d7d112e30] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-03T10:32:52.538606Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [715d7d112e30] Status 1 CANCELLED 2025-06-03T10:32:52.538645Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** 
(8E120919) permission something.read now has a retryable error "CANCELLED" retryable: 1 2025-06-03T10:32:52.538654Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.write now has a retryable error "CANCELLED" retryable: 1 2025-06-03T10:32:52.538656Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED'
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCreateClean [GOOD]
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot
>> BasicUsage::WriteAndReadSomeMessagesWithNoCompression [GOOD]
>> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages
>> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD]
>> TCmsTenatsTest::TestTenantLimit [GOOD]
>> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy
>> TCmsTenatsTest::TestNoneTenantPolicy
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest
>> TTicketParserTest::TicketFromCertificateWithValidationCheckIssuerBad [GOOD]
Test command err:
2025-06-03T10:32:50.614586Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669179913933544:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:50.614648Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f74/r3tmp/tmpooZgqg/pdisk_1.dat TServer::EnableGrpc on GrpcPort 27585, node 1 2025-06-03T10:32:50.713628Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:50.713628Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:50.713632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:50.713633Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:50.713680Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:50.713831Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669179913933522:2079] 1748946770614424 != 1748946770614427 2025-06-03T10:32:50.716886Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:50.716948Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:26625 WaitRootIsUp 'Root'...
TClient::Ls request: Root 2025-06-03T10:32:50.718056Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:50.731051Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:50.734281Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket C691A25D4A501FA92A20C3C33AF5B02FBF13F55F52FFCEFA9AA9D68F35CA4CD3 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-03T10:32:51.474234Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669186956096054:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.474255Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f74/r3tmp/tmpL2r8xi/pdisk_1.dat 2025-06-03T10:32:51.491976Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.492172Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669186956096033:2079] 1748946771474136 != 1748946771474139 TServer::EnableGrpc on GrpcPort 23180, node 2 2025-06-03T10:32:51.501214Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.501226Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.501227Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.501262Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2337 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:51.576497Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.576527Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.577669Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:51.582516Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:51.584126Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 397CBE23F51FA51494CE21C6E84AF2D9E53A6C51B7713C6432CA845427CDB6D0 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-03T10:32:52.065671Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669191439709036:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:52.065694Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f74/r3tmp/tmpuOYpEj/pdisk_1.dat 2025-06-03T10:32:52.080438Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:52.080654Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669191439709015:2079] 1748946772065535 != 1748946772065538 TServer::EnableGrpc on GrpcPort 21118, node 3 2025-06-03T10:32:52.087385Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:52.087396Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:52.087397Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:52.087436Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:25711 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:52.169458Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:52.169495Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:52.169954Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:52.170436Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:52.172132Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 2BD7CC39B58D5CC24CB3C8F51B8B660C27B84698EE2B903B61F01B4F38950771 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-03T10:32:52.172215Z node 3 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 2BD7CC39B58D5CC24CB3C8F51B8B660C27B84698EE2B903B61F01B4F38950771: Cannot create token from certificate. 
Client certificate failed verification 2025-06-03T10:32:52.764927Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511669191786497887:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:52.764947Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f74/r3tmp/tmp2cEQqh/pdisk_1.dat 2025-06-03T10:32:52.779420Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:52.779694Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511669191786497867:2079] 1748946772764799 != 1748946772764802 TServer::EnableGrpc on GrpcPort 27842, node 4 2025-06-03T10:32:52.790857Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:52.790878Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:52.790880Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:52.790928Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30267 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:52.868279Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:52.868310Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:52.868689Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
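Two failure classes recur throughout this output. Transport-level gRPC failures, Status 14 Service Unavailable and Status 1 CANCELLED in the node 5 fragment earlier, are logged as retryable, and the parser immediately re-sends the same BulkAuthorizeRequest; semantic failures, such as the 'Cannot create token from certificate. Client certificate failed verification' entries here, become permanent error messages and are not retried. A hedged sketch of that split; the enum values follow the standard gRPC status codes, and IsRetryable is illustrative rather than the actual ticket_parser_impl.h predicate:

    // Standard gRPC status codes observed in the log:
    // 1 = CANCELLED, 14 = UNAVAILABLE ("Service Unavailable").
    enum class EGrpcStatus { Ok = 0, Cancelled = 1, DeadlineExceeded = 4, Unavailable = 14 };

    // Transport errors are worth retrying; a verdict the server actually
    // reached (denied permission, rejected certificate) is permanent.
    bool IsRetryable(EGrpcStatus status) {
        switch (status) {
            case EGrpcStatus::Cancelled:
            case EGrpcStatus::DeadlineExceeded:
            case EGrpcStatus::Unavailable:
                return true;
            default:
                return false;
        }
    }

On a retryable verdict the log shows the parser re-issuing the identical request at once; the permanent certificate error, by contrast, is cached as the ticket's final state.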
2025-06-03T10:32:52.869238Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:52.869582Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:52.870352Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket 977BD1F74409FB31DC51E7D8FD342B4CC16F8C16369D3A4F6804AF068CAFA688 () has now valid token of C=RU,ST=MSK,L=MSK,O=YA,OU=UtTest,CN=localhost@cert 2025-06-03T10:32:53.416925Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669193073195519:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:53.416946Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f74/r3tmp/tmpMh2NIo/pdisk_1.dat 2025-06-03T10:32:53.431228Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:53.431543Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669193073195496:2079] 1748946773416751 != 1748946773416754 TServer::EnableGrpc on GrpcPort 22929, node 5 2025-06-03T10:32:53.439913Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:53.439929Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:53.439930Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:53.439981Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26284 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
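The ">> Suite::Test [GOOD]" lines interleaved with the captured logs are the actual test verdicts of this run; a name without a bracketed verdict is a test that has only started. Blocks opened by a "------- [TM] ..." header carry the stderr of a single test binary. A small sketch of pulling the verdicts out of a line of this output; in practice a structured report from the test runner, where available, is the better source:

    #include <iostream>
    #include <regex>
    #include <string>

    // Extract every ">> Name [VERDICT]" pair embedded in one log line.
    void PrintVerdicts(const std::string& line) {
        static const std::regex re(R"(>> (\S+) \[(\w+)\])");
        for (std::sregex_iterator it(line.begin(), line.end(), re), end; it != end; ++it) {
            std::cout << (*it)[1] << " -> " << (*it)[2] << "\n"; // e.g. "... -> GOOD"
        }
    }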
2025-06-03T10:32:53.520521Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:53.520547Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:53.520874Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:53.521519Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:53.522354Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket 6499FC536AC71AE57B15C630968EF99C11BA2DC8B54AD3447D3027269DD07A75 () has now permanent error message 'Cannot create token from certificate. Client certificate failed verification' 2025-06-03T10:32:53.522435Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket 6499FC536AC71AE57B15C630968EF99C11BA2DC8B54AD3447D3027269DD07A75: Cannot create token from certificate. Client certificate failed verification
>> TxUsage::WriteToTopic_Demo_33_Query [GOOD]
>> ReadSessionImplTest::DataReceivedCallbackReal [GOOD]
>> ReadSessionImplTest::DataReceivedCallback
>> LocalPartition::WithoutPartitionDeadNode [GOOD]
>> LocalPartition::WithoutPartitionPartitionRelocation
>> TxUsage::WriteToTopic_Demo_34_Table
>> TCmsTest::CollectInfo
>> THiveTest::TestCheckSubHiveMigrationManyTablets [GOOD]
>> THiveTest::TestCheckSubHiveMigrationWithReboots
>> TTicketParserTest::AuthorizationRetryError [GOOD]
>> TTicketParserTest::AuthorizationRetryErrorImmediately
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDrop [GOOD]
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanWithRetry [GOOD]
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables
>> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn-WritePortionsOnInsert [GOOD]
>> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn-WritePortionsOnInsert [GOOD]
>> TxUsage::WriteToTopic_Demo_17_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Table [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest
>> THiveTest::TestCreateExternalTablet [GOOD]
Test command err:
2025-06-03T10:30:29.880555Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:29.881484Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:29.881539Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:29.881676Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false
VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:29.881908Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:29.881919Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:29.882097Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:27:2074] ControllerId# 72057594037932033 2025-06-03T10:30:29.882101Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:29.882135Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:29.882153Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:29.885778Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:29.885798Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:29.886254Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:35:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886302Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886346Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886384Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886420Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886458Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886499Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:29.886505Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:29.886524Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:27:2074] 2025-06-03T10:30:29.886530Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:27:2074] 2025-06-03T10:30:29.886542Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:29.886552Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:29.886728Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:29.886860Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.890842Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.890871Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 
131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.890880Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:29.891208Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:29.891238Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.891246Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:29.892128Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:29.892261Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:29.892272Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:29.892326Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.892336Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:29.892827Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:29.892842Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:29.892848Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:29.892886Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:52:2092] 2025-06-03T10:30:29.892904Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:29.893105Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-03T10:30:29.893116Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-03T10:30:29.893120Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-03T10:30:29.893125Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.894235Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:31:2063] 2025-06-03T10:30:29.894250Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:31:2063] 2025-06-03T10:30:29.894276Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.894302Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:30:29.894307Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:29.894328Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: 
"\371$\224\316I\335\243.)W\014\261m\013\346Osy\0160" } 2025-06-03T10:30:29.894360Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:52:2092] 2025-06-03T10:30:29.894367Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:52:2092] 2025-06-03T10:30:29.894376Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.894436Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:29.894476Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:30:29.894486Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:29.894495Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.003218s 2025-06-03T10:30:29.895084Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:52:2092] 2025-06-03T10:30:29.895190Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:29.895221Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:29.895254Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-03T10:30:29.895259Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-03T10:30:29.895369Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:27:2074] 2025-06-03T10:30:29.895375Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:27:2074] 2025-06-03T10:30:29.895383Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:29.896076Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:29.896099Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:29.896132Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: 
Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-03T10:30:29.896139Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-03T10:30:29.896142Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 720575 ... ccept Connect Originator# [100:306:2288] 2025-06-03T10:32:36.470795Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72057594037932033] connected with status OK role: Leader [100:306:2288] 2025-06-03T10:32:36.470800Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72057594037932033] send queued [100:306:2288] 2025-06-03T10:32:36.470803Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [100:306:2288] 2025-06-03T10:32:36.470807Z node 100 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [100:274:2265] EventType# 268637702 2025-06-03T10:32:36.470825Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} queued, type NKikimr::NBsController::TBlobStorageController::TTxSelectGroups 2025-06-03T10:32:36.470831Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:32:36.470859Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} hope 1 -> done Change{20, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:32:36.470863Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037932033:2:9} Tx{28, NKikimr::NBsController::TBlobStorageController::TTxSelectGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:32:36.470879Z node 100 :HIVE DEBUG: hive_impl.cpp:72: HIVE#72057594037927937 Connected to tablet 72057594037932033 from tablet 72057594037927937 2025-06-03T10:32:36.470910Z node 100 :HIVE DEBUG: hive_impl.cpp:433: HIVE#72057594037927937 THive::Handle TEvControllerSelectGroupsResult: success Status: OK NewStyleQuerySupported: true MatchingGroups { Groups { ErasureSpecies: 0 GroupID: 2147483648 StoragePoolName: "def1" AssuredResources { } CurrentResources { } PhysicalGroup: true Decommitted: false } } MatchingGroups { Groups { ErasureSpecies: 0 GroupID: 2147483649 StoragePoolName: "def2" AssuredResources { } CurrentResources { } PhysicalGroup: true Decommitted: false } } MatchingGroups { Groups { ErasureSpecies: 0 GroupID: 2147483650 StoragePoolName: "def3" AssuredResources { } CurrentResources { } PhysicalGroup: true Decommitted: false } } 2025-06-03T10:32:36.470924Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-03T10:32:36.470927Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:32:36.470934Z node 100 :HIVE DEBUG: tx__update_tablet_groups.cpp:63: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{57126168771136}(72075186224037888,HIVE_REASSIGN_REASON_NO,[]) 2025-06-03T10:32:36.470945Z node 100 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{57126168771136}: tablet 
72075186224037888 channel 0 assigned to group 2147483648 2025-06-03T10:32:36.470967Z node 100 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{57126168771136}: tablet 72075186224037888 channel 1 assigned to group 2147483649 2025-06-03T10:32:36.470974Z node 100 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{57126168771136}: tablet 72075186224037888 channel 2 assigned to group 2147483650 2025-06-03T10:32:36.470985Z node 100 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{57126168771136}(72075186224037888)::Execute - TryToBoot was not successfull 2025-06-03T10:32:36.470992Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{5, redo 698b alter 0b annex 0, ~{ 2, 1, 3 } -{ }, 0 gb} 2025-06-03T10:32:36.470995Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} Tx{5, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:32:36.481410Z node 100 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [49bb8b081a887568] bootstrap ActorId# [100:309:2291] Group# 0 BlobCount# 1 BlobIDs# [[72057594037927937:2:4:0:0:698:0]] HandleClass# TabletLog Tactic# MinLatency RestartCounter# 0 Marker# BPP13 2025-06-03T10:32:36.481462Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:42: [49bb8b081a887568] Id# [72057594037927937:2:4:0:0:698:0] restore disk# 0 part# 0 situation# ESituation::Unknown Marker# BPG51 2025-06-03T10:32:36.481469Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_strategy_restore.h:65: [49bb8b081a887568] restore Id# [72057594037927937:2:4:0:0:698:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:32:36.481478Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [49bb8b081a887568] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:4:0:0:698:1] Marker# BPG33 2025-06-03T10:32:36.481483Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [49bb8b081a887568] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:4:0:0:698:1] Marker# BPG32 2025-06-03T10:32:36.481511Z node 100 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [100:39:2082] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:4:0:0:698:1] FDS# 698 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:32:36.482123Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [49bb8b081a887568] received {EvVPutResult Status# OK ID# [72057594037927937:2:4:0:0:698:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 19 } Cost# 85496 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 20 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:32:36.482161Z node 100 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [49bb8b081a887568] Result# TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-03T10:32:36.482172Z node 100 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [49bb8b081a887568] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:32:36.482202Z node 100 
:BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.155 sample PartId# [72057594037927937:2:4:0:0:698:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 100 } TEvVPutResult{ TimestampMs# 0.785 VDiskId# [0:1:0:0:0] NodeId# 100 Status# OK } ] } 2025-06-03T10:32:36.482231Z node 100 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:4:0:0:698:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-03T10:32:36.482280Z node 100 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:5} commited cookie 1 for step 4 2025-06-03T10:32:36.482302Z node 100 :HIVE DEBUG: tx__create_tablet.cpp:503: HIVE#72057594037927937 THive::TTxCreateTablet::Complete (72057594037927937,0) TabletId: 72075186224037888 SideEffects: {Notifications: 0x10040201 [100:263:2259] {EvCreateTabletReply Status: OK Owner: 72057594037927937 OwnerIdx: 0 TabletID: 72075186224037888 Origin: 72057594037927937}} 2025-06-03T10:32:36.482332Z node 100 :HIVE DEBUG: tx__update_tablet_groups.cpp:332: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{57126168771136}(72075186224037888)::Complete SideEffects: {Notifications: 0x10040207 [100:263:2259] {EvTabletCreationResult Status: OK TabletID: 72075186224037888} Callbacks: 1 Actions: NKikimr::TTabletKillRequest} 2025-06-03T10:32:36.482383Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:32:36.482409Z node 100 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-03T10:32:36.482417Z node 100 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-03T10:32:36.482422Z node 100 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-03T10:32:36.482435Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482445Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482451Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482501Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037888] ::Bootstrap [100:313:2294] 2025-06-03T10:32:36.482508Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037888] lookup [100:313:2294] 2025-06-03T10:32:36.482528Z node 100 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037888 entry.State: StInit ev: {EvForward TabletID: 72075186224037888 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:32:36.482544Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037888 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:32:36.482553Z node 100 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 0} 2025-06-03T10:32:36.482556Z node 100 
:STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 1} 2025-06-03T10:32:36.482560Z node 100 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037888 Cookie: 2} 2025-06-03T10:32:36.482563Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482568Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482572Z node 100 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72075186224037888} 2025-06-03T10:32:36.482584Z node 100 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037888 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72075186224037888 Cookie: 0 CurrentLeader: [0:0:0] CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[100:24343667:0] : 2}, {[100:2199047599219:0] : 8}, {[100:1099535971443:0] : 5}}}} 2025-06-03T10:32:36.482590Z node 100 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72075186224037888 followers: 0 2025-06-03T10:32:36.482606Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72075186224037888] forward result error, check reconnect [100:313:2294] 2025-06-03T10:32:36.482612Z node 100 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:498: TClient[72075186224037888] connect failed [100:313:2294]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest
>> TColumnShardTestSchema::TTL-Reboot-Internal+FirstPkColumn-WritePortionsOnInsert [GOOD]
Test command err:
Running TestTtl ttlColumnType=Timestamp 2025-06-03T10:32:25.736283Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828672, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:25.738854Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828673, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:25.738988Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:32:25.743163Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:32:25.743252Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:32:25.744092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:25.744152Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:25.744199Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784:
tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:25.744220Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:25.744240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:25.744260Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:25.744279Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:25.744306Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:25.744325Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:25.744345Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:25.744363Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.744382Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:25.750277Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828684, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:25.755024Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:32:25.755122Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-03T10:32:25.755136Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:32:25.755190Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.755240Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:25.755255Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:25.755261Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:32:25.755272Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:32:25.755282Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:25.755290Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:25.755295Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:32:25.755318Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.755327Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:25.755336Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:25.755341Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:32:25.755352Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:32:25.755359Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:25.755367Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:25.755371Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-06-03T10:32:25.755384Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:25.755395Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 
2025-06-03T10:32:25.755399Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:32:25.755409Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:25.755417Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:25.755422Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:32:25.755448Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:25.755457Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:25.755462Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:32:25.755485Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:32:25.755492Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:32:25.755497Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-03T10:32:25.755512Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:32:25.755519Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.755523Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.755533Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:32:25.755541Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:32:25.755548Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:32:25.755552Z node ... 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=140; 2025-06-03T10:32:55.284384Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=140; 2025-06-03T10:32:55.284387Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-03T10:32:55.284393Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284396Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-03T10:32:55.284399Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:199;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-03T10:32:55.284453Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.284467Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284474Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-03T10:32:55.284484Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:230;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-03T10:32:55.284495Z node 5 
:TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:250;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-03T10:32:55.284524Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:595:2600];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-03T10:32:55.284532Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:270;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284540Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284545Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284561Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.284567Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284574Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284578Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [5:596:2601] finished for tablet 9437184 2025-06-03T10:32:55.284616Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[5:595:2600];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.001}],"full":{"a":1748946775282638,"name":"_full_task","f":1748946775282638,"d_finished":0,"c":0,"l":1748946775284584,"d":1946},"events":[{"name":"bootstrap","f":1748946775282688,"d_finished":283,"c":1,"l":1748946775282971,"d":283},{"a":1748946775284560,"name":"ack","f":1748946775284448,"d_finished":99,"c":1,"l":1748946775284547,"d":123},{"a":1748946775284559,"name":"processing","f":1748946775282984,"d_finished":1037,"c":8,"l":1748946775284547,"d":1062},{"name":"ProduceResults","f":1748946775282864,"d_finished":202,"c":11,"l":1748946775284576,"d":202},{"a":1748946775284576,"name":"Finish","f":1748946775284576,"d_finished":0,"c":0,"l":1748946775284584,"d":8},{"name":"task_result","f":1748946775282988,"d_finished":925,"c":7,"l":1748946775284408,"d":925}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284623Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:595:2600];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:32:55.284646Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[5:595:2600];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.001}],"full":{"a":1748946775282638,"name":"_full_task","f":1748946775282638,"d_finished":0,"c":0,"l":1748946775284627,"d":1989},"events":[{"name":"bootstrap","f":1748946775282688,"d_finished":283,"c":1,"l":1748946775282971,"d":283},{"a":1748946775284560,"name":"ack","f":1748946775284448,"d_finished":99,"c":1,"l":1748946775284547,"d":166},{"a":1748946775284559,"name":"processing","f":1748946775282984,"d_finished":1037,"c":8,"l":1748946775284547,"d":1105},{"name":"ProduceResults","f":1748946775282864,"d_finished":202,"c":11,"l":1748946775284576,"d":202},{"a":1748946775284576,"name":"Finish","f":1748946775284576,"d_finished":0,"c":0,"l":1748946775284627,"d":51},{"name":"task_result","f":1748946775282988,"d_finished":925,"c":7,"l":1748946775284408,"d":925}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.284656Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:32:55.282375Z;index_granules=0;index_portions=1;index_batches=10;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59648;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59648;selected_rows=0; 2025-06-03T10:32:55.284659Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:32:55.284682Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:596:2601];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;
>> TCmsTenatsTest::TestScheduledPermissionWithNonePolicy [GOOD]
>> TCmsTenatsTest::TestTenantLimitForceRestartMode
>> TxUsage::WriteToTopic_Demo_17_Query
>> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn-WritePortionsOnInsert [GOOD]
>> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn-WritePortionsOnInsert [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot-Internal-FirstPkColumn-WritePortionsOnInsert [GOOD]
Test command err:
Running TestTtl ttlColumnType=Timestamp
2025-06-03T10:32:25.626106Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828672, Sender [1:103:2136], Recipient [1:139:2170]:
NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:25.631836Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828673, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:25.632047Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:32:25.636353Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:32:25.636460Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:32:25.637427Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:25.637494Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:25.637543Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:25.637573Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:25.637595Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:25.637617Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:25.637637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:25.637664Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:25.637689Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:25.637711Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:25.637730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.637749Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:25.644174Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828684, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:25.646453Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:32:25.646534Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-03T10:32:25.646549Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:32:25.646595Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.646648Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:25.646663Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:25.646669Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:32:25.646681Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:32:25.646693Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:25.646703Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:25.646708Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:32:25.646732Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.646741Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:25.646751Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:25.646756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:32:25.646769Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:32:25.646778Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:25.646787Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:25.646793Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-06-03T10:32:25.646808Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:25.646817Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:32:25.646822Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:32:25.646831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:25.646840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:25.646845Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:32:25.646875Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:25.646884Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:25.646889Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:32:25.646913Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:32:25.646922Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:32:25.646927Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-03T10:32:25.646943Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:32:25.646951Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.646956Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.646966Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:32:25.646976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:32:25.646983Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:32:25.646988Z node ... 32:55.216000Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=255; 2025-06-03T10:32:55.216011Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=255; 2025-06-03T10:32:55.216015Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-03T10:32:55.216022Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216025Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-03T10:32:55.216029Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:199;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-03T10:32:55.216080Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.216091Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216096Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-03T10:32:55.216104Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:230;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-03T10:32:55.216111Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:250;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-03T10:32:55.216144Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:594:2599];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-03T10:32:55.216154Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:270;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216163Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216169Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is 
finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216190Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.216196Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216203Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216207Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [5:595:2600] finished for tablet 9437184 2025-06-03T10:32:55.216260Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[5:594:2599];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.002}],"full":{"a":1748946775213911,"name":"_full_task","f":1748946775213911,"d_finished":0,"c":0,"l":1748946775216216,"d":2305},"events":[{"name":"bootstrap","f":1748946775213962,"d_finished":347,"c":1,"l":1748946775214309,"d":347},{"a":1748946775216188,"name":"ack","f":1748946775216075,"d_finished":97,"c":1,"l":1748946775216172,"d":125},{"a":1748946775216187,"name":"processing","f":1748946775214326,"d_finished":1224,"c":9,"l":1748946775216172,"d":1253},{"name":"ProduceResults","f":1748946775214196,"d_finished":243,"c":12,"l":1748946775216205,"d":243},{"a":1748946775216205,"name":"Finish","f":1748946775216205,"d_finished":0,"c":0,"l":1748946775216216,"d":11},{"name":"task_result","f":1748946775214328,"d_finished":1113,"c":8,"l":1748946775216039,"d":1113}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216267Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:594:2599];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:32:55.216298Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[5:594:2599];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.002}],"full":{"a":1748946775213911,"name":"_full_task","f":1748946775213911,"d_finished":0,"c":0,"l":1748946775216272,"d":2361},"events":[{"name":"bootstrap","f":1748946775213962,"d_finished":347,"c":1,"l":1748946775214309,"d":347},{"a":1748946775216188,"name":"ack","f":1748946775216075,"d_finished":97,"c":1,"l":1748946775216172,"d":181},{"a":1748946775216187,"name":"processing","f":1748946775214326,"d_finished":1224,"c":9,"l":1748946775216172,"d":1309},{"name":"ProduceResults","f":1748946775214196,"d_finished":243,"c":12,"l":1748946775216205,"d":243},{"a":1748946775216205,"name":"Finish","f":1748946775216205,"d_finished":0,"c":0,"l":1748946775216272,"d":67},{"name":"task_result","f":1748946775214328,"d_finished":1113,"c":8,"l":1748946775216039,"d":1113}],"id":"9437184::30"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.216307Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:32:55.213615Z;index_granules=0;index_portions=1;index_batches=10;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59748;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59748;selected_rows=0; 2025-06-03T10:32:55.216311Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:32:55.216340Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:595:2600];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;
>> ReadSessionImplTest::DataReceivedCallback [GOOD]
>> TCmsTenatsTest::TestNoneTenantPolicy [GOOD]
>> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost
>> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query
>> TCmsTest::CollectInfo [GOOD]
>> TCmsTest::DynamicConfig
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::DataReceivedCallback [GOOD]
Test command err:
2025-06-03T10:32:29.272593Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.272601Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.272604Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.272773Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.274751Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.274810Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.274888Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.275017Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275073Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.275114Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.275122Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-03T10:32:29.275304Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275307Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275310Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.275386Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.275528Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.275565Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275601Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.275654Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275683Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.275714Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.275719Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-03T10:32:29.275900Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275902Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.275904Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.275984Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:32:29.276110Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.276142Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.276170Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.276349Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.276410Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.276438Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.276446Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-03T10:32:29.276630Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.276634Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.276637Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.276708Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.276813Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.276836Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.276863Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.277623Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.277726Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.277757Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.277767Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-03T10:32:29.277971Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.277974Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.277976Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.278030Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.278177Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.278197Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278229Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.278299Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278340Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.278374Z :DEBUG: Take Data. Partition 1. 
Read: {0, 0} (1-1) 2025-06-03T10:32:29.278381Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-03T10:32:29.278518Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278521Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278523Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.278573Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.278697Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.278728Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278755Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.278800Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.278820Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.278841Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.278846Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-03T10:32:29.279001Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279005Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279008Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.279071Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:29.279170Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.279200Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279231Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.279359Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279400Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.279425Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.279431Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 0 bytes 2025-06-03T10:32:29.279592Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279594Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279596Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.279645Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. 
Initializing session 2025-06-03T10:32:29.279732Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:29.279767Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.279799Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:29.280041Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.280095Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:29.280108Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:29.280113Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 3 bytes 2025-06-03T10:32:29.291827Z :ReadSession INFO: Random seed for debugging is 1748946749291820 2025-06-03T10:32:29.501574Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669089579289997:2215];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.501678Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.507338Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669091278686593:2210];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.507413Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;p ... 
EvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:32:43.796743Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:32:43.796769Z node 2 :PERSQUEUE DEBUG: partition.cpp:3267: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user offset is set to 3 (startOffset 0) session shared/user_1_1_7756059927438552016_v1 2025-06-03T10:32:43.796790Z node 2 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037892, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:32:43.796796Z node 2 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:32:43.796798Z node 2 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:32:43.796800Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:32:43.796802Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000cuser 2025-06-03T10:32:43.796804Z node 2 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037892, Partition: 0, State: StateIdle] m0000000000uuser 2025-06-03T10:32:43.796806Z node 2 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037892, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:32:43.796808Z node 2 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037892, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:32:43.796818Z node 2 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:32:43.797550Z node 2 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 3 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:32:43.797573Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:32:43.797577Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-03T10:32:43.797692Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-03T10:32:43.797723Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) commit done to position 3 endOffset 3 with cookie 3 2025-06-03T10:32:43.797739Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 3 2025-06-03T10:32:43.797978Z :DEBUG: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] [dc1] Committed response: cookies { assign_id: 1 partition_cookie: 3 } 2025-06-03T10:32:43.893542Z :INFO: [] MessageGroupId [test-message-group-id] SessionId 
[test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0] Write session will now close 2025-06-03T10:32:43.893576Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0] Write session: aborting 2025-06-03T10:32:43.893837Z :INFO: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:32:43.893849Z :DEBUG: [] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0] Write session: destroy 2025-06-03T10:32:43.894081Z node 1 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0 grpc read done: success: 0 data: 2025-06-03T10:32:43.894097Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0 grpc read failed 2025-06-03T10:32:43.894111Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0 grpc closed 2025-06-03T10:32:43.894119Z node 1 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message-group-id|6d070e98-ea12f2ec-1e08f165-e1c50294_0 is DEAD 2025-06-03T10:32:43.894460Z node 1 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:32:43.894687Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [1:7511669149708834769:2606] destroyed 2025-06-03T10:32:43.894721Z node 2 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:32:44.616021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:32:44.616040Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:46.557538Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 5 from offset3 2025-06-03T10:32:53.796179Z node 1 :PQ_READ_PROXY DEBUG: partition_actor.cpp:1266: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) wait data in partition inited, cookie 6 from offset3 2025-06-03T10:32:53.894181Z :INFO: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] Closing read session. Close timeout: 0.000000s 2025-06-03T10:32:53.894209Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:3 2025-06-03T10:32:53.894225Z :INFO: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] Counters: { Errors: 0 CurrentSessionLifetimeMs: 16343 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 24 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:32:53.894247Z :NOTICE: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Close with zero timeout " } 2025-06-03T10:32:53.894259Z :DEBUG: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] [dc1] Abort session to cluster 2025-06-03T10:32:53.894635Z :NOTICE: [/Root] [/Root] [7f7d9d1a-80d53e8f-23b95b27-24180508] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
<main>: Error: Aborted " } 2025-06-03T10:32:53.894686Z node 1 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 grpc read done: success# 0, data# { } 2025-06-03T10:32:53.894715Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 grpc read failed 2025-06-03T10:32:53.894723Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 grpc closed 2025-06-03T10:32:53.894743Z node 1 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_1_1_7756059927438552016_v1 is DEAD 2025-06-03T10:32:53.894982Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session shared/user_1_1_7756059927438552016_v1 2025-06-03T10:32:53.895005Z node 2 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [1:7511669123939030530:2524] destroyed 2025-06-03T10:32:53.895023Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_1_1_7756059927438552016_v1 2025-06-03T10:32:53.895159Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [1:7511669123939030527:2521] disconnected; active server actors: 1 2025-06-03T10:32:53.895167Z node 1 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [1:7511669123939030527:2521] client user disconnected session shared/user_1_1_7756059927438552016_v1 2025-06-03T10:32:54.023178Z node 1 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1948: ActorId: [1:7511669196953475533:2705] TxId: 281474976720727. Ctx: { TraceId: 01jwtnkszb7c41c4nqyhnq4par, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YTRhMDdlZjAtYTY4OGE1YTItY2ZkYTM0NTktM2I1MDQ0OA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: Failed to send EvStartKqpTasksRequest because node is unavailable: 2 2025-06-03T10:32:54.023417Z node 1 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:1210: SelfId: [1:7511669196953475537:2705], TxId: 281474976720727, task: 3. Ctx: { SessionId : ydb://session/3?node_id=1&id=YTRhMDdlZjAtYTY4OGE1YTItY2ZkYTM0NTktM2I1MDQ0OA==. TraceId : 01jwtnkszb7c41c4nqyhnq4par. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. Handle abort execution event from: [1:7511669196953475533:2705], status: UNAVAILABLE, reason: {
<main>: Error: Terminate execution } 2025-06-03T10:32:54.248248Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:54.248254Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:54.248257Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:54.248317Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:32:54.248425Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:32:54.248480Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:54.248521Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:32:54.248625Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:54.248654Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:32:54.248710Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (2-2) 2025-06-03T10:32:54.248723Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:32:54.248731Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:32:54.248737Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (2-2) 2025-06-03T10:32:54.248769Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-03T10:32:54.248779Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. 
Number of messages 1, size 8 bytes >> TMaintenanceApiTest::SingleCompositeActionGroup [GOOD] >> TMaintenanceApiTest::SimplifiedMirror3DC ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal+FirstPkColumn-WritePortionsOnInsert [GOOD] Test command err: Running TestTtl ttlColumnType=Timestamp 2025-06-03T10:32:25.699036Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828672, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:25.702830Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828673, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:25.703015Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:32:25.708240Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:32:25.708356Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:32:25.709247Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:25.709331Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:25.709434Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:25.709466Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:25.709487Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:25.709507Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:25.709527Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:25.709552Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:25.709574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:25.709594Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:25.709612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.709632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:25.715834Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828684, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:25.717264Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:32:25.717414Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-03T10:32:25.717428Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:32:25.717474Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.717522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:25.717538Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:25.717545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:32:25.717556Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:32:25.717566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:25.717575Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:25.717580Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:32:25.717603Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.717612Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:25.717620Z node 1 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:25.717625Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:32:25.717638Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:32:25.717645Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:25.717653Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:25.717658Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-06-03T10:32:25.717673Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:25.717683Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:32:25.717688Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:32:25.717697Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:25.717705Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:25.717710Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:32:25.717737Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:25.717746Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:25.717750Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:32:25.717772Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:32:25.717782Z node 1 :TX_COLUMNSHARD WARN: 
log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:32:25.717787Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-03T10:32:25.717801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:32:25.717809Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.717813Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.717823Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:32:25.717832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:32:25.717840Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:32:25.717844Z node ... :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=220; 2025-06-03T10:32:55.929820Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=220; 2025-06-03T10:32:55.929824Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-03T10:32:55.929830Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929833Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-03T10:32:55.929836Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:199;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 
2025-06-03T10:32:55.929884Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.929895Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=timestamp: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929899Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-03T10:32:55.929906Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:230;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;);columns=1;rows=1000; 2025-06-03T10:32:55.929912Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:250;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=timestamp; 2025-06-03T10:32:55.929942Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:628:2632];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=timestamp: uint64; 2025-06-03T10:32:55.929951Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:270;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929958Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929963Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929980Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.929986Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929992Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.929996Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [5:629:2633] finished for tablet 9437184 2025-06-03T10:32:55.930038Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[5:628:2632];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.001}],"full":{"a":1748946775928093,"name":"_full_task","f":1748946775928093,"d_finished":0,"c":0,"l":1748946775930002,"d":1909},"events":[{"name":"bootstrap","f":1748946775928124,"d_finished":263,"c":1,"l":1748946775928387,"d":263},{"a":1748946775929979,"name":"ack","f":1748946775929879,"d_finished":86,"c":1,"l":1748946775929965,"d":109},{"a":1748946775929978,"name":"processing","f":1748946775928395,"d_finished":1030,"c":8,"l":1748946775929966,"d":1054},{"name":"ProduceResults","f":1748946775928281,"d_finished":186,"c":11,"l":1748946775929994,"d":186},{"a":1748946775929994,"name":"Finish","f":1748946775929994,"d_finished":0,"c":0,"l":1748946775930002,"d":8},{"name":"task_result","f":1748946775928397,"d_finished":933,"c":7,"l":1748946775929845,"d":933}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 
2025-06-03T10:32:55.930045Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:628:2632];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:32:55.930068Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[5:628:2632];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.001}],"full":{"a":1748946775928093,"name":"_full_task","f":1748946775928093,"d_finished":0,"c":0,"l":1748946775930049,"d":1956},"events":[{"name":"bootstrap","f":1748946775928124,"d_finished":263,"c":1,"l":1748946775928387,"d":263},{"a":1748946775929979,"name":"ack","f":1748946775929879,"d_finished":86,"c":1,"l":1748946775929965,"d":156},{"a":1748946775929978,"name":"processing","f":1748946775928395,"d_finished":1030,"c":8,"l":1748946775929966,"d":1101},{"name":"ProduceResults","f":1748946775928281,"d_finished":186,"c":11,"l":1748946775929994,"d":186},{"a":1748946775929994,"name":"Finish","f":1748946775929994,"d_finished":0,"c":0,"l":1748946775930049,"d":55},{"name":"task_result","f":1748946775928397,"d_finished":933,"c":7,"l":1748946775929845,"d":933}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:32:55.930077Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:32:55.927912Z;index_granules=0;index_portions=1;index_batches=10;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59648;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59648;selected_rows=0; 2025-06-03T10:32:55.930081Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:32:55.930105Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:629:2633];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; >> TCmsTest::RequestRestartServicesMultipleNodes >> TCmsTest::ManageRequestsWrong >> TCmsTest::TestKeepAvailableMode >> TCmsTest::DynamicConfig [GOOD] >> TCmsTest::DisabledEvictVDisks ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::TTL-Reboot+Internal-FirstPkColumn-WritePortionsOnInsert [GOOD] Test command err: 
Running TestTtl ttlColumnType=Timestamp 2025-06-03T10:32:25.715893Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828672, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:25.718748Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828673, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:25.718904Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:32:25.723230Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:32:25.723339Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:32:25.724319Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:25.724393Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:25.724446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:25.724478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:25.724500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:25.724522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:25.724542Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:25.724569Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:25.724593Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:25.724613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:25.724632Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.724651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:25.731153Z node 1 :TX_COLUMNSHARD TRACE: columnshard_impl.h:389: StateInit, received event# 268828684, Sender [1:103:2136], Recipient [1:139:2170]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:25.734311Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:32:25.734413Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 2025-06-03T10:32:25.734426Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:32:25.734479Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.734526Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:25.734539Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:25.734545Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:32:25.734556Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:32:25.734566Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:25.734574Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:25.734579Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:32:25.734602Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:25.734610Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:25.734618Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:25.734622Z node 1 :TX_COLUMNSHARD 
NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:32:25.734635Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:32:25.734643Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:25.734651Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:25.734656Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 2025-06-03T10:32:25.734668Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:25.734678Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:32:25.734683Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:32:25.734692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:25.734700Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:25.734705Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:32:25.734730Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:25.734739Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:25.734744Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:32:25.734766Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:32:25.734774Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:32:25.734779Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: 
tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=16;type=RestoreV2Chunks; 2025-06-03T10:32:25.734794Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:32:25.734801Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.734806Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=17;type=CleanDeprecatedSnapshot; 2025-06-03T10:32:25.734815Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:32:25.734826Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:32:25.734833Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:32:25.734838Z node ... de 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:21;event=interval_result_received;interval_idx=0;intervalId=405; 2025-06-03T10:32:55.990746Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:47;event=interval_result;interval_idx=0;count=1000;merger=0;interval_id=405; 2025-06-03T10:32:55.990750Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=scanner.cpp:65;event=intervals_finished; 2025-06-03T10:32:55.990755Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990758Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=1;count=1000;finished=1; 2025-06-03T10:32:55.990761Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:199;stage=limit exhausted;limit=limits:(bytes=0;chunks=0);; 2025-06-03T10:32:55.990809Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.990820Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:1;records_count:1000;schema=saved_at: uint64;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990823Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=plain_read_data.cpp:73;event=DoExtractReadyResults;result=0;count=0;finished=1; 2025-06-03T10:32:55.990830Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:230;stage=ready result;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;);columns=1;rows=1000; 2025-06-03T10:32:55.990837Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:250;stage=data_format;batch_size=8000;num_rows=1000;batch_columns=saved_at; 2025-06-03T10:32:55.990867Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:627:2631];bytes=8000;rows=1000;faults=0;finished=0;fault=0;schema=saved_at: uint64; 2025-06-03T10:32:55.990875Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:270;stage=finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990883Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990888Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator 
is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990905Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:32:55.990911Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990915Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990920Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [5:628:2632] finished for tablet 9437184 2025-06-03T10:32:55.990963Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[5:627:2631];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ack","l_processing","l_ProduceResults","f_Finish","l_Finish","l_task_result"],"t":0.001}],"full":{"a":1748946775988954,"name":"_full_task","f":1748946775988954,"d_finished":0,"c":0,"l":1748946775990927,"d":1973},"events":[{"name":"bootstrap","f":1748946775988984,"d_finished":266,"c":1,"l":1748946775989250,"d":266},{"a":1748946775990903,"name":"ack","f":1748946775990805,"d_finished":85,"c":1,"l":1748946775990890,"d":109},{"a":1748946775990902,"name":"processing","f":1748946775989434,"d_finished":987,"c":9,"l":1748946775990891,"d":1012},{"name":"ProduceResults","f":1748946775989127,"d_finished":194,"c":12,"l":1748946775990918,"d":194},{"a":1748946775990918,"name":"Finish","f":1748946775990918,"d_finished":0,"c":0,"l":1748946775990927,"d":9},{"name":"task_result","f":1748946775989437,"d_finished":889,"c":8,"l":1748946775990770,"d":889}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.990969Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[5:627:2631];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:32:55.990993Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[5:627:2631];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_processing","f_ProduceResults","f_task_result"],"t":0},{"events":["f_ack","l_ProduceResults","f_Finish","l_task_result"],"t":0.001},{"events":["l_ack","l_processing","l_Finish"],"t":0.002}],"full":{"a":1748946775988954,"name":"_full_task","f":1748946775988954,"d_finished":0,"c":0,"l":1748946775990973,"d":2019},"events":[{"name":"bootstrap","f":1748946775988984,"d_finished":266,"c":1,"l":1748946775989250,"d":266},{"a":1748946775990903,"name":"ack","f":1748946775990805,"d_finished":85,"c":1,"l":1748946775990890,"d":155},{"a":1748946775990902,"name":"processing","f":1748946775989434,"d_finished":987,"c":9,"l":1748946775990891,"d":1058},{"name":"ProduceResults","f":1748946775989127,"d_finished":194,"c":12,"l":1748946775990918,"d":194},{"a":1748946775990918,"name":"Finish","f":1748946775990918,"d_finished":0,"c":0,"l":1748946775990973,"d":55},{"name":"task_result","f":1748946775989437,"d_finished":889,"c":8,"l":1748946775990770,"d":889}],"id":"9437184::35"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;;); 2025-06-03T10:32:55.991002Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:32:55.988774Z;index_granules=0;index_portions=1;index_batches=10;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=59748;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=59748;selected_rows=0; 2025-06-03T10:32:55.991006Z node 5 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:32:55.991034Z node 5 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: SelfId=[5:628:2632];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=9;column_names=saved_at;);;program_input=(column_ids=9;column_names=saved_at;);;; >> TTicketParserTest::AuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::AuthorizationWithRequiredPermissions >> TMaintenanceApiTest::SimplifiedMirror3DC [GOOD] >> TMaintenanceApiTest::RequestReplaceDevicePDisk >> TCmsTest::RequestRestartServicesRejectSecond >> TCmsTest::WalleTasks >> TTicketParserTest::AuthorizationWithRequiredPermissions [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount >> 
TCmsTenatsTest::TestTenantLimitForceRestartMode [GOOD] >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled >> TCmsTest::RequestReplaceBrokenDevices >> TTicketParserTest::AuthorizationWithUserAccount [GOOD] >> TTicketParserTest::AuthorizationUnavailable >> TCmsTenatsTest::TestDefaultTenantPolicyWithSingleTenantHost [GOOD] >> TCmsTenatsTest::TestLimitsWithDownNode >> Compression::WriteWithMixedCodecs [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithAbort >> TTicketParserTest::AuthorizationUnavailable [GOOD] >> TxUsage::WriteToTopic_Demo_6_Query [GOOD] >> TCmsTest::RequestRestartServicesMultipleNodes [GOOD] >> TCmsTest::RequestRestartServicesDryRun >> LocalPartition::Basic [GOOD] >> LocalPartition::DescribeBadPartition >> TCmsTest::ManageRequestsWrong [GOOD] >> TCmsTest::ManageRequestsDry >> TxUsage::WriteToTopic_Demo_7_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthorizationUnavailable [GOOD] Test command err: 2025-06-03T10:32:42.785313Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669148214655093:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:42.785381Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8a/r3tmp/tmpUXgVm8/pdisk_1.dat 2025-06-03T10:32:42.844655Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:42.845461Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669148214655074:2079] 1748946762785091 != 1748946762785094 TServer::EnableGrpc on GrpcPort 9376, node 1 2025-06-03T10:32:42.860205Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:42.860218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:42.860220Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:42.860255Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19975 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:42.914809Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:42.914834Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:42.915597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:42.915732Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:32:42.919158Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.919194Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [1391be3f2330] Connect to grpc://localhost:8920 2025-06-03T10:32:42.919864Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391be3f2330] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.921919Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391be3f2330] Status 14 Service Unavailable 2025-06-03T10:32:42.921984Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-03T10:32:42.922003Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:42.922007Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:42.922083Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391be3f2330] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:42.922474Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391be3f2330] Status 14 Service Unavailable 2025-06-03T10:32:42.922509Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-03T10:32:42.922520Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:43.786984Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:43.787002Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:43.787109Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391be3f2330] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 
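The refresh cadence above — the ticket is re-checked at roughly +1s, +1s, then +2s before the authorization finally succeeds — is consistent with an exponential backoff applied only to statuses the ticket parser marks retryable (here gRPC status 14, Service Unavailable), while the "Access Denied" case later in the log is treated as permanent and never retried. A minimal sketch of that classification and backoff follows; the IsRetryable predicate and the doubling schedule are assumptions for illustration, not taken from the YDB sources.

#include <chrono>
#include <functional>
#include <thread>

// Sketch only: status numbers follow what the log shows
// (14 = Service Unavailable, retryable:1; 1 = CANCELLED, retryable:1;
//  16 is logged as "Access Denied" and treated as permanent, retryable:0).
bool IsRetryable(int status) {
    return status == 14 || status == 1;
}

int AuthorizeWithBackoff(const std::function<int()>& authorizeCall,
                         int maxAttempts = 5) {
    auto delay = std::chrono::seconds(1);
    for (int attempt = 1; attempt <= maxAttempts; ++attempt) {
        const int status = authorizeCall();
        if (status == 0 || !IsRetryable(status)) {
            return status; // success, or a permanent error such as Access Denied
        }
        std::this_thread::sleep_for(delay); // refresh later, as the timestamps show
        delay *= 2;                         // assumed doubling: ~1s, 2s, 4s, ...
    }
    return 14; // still unavailable after all attempts
}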
2025-06-03T10:32:43.788059Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391be3f2330] Status 14 Service Unavailable 2025-06-03T10:32:43.788136Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-03T10:32:43.788152Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:44.787430Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:44.787451Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:44.787534Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391be3f2330] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:44.788259Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391be3f2330] Status 14 Service Unavailable 2025-06-03T10:32:44.788306Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a permanent error "Service Unavailable" retryable:1 2025-06-03T10:32:44.788320Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:46.788311Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:46.788332Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:46.788430Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391be3f2330] Request AuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:46.789351Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1391be3f2330] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:46.789414Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:46.789444Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-03T10:32:47.785724Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669148214655093:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:47.785772Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:55.087517Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669203006535631:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:55.087591Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: 
fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8a/r3tmp/tmpImewY9/pdisk_1.dat 2025-06-03T10:32:55.101999Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:55.102220Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669203006535612:2079] 1748946775087402 != 1748946775087405 TServer::EnableGrpc on GrpcPort 24153, node 2 2025-06-03T10:32:55.110383Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:55.110395Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:55.110397Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:55.110436Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6105 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03 ... 
392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:57.896632Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-03T10:32:57.896885Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [1391bd116e30] Connect to grpc://localhost:17617 2025-06-03T10:32:57.896990Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd116e30] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-03T10:32:57.898808Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1391bd116e30] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-03T10:32:57.898901Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-03T10:32:57.899114Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:57.899123Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:57.899125Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:57.899129Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-03T10:32:57.899159Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd01f670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:57.899656Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391bd01f670] Status 16 Access Denied 2025-06-03T10:32:57.899694Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Access Denied" retryable:0 2025-06-03T10:32:57.899704Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket **** (8E120919) () has now permanent error message 'Access Denied' 2025-06-03T10:32:57.899872Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:57.899879Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:57.899880Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:57.899884Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:57.899889Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-03T10:32:57.899908Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd01f670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:57.900005Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd01f670] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 
2025-06-03T10:32:57.900297Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1391bd01f670] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:57.900350Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:57.900478Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1391bd01f670] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:57.900500Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-03T10:32:57.900508Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-03T10:32:57.900537Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-03T10:32:58.160490Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669214118976644:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:58.160521Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8a/r3tmp/tmpP26v0V/pdisk_1.dat 2025-06-03T10:32:58.171986Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:58.172204Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669214118976624:2079] 1748946778160385 != 1748946778160388 TServer::EnableGrpc on GrpcPort 15402, node 5 2025-06-03T10:32:58.183313Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:58.183326Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:58.183328Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:58.183374Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14806 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:32:58.263413Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:58.263438Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:58.263785Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:58.264471Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:58.264670Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:32:58.265491Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:58.265506Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:58.265508Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:58.265520Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:58.265530Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-03T10:32:58.265545Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [1391bd912b70] Connect to grpc://localhost:6468 2025-06-03T10:32:58.265715Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd912b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:58.266443Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd912b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:58.267589Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [1391bd912b70] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:58.267628Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.read now has a valid subject "user1@as" 2025-06-03T10:32:58.267672Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391bd912b70] Status 14 Service Unavailable 2025-06-03T10:32:58.267703Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "Service Unavailable" retryable:1 2025-06-03T10:32:58.267711Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:58.267715Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:32:58.267724Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 
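In the node 5 run above, a single ticket carries two permissions (something.read and something.write): the parser issues one AuthorizeRequest per permission back-to-back, and a single retryable failure (Service Unavailable on the write check) puts the whole ticket back into the retry loop even though the read check already returned a valid subject. A sketch of that per-permission fan-out and aggregation is below, with hypothetical names; the real actor-based flow in ticket_parser_impl.h is considerably more involved.

#include <functional>
#include <future>
#include <string>
#include <vector>

struct TPermissionResult {
    int status = 0;      // 0 = OK, gRPC-style code otherwise
    std::string subject; // e.g. "user1@as" on success
};

// Sketch: fan out one authorization call per permission and decide
// whether the ticket needs another refresh. checkPermission is a
// hypothetical stand-in for the gRPC AuthorizeRequest client call.
bool TicketNeedsRetry(
        const std::vector<std::string>& permissions,
        const std::function<TPermissionResult(const std::string&)>& checkPermission) {
    std::vector<std::future<TPermissionResult>> inflight;
    for (const auto& p : permissions) {
        inflight.push_back(std::async(std::launch::async, checkPermission, p));
    }
    bool retry = false;
    for (auto& f : inflight) {
        const TPermissionResult r = f.get();
        if (r.status == 14 || r.status == 1) {
            retry = true; // any retryable error re-queues the whole ticket
        }
    }
    return retry;
}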
2025-06-03T10:32:58.267771Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd912b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:58.267921Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [1391bd912b70] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:32:58.268398Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391bd912b70] Status 1 CANCELLED 2025-06-03T10:32:58.268402Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [1391bd912b70] Status 1 CANCELLED 2025-06-03T10:32:58.268422Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.write now has a permanent error "CANCELLED" retryable:1 2025-06-03T10:32:58.268429Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:1415: Ticket **** (8E120919) permission something.read now has a retryable error "CANCELLED" 2025-06-03T10:32:58.268431Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' |69.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |69.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan |69.9%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/scan/ydb-core-kqp-ut-scan >> TCmsTest::TestKeepAvailableMode [GOOD] >> TCmsTest::TestKeepAvailableModeDisconnects >> TCmsTest::RequestRestartServicesRejectSecond [GOOD] >> TCmsTest::RequestRestartServicesWrongHost >> TKeyValueTest::TestRewriteThenLastValueNewApi [GOOD] >> TxUsage::WriteToTopic_Demo_23_RestartAfterCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_24_Table >> TClusterInfoTest::DeviceId [GOOD] >> TClusterInfoTest::FillInfo [GOOD] >> TCmsTenatsTest::CollectInfo >> TCmsTest::RequestReplaceBrokenDevices [GOOD] >> TCmsTest::PermissionDuration ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/keyvalue/ut/unittest >> TKeyValueTest::TestRewriteThenLastValueNewApi [GOOD] Test command err: Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [1:55:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:58:2057] recipient: [1:51:2095] Leader for TabletID 72057594037927937 is [1:57:2097] sender: [1:75:2057] recipient: [1:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [2:55:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:58:2057] recipient: [2:51:2095] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:75:2057] recipient: [2:14:2061] !Reboot 72057594037927937 (actor [2:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:77:2057] recipient: [2:36:2083] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:80:2057] recipient: [2:79:2110] Leader for TabletID 72057594037927937 is [2:57:2097] sender: [2:81:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:83:2057] recipient: [2:79:2110] !Reboot 72057594037927937 (actor [2:57:2097]) rebooted! !Reboot 72057594037927937 (actor [2:57:2097]) tablet resolver refreshed! new actor is[2:82:2111] Leader for TabletID 72057594037927937 is [2:82:2111] sender: [2:168:2057] recipient: [2:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [3:55:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:58:2057] recipient: [3:51:2095] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:75:2057] recipient: [3:14:2061] !Reboot 72057594037927937 (actor [3:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:77:2057] recipient: [3:36:2083] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:80:2057] recipient: [3:79:2110] Leader for TabletID 72057594037927937 is [3:57:2097] sender: [3:81:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:83:2057] recipient: [3:79:2110] !Reboot 72057594037927937 (actor [3:57:2097]) rebooted! !Reboot 72057594037927937 (actor [3:57:2097]) tablet resolver refreshed! new actor is[3:82:2111] Leader for TabletID 72057594037927937 is [3:82:2111] sender: [3:168:2057] recipient: [3:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [4:55:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:58:2057] recipient: [4:51:2095] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:75:2057] recipient: [4:14:2061] !Reboot 72057594037927937 (actor [4:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:78:2057] recipient: [4:36:2083] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:81:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [4:57:2097] sender: [4:82:2057] recipient: [4:80:2110] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:84:2057] recipient: [4:80:2110] !Reboot 72057594037927937 (actor [4:57:2097]) rebooted! !Reboot 72057594037927937 (actor [4:57:2097]) tablet resolver refreshed! new actor is[4:83:2111] Leader for TabletID 72057594037927937 is [4:83:2111] sender: [4:169:2057] recipient: [4:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [5:55:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:58:2057] recipient: [5:51:2095] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:75:2057] recipient: [5:14:2061] !Reboot 72057594037927937 (actor [5:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:81:2057] recipient: [5:36:2083] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:84:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [5:57:2097] sender: [5:85:2057] recipient: [5:83:2113] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:87:2057] recipient: [5:83:2113] !Reboot 72057594037927937 (actor [5:57:2097]) rebooted! !Reboot 72057594037927937 (actor [5:57:2097]) tablet resolver refreshed! new actor is[5:86:2114] Leader for TabletID 72057594037927937 is [5:86:2114] sender: [5:172:2057] recipient: [5:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [6:55:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:58:2057] recipient: [6:51:2095] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:75:2057] recipient: [6:14:2061] !Reboot 72057594037927937 (actor [6:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:81:2057] recipient: [6:36:2083] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:84:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [6:57:2097] sender: [6:85:2057] recipient: [6:83:2113] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:87:2057] recipient: [6:83:2113] !Reboot 72057594037927937 (actor [6:57:2097]) rebooted! !Reboot 72057594037927937 (actor [6:57:2097]) tablet resolver refreshed! new actor is[6:86:2114] Leader for TabletID 72057594037927937 is [6:86:2114] sender: [6:172:2057] recipient: [6:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [7:55:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:58:2057] recipient: [7:51:2095] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:75:2057] recipient: [7:14:2061] !Reboot 72057594037927937 (actor [7:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:82:2057] recipient: [7:36:2083] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:85:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [7:57:2097] sender: [7:86:2057] recipient: [7:84:2113] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:88:2057] recipient: [7:84:2113] !Reboot 72057594037927937 (actor [7:57:2097]) rebooted! !Reboot 72057594037927937 (actor [7:57:2097]) tablet resolver refreshed! new actor is[7:87:2114] Leader for TabletID 72057594037927937 is [7:87:2114] sender: [7:173:2057] recipient: [7:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [8:55:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:58:2057] recipient: [8:51:2095] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:75:2057] recipient: [8:14:2061] !Reboot 72057594037927937 (actor [8:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! 
Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:84:2057] recipient: [8:36:2083] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:87:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [8:57:2097] sender: [8:88:2057] recipient: [8:86:2115] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:90:2057] recipient: [8:86:2115] !Reboot 72057594037927937 (actor [8:57:2097]) rebooted! !Reboot 72057594037927937 (actor [8:57:2097]) tablet resolver refreshed! new actor is[8:89:2116] Leader for TabletID 72057594037927937 is [8:89:2116] sender: [8:175:2057] recipient: [8:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [9:55:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:58:2057] recipient: [9:50:2095] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:75:2057] recipient: [9:14:2061] !Reboot 72057594037927937 (actor [9:57:2097]) on event NKikimr::TEvKeyValue::TEvRequest ! Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:84:2057] recipient: [9:36:2083] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:87:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [9:57:2097] sender: [9:88:2057] recipient: [9:86:2115] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:90:2057] recipient: [9:86:2115] !Reboot 72057594037927937 (actor [9:57:2097]) rebooted! !Reboot 72057594037927937 (actor [9:57:2097]) tablet resolver refreshed! new actor is[9:89:2116] Leader for TabletID 72057594037927937 is [9:89:2116] sender: [9:175:2057] recipient: [9:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [10:55:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:58:2057] recipient: [10:51:2095] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:75:2057] recipient: [10:14:2061] !Reboot 72057594037927937 (actor [10:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:85:2057] recipient: [10:36:2083] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:88:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [10:57:2097] sender: [10:89:2057] recipient: [10:87:2115] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:91:2057] recipient: [10:87:2115] !Reboot 72057594037927937 (actor [10:57:2097]) rebooted! !Reboot 72057594037927937 (actor [10:57:2097]) tablet resolver refreshed! new actor is[10:90:2116] Leader for TabletID 72057594037927937 is [10:90:2116] sender: [10:176:2057] recipient: [10:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [11:55:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:58:2057] recipient: [11:50:2095] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:75:2057] recipient: [11:14:2061] !Reboot 72057594037927937 (actor [11:57:2097]) on event NKikimr::TEvKeyValue::TEvCollect ! 
Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:86:2057] recipient: [11:36:2083] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:89:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [11:57:2097] sender: [11:90:2057] recipient: [11:88:2116] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:92:2057] recipient: [11:88:2116] !Reboot 72057594037927937 (actor [11:57:2097]) rebooted! !Reboot 72057594037927937 (actor [11:57:2097]) tablet resolver refreshed! new actor is[11:91:2117] Leader for TabletID 72057594037927937 is [11:91:2117] sender: [11:111:2057] recipient: [11:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [12:55:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:58:2057] recipient: [12:51:2095] Leader for TabletID 72057594037927937 is [12:57:2097] sender: [12:75:2057] recipient: [12:14:2061] !Reboot 72057594037927937 (actor [12:57:2 ... 2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [106:55:2057] recipient: [106:51:2095] Leader for TabletID 72057594037927937 is [106:57:2097] sender: [106:58:2057] recipient: [106:51:2095] Leader for TabletID 72057594037927937 is [106:57:2097] sender: [106:75:2057] recipient: [106:14:2061] !Reboot 72057594037927937 (actor [106:57:2097]) on event NKikimr::TEvKeyValue::TEvCompleteGC ! Leader for TabletID 72057594037927937 is [106:57:2097] sender: [106:128:2057] recipient: [106:36:2083] Leader for TabletID 72057594037927937 is [106:57:2097] sender: [106:130:2057] recipient: [106:14:2061] Leader for TabletID 72057594037927937 is [106:57:2097] sender: [106:132:2057] recipient: [106:131:2148] Leader for TabletID 72057594037927937 is [106:133:2149] sender: [106:134:2057] recipient: [106:131:2148] !Reboot 72057594037927937 (actor [106:57:2097]) rebooted! !Reboot 72057594037927937 (actor [106:57:2097]) tablet resolver refreshed! new actor is[106:133:2149] Leader for TabletID 72057594037927937 is [106:133:2149] sender: [106:153:2057] recipient: [106:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [107:55:2057] recipient: [107:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [107:55:2057] recipient: [107:51:2095] Leader for TabletID 72057594037927937 is [107:57:2097] sender: [107:58:2057] recipient: [107:51:2095] Leader for TabletID 72057594037927937 is [107:57:2097] sender: [107:75:2057] recipient: [107:14:2061] !Reboot 72057594037927937 (actor [107:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [107:57:2097] sender: [107:131:2057] recipient: [107:36:2083] Leader for TabletID 72057594037927937 is [107:57:2097] sender: [107:134:2057] recipient: [107:14:2061] Leader for TabletID 72057594037927937 is [107:57:2097] sender: [107:135:2057] recipient: [107:133:2151] Leader for TabletID 72057594037927937 is [107:136:2152] sender: [107:137:2057] recipient: [107:133:2151] !Reboot 72057594037927937 (actor [107:57:2097]) rebooted! !Reboot 72057594037927937 (actor [107:57:2097]) tablet resolver refreshed! 
new actor is[107:136:2152] Leader for TabletID 72057594037927937 is [107:136:2152] sender: [107:222:2057] recipient: [107:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [108:55:2057] recipient: [108:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [108:55:2057] recipient: [108:51:2095] Leader for TabletID 72057594037927937 is [108:57:2097] sender: [108:58:2057] recipient: [108:51:2095] Leader for TabletID 72057594037927937 is [108:57:2097] sender: [108:75:2057] recipient: [108:14:2061] !Reboot 72057594037927937 (actor [108:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [108:57:2097] sender: [108:131:2057] recipient: [108:36:2083] Leader for TabletID 72057594037927937 is [108:57:2097] sender: [108:134:2057] recipient: [108:133:2151] Leader for TabletID 72057594037927937 is [108:57:2097] sender: [108:135:2057] recipient: [108:14:2061] Leader for TabletID 72057594037927937 is [108:136:2152] sender: [108:137:2057] recipient: [108:133:2151] !Reboot 72057594037927937 (actor [108:57:2097]) rebooted! !Reboot 72057594037927937 (actor [108:57:2097]) tablet resolver refreshed! new actor is[108:136:2152] Leader for TabletID 72057594037927937 is [108:136:2152] sender: [108:222:2057] recipient: [108:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [109:55:2057] recipient: [109:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [109:55:2057] recipient: [109:51:2095] Leader for TabletID 72057594037927937 is [109:57:2097] sender: [109:58:2057] recipient: [109:51:2095] Leader for TabletID 72057594037927937 is [109:57:2097] sender: [109:75:2057] recipient: [109:14:2061] !Reboot 72057594037927937 (actor [109:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [109:57:2097] sender: [109:132:2057] recipient: [109:36:2083] Leader for TabletID 72057594037927937 is [109:57:2097] sender: [109:135:2057] recipient: [109:134:2151] Leader for TabletID 72057594037927937 is [109:57:2097] sender: [109:136:2057] recipient: [109:14:2061] Leader for TabletID 72057594037927937 is [109:137:2152] sender: [109:138:2057] recipient: [109:134:2151] !Reboot 72057594037927937 (actor [109:57:2097]) rebooted! !Reboot 72057594037927937 (actor [109:57:2097]) tablet resolver refreshed! new actor is[109:137:2152] Leader for TabletID 72057594037927937 is [109:137:2152] sender: [109:155:2057] recipient: [109:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [110:55:2057] recipient: [110:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [110:55:2057] recipient: [110:50:2095] Leader for TabletID 72057594037927937 is [110:57:2097] sender: [110:58:2057] recipient: [110:50:2095] Leader for TabletID 72057594037927937 is [110:57:2097] sender: [110:75:2057] recipient: [110:14:2061] !Reboot 72057594037927937 (actor [110:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [110:57:2097] sender: [110:134:2057] recipient: [110:36:2083] Leader for TabletID 72057594037927937 is [110:57:2097] sender: [110:137:2057] recipient: [110:136:2153] Leader for TabletID 72057594037927937 is [110:57:2097] sender: [110:138:2057] recipient: [110:14:2061] Leader for TabletID 72057594037927937 is [110:139:2154] sender: [110:140:2057] recipient: [110:136:2153] !Reboot 72057594037927937 (actor [110:57:2097]) rebooted! !Reboot 72057594037927937 (actor [110:57:2097]) tablet resolver refreshed! 
new actor is[110:139:2154] Leader for TabletID 72057594037927937 is [110:139:2154] sender: [110:225:2057] recipient: [110:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [111:55:2057] recipient: [111:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [111:55:2057] recipient: [111:51:2095] Leader for TabletID 72057594037927937 is [111:57:2097] sender: [111:58:2057] recipient: [111:51:2095] Leader for TabletID 72057594037927937 is [111:57:2097] sender: [111:75:2057] recipient: [111:14:2061] !Reboot 72057594037927937 (actor [111:57:2097]) on event NKikimr::TEvKeyValue::TEvExecuteTransaction ! Leader for TabletID 72057594037927937 is [111:57:2097] sender: [111:134:2057] recipient: [111:36:2083] Leader for TabletID 72057594037927937 is [111:57:2097] sender: [111:137:2057] recipient: [111:14:2061] Leader for TabletID 72057594037927937 is [111:57:2097] sender: [111:138:2057] recipient: [111:136:2153] Leader for TabletID 72057594037927937 is [111:139:2154] sender: [111:140:2057] recipient: [111:136:2153] !Reboot 72057594037927937 (actor [111:57:2097]) rebooted! !Reboot 72057594037927937 (actor [111:57:2097]) tablet resolver refreshed! new actor is[111:139:2154] Leader for TabletID 72057594037927937 is [111:139:2154] sender: [111:225:2057] recipient: [111:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [112:55:2057] recipient: [112:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [112:55:2057] recipient: [112:50:2095] Leader for TabletID 72057594037927937 is [112:57:2097] sender: [112:58:2057] recipient: [112:50:2095] Leader for TabletID 72057594037927937 is [112:57:2097] sender: [112:75:2057] recipient: [112:14:2061] !Reboot 72057594037927937 (actor [112:57:2097]) on event NKikimr::TEvKeyValue::TEvIntermediate ! Leader for TabletID 72057594037927937 is [112:57:2097] sender: [112:135:2057] recipient: [112:36:2083] Leader for TabletID 72057594037927937 is [112:57:2097] sender: [112:137:2057] recipient: [112:14:2061] Leader for TabletID 72057594037927937 is [112:57:2097] sender: [112:139:2057] recipient: [112:138:2153] Leader for TabletID 72057594037927937 is [112:140:2154] sender: [112:141:2057] recipient: [112:138:2153] !Reboot 72057594037927937 (actor [112:57:2097]) rebooted! !Reboot 72057594037927937 (actor [112:57:2097]) tablet resolver refreshed! new actor is[112:140:2154] Leader for TabletID 72057594037927937 is [112:140:2154] sender: [112:226:2057] recipient: [112:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [113:55:2057] recipient: [113:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [113:55:2057] recipient: [113:51:2095] Leader for TabletID 72057594037927937 is [113:57:2097] sender: [113:58:2057] recipient: [113:51:2095] Leader for TabletID 72057594037927937 is [113:57:2097] sender: [113:75:2057] recipient: [113:14:2061] !Reboot 72057594037927937 (actor [113:57:2097]) on event NKikimr::TEvTabletPipe::TEvServerConnected ! Leader for TabletID 72057594037927937 is [113:57:2097] sender: [113:138:2057] recipient: [113:36:2083] Leader for TabletID 72057594037927937 is [113:57:2097] sender: [113:141:2057] recipient: [113:14:2061] Leader for TabletID 72057594037927937 is [113:57:2097] sender: [113:142:2057] recipient: [113:140:2156] Leader for TabletID 72057594037927937 is [113:143:2157] sender: [113:144:2057] recipient: [113:140:2156] !Reboot 72057594037927937 (actor [113:57:2097]) rebooted! !Reboot 72057594037927937 (actor [113:57:2097]) tablet resolver refreshed! 
new actor is[113:143:2157] Leader for TabletID 72057594037927937 is [113:143:2157] sender: [113:229:2057] recipient: [113:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [114:55:2057] recipient: [114:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [114:55:2057] recipient: [114:51:2095] Leader for TabletID 72057594037927937 is [114:57:2097] sender: [114:58:2057] recipient: [114:51:2095] Leader for TabletID 72057594037927937 is [114:57:2097] sender: [114:75:2057] recipient: [114:14:2061] !Reboot 72057594037927937 (actor [114:57:2097]) on event NKikimr::TEvKeyValue::TEvRead ! Leader for TabletID 72057594037927937 is [114:57:2097] sender: [114:138:2057] recipient: [114:36:2083] Leader for TabletID 72057594037927937 is [114:57:2097] sender: [114:141:2057] recipient: [114:140:2156] Leader for TabletID 72057594037927937 is [114:57:2097] sender: [114:142:2057] recipient: [114:14:2061] Leader for TabletID 72057594037927937 is [114:143:2157] sender: [114:144:2057] recipient: [114:140:2156] !Reboot 72057594037927937 (actor [114:57:2097]) rebooted! !Reboot 72057594037927937 (actor [114:57:2097]) tablet resolver refreshed! new actor is[114:143:2157] Leader for TabletID 72057594037927937 is [114:143:2157] sender: [114:229:2057] recipient: [114:14:2061] Leader for TabletID 72057594037927937 is [0:0:0] sender: [115:55:2057] recipient: [115:50:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [115:55:2057] recipient: [115:50:2095] Leader for TabletID 72057594037927937 is [115:57:2097] sender: [115:58:2057] recipient: [115:50:2095] Leader for TabletID 72057594037927937 is [115:57:2097] sender: [115:75:2057] recipient: [115:14:2061] !Reboot 72057594037927937 (actor [115:57:2097]) on event NKikimr::TEvKeyValue::TEvNotify ! Leader for TabletID 72057594037927937 is [115:57:2097] sender: [115:139:2057] recipient: [115:36:2083] Leader for TabletID 72057594037927937 is [115:57:2097] sender: [115:142:2057] recipient: [115:141:2156] Leader for TabletID 72057594037927937 is [115:57:2097] sender: [115:143:2057] recipient: [115:14:2061] Leader for TabletID 72057594037927937 is [115:144:2157] sender: [115:145:2057] recipient: [115:141:2156] !Reboot 72057594037927937 (actor [115:57:2097]) rebooted! !Reboot 72057594037927937 (actor [115:57:2097]) tablet resolver refreshed! 
new actor is[115:144:2157] Leader for TabletID 72057594037927937 is [0:0:0] sender: [116:55:2057] recipient: [116:51:2095] IGNORE Leader for TabletID 72057594037927937 is [0:0:0] sender: [116:55:2057] recipient: [116:51:2095] Leader for TabletID 72057594037927937 is [116:57:2097] sender: [116:58:2057] recipient: [116:51:2095] Leader for TabletID 72057594037927937 is [116:57:2097] sender: [116:75:2057] recipient: [116:14:2061] >> TxUsage::WriteToTopic_Demo_34_Table [GOOD] >> TCmsTenatsTest::TestLimitsWithDownNode [GOOD] >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] >> TxUsage::WriteToTopic_Demo_34_Query >> TCmsTest::RequestRestartServicesDryRun [GOOD] >> TCmsTest::RequestReplacePDiskDoesntBreakGroup >> TCmsTest::ManageRequestsDry [GOOD] >> TCmsTest::Notifications ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestTenantLimitForceRestartModeScheduled [GOOD] Test command err: 2025-06-03T10:32:51.986177Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:51.986966Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:51.989197Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:51.989248Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:51.989330Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:51.989427Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:51.989880Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:51.989948Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:51.990267Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:51.990295Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:51.991829Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:51.991852Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:51.991894Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:51.991946Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:52.019772Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:52.052052Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:52.052125Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:52.053173Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:52.053271Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:52.053276Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:52.053282Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:52.053285Z node 1 :CMS INFO: sentinel.cpp:879: 
[Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:52.053322Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:52.053350Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:52.053366Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:52.055198Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:52.097455Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:52.097521Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:52.097661Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:32:52.097706Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:52.156002Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:52.156116Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:52.156273Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: 
"::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-03T10:32:52.186891Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:52.228598Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:52.228684Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 2 DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-03T10:32:52.228700Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:52.263616Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:52.263660Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:52.263679Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:52.263753Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:52.263764Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:52.263777Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:52.263794Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 2, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:52.263804Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:32:52.263807Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:52.263810Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:52.263814Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:52.263833Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:32:52.263841Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:52.263852Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions 
Execute 2025-06-03T10:32:52.263894Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.130000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:52.274993Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:52.275094Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-03T10:32:52.275104Z node 1 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.130000Z 2025-06-03T10:32:52.296640Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:52.296699Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:52.296716Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:52.296731Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:52.296782Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:52.296789Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-03T10:32:52.296798Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:52.296802Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 2, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:52.2 ... nodes. Locked: 1, down: 0, limit: 1" } 2025-06-03T10:32:58.540506Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:58.540509Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:58.540517Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27' of tenant 'user0': too many unavailable nodes. 
Locked: 1, down: 0, limit: 1) 2025-06-03T10:32:58.540532Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-03T10:32:58.540538Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:58.540547Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:58.540578Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.537560Z, action# Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 2025-06-03T10:32:58.540598Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 1, down: 0, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:32:58.551405Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:58.551486Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Deadline: 180537560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:32:58.551602Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:32:58.551610Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:32:58.551621Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:32:58.551644Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:32:58.562390Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:32:58.562441Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:32:58.583795Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:58.583884Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:58.583935Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. 
Locked: 1, down: 0, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:58.583949Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 1, down: 0, limit: 1" } 2025-06-03T10:32:58.583958Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.583961Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.583969Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27' of tenant 'user0': too many unavailable nodes. Locked: 0, down: 1, limit: 1) 2025-06-03T10:32:58.583988Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:58.584020Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:58.594879Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:58.594906Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:58.594953Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } RequestId: "user-r-1" Deadline: 420640584 } 2025-06-03T10:32:58.668305Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:58.668389Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:58.668440Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:58.668449Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. 
Locked: 0, down: 1, limit: 1" } 2025-06-03T10:32:58.668458Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.668461Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.668469Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27' of tenant 'user0': too many unavailable nodes. Locked: 0, down: 1, limit: 1) 2025-06-03T10:32:58.668486Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:58.668514Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:32:58.679479Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:58.679504Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:58.679555Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } RequestId: "user-r-1" Deadline: 420742096 } 2025-06-03T10:32:58.700922Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:58.701001Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:58.701054Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. Locked: 0, down: 1, limit: 1" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:32:58.701065Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\' of tenant \'user0\': too many unavailable nodes. 
Locked: 0, down: 1, limit: 1" } 2025-06-03T10:32:58.701075Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.701078Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 1, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:32:58.701084Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:58.701101Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-03T10:32:58.701107Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12003 (27) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:58.701117Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:58.701138Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.843608Z, action# Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 2025-06-03T10:32:58.701148Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:32:58.711940Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:58.711962Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:58.712024Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } Deadline: 180843608 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 27 InterconnectPort: 12003 } } } } >> TCmsTenatsTest::TestClusterRatioLimit >> TCmsTest::TestKeepAvailableModeDisconnects [GOOD] >> TCmsTest::TestForceRestartModeScheduled >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] >> TCmsTest::RequestRestartServicesWrongHost [GOOD] >> TCmsTest::RestartNodeInDownState >> TCmsTenatsTest::CollectInfo [GOOD] >> TCmsTenatsTest::RequestRestartServices >> TCmsTest::RequestRestartServicesOk ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::RequestReplaceDevicePDisk [GOOD] Test command err: 2025-06-03T10:32:53.272458Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:53.273802Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:53.275951Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:53.276021Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:53.276069Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:53.276130Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:53.276485Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:53.276548Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:53.276839Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:53.276895Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:53.278631Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:53.278672Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:53.278713Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:53.278758Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:53.297396Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:53.329792Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:53.329917Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:53.331049Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:53.331174Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:53.331180Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:53.331188Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:53.331191Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:53.331205Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:53.331255Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:53.331282Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:53.333060Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 9 Path: "/9/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 10 Path: "/10/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 11 Path: "/11/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 12 Path: 
"/12/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 13 Path: "/13/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 14 Path: "/14/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 15 Path: "/15/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 16 Path: "/16/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 
FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1003 } GroupId: 7 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 
PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlo ... tamp: 120028000 NodeId: 33 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Timestamp: 120028000 } } 2025-06-03T10:32:59.574582Z node 28 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-34-34" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 34 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-35-35" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 35 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-28-28" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 28 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-29-29" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 29 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { 
Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-30-30" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 30 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-31-31" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 31 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 32 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-33-33" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 33 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Timestamp: 120028000 } 2025-06-03T10:32:59.574611Z node 28 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was updated in 120.003000s 2025-06-03T10:32:59.574618Z node 28 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-03T10:32:59.574645Z node 28 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "" 2025-06-03T10:32:59.574651Z node 28 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 2025-06-03T10:32:59.574686Z node 28 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:59.574698Z node 28 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:32:59.574704Z node 28 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 28:28 (::1:/28/pdisk-28.data) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:59.574712Z node 28 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:59.574750Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.028000Z, 
action# Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 2025-06-03T10:32:59.574757Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:32:59.574795Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 28, wbId# [28:8388350642965737326:1634689637] 2025-06-03T10:32:59.574799Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 29, wbId# [29:8388350642965737326:1634689637] 2025-06-03T10:32:59.574802Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 30, wbId# [30:8388350642965737326:1634689637] 2025-06-03T10:32:59.574805Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 31, wbId# [31:8388350642965737326:1634689637] 2025-06-03T10:32:59.574808Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 32, wbId# [32:8388350642965737326:1634689637] 2025-06-03T10:32:59.574810Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 33, wbId# [33:8388350642965737326:1634689637] 2025-06-03T10:32:59.574813Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 34, wbId# [34:8388350642965737326:1634689637] 2025-06-03T10:32:59.574816Z node 28 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 35, wbId# [35:8388350642965737326:1634689637] 2025-06-03T10:32:59.574839Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 28, response# PDiskStateInfo { PDiskId: 28 CreateTime: 0 ChangeTime: 0 Path: "/28/pdisk-28.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574917Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 29, response# PDiskStateInfo { PDiskId: 29 CreateTime: 0 ChangeTime: 0 Path: "/29/pdisk-29.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574927Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 30, response# PDiskStateInfo { PDiskId: 30 CreateTime: 0 ChangeTime: 0 Path: "/30/pdisk-30.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574934Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 31, response# PDiskStateInfo { PDiskId: 31 CreateTime: 0 ChangeTime: 0 Path: "/31/pdisk-31.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574941Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 32, response# PDiskStateInfo { PDiskId: 32 CreateTime: 0 ChangeTime: 0 Path: "/32/pdisk-32.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574948Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 34, response# PDiskStateInfo { PDiskId: 34 CreateTime: 0 ChangeTime: 0 Path: "/34/pdisk-34.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 
214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574955Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 35, response# PDiskStateInfo { PDiskId: 35 CreateTime: 0 ChangeTime: 0 Path: "/35/pdisk-35.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574962Z node 28 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 33, response# PDiskStateInfo { PDiskId: 33 CreateTime: 0 ChangeTime: 0 Path: "/33/pdisk-33.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120028 2025-06-03T10:32:59.574968Z node 28 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-03T10:32:59.615702Z node 28 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:59.657283Z node 28 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:59.657401Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/28/pdisk-28.data" Duration: 60000000 } Deadline: 180028000 } } 2025-06-03T10:32:59.657413Z node 28 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.028000Z 2025-06-03T10:32:59.658671Z node 28 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Unknown request user-r-1" } } >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessages [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery >> TTicketParserTest::BulkAuthorizationRetryError [GOOD] >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately >> TCmsTest::PermissionDuration [GOOD] >> TCmsTest::RacyStartCollecting >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryError [GOOD] >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately >> TCmsTest::RequestReplacePDiskDoesntBreakGroup [GOOD] >> TCmsTest::RequestReplacePDiskConsecutiveWithDone >> TCmsTest::WalleRebootDownNode >> PersQueueSdkReadSessionTest::StopResumeReadingData [GOOD] >> ReadSessionImplTest::CreatePartitionStream [GOOD] >> ReadSessionImplTest::BrokenCompressedData [GOOD] >> ReadSessionImplTest::CommitOffsetTwiceIsError [GOOD] >> ReadSessionImplTest::CommonHandler [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestScheduledPermissionWithDefaultPolicy [GOOD] Test command err: 2025-06-03T10:32:54.128920Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:54.129931Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:54.132667Z node 1 :CMS DEBUG: 
console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:54.132716Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:54.132778Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:54.132866Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:54.133358Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:54.133439Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:54.133872Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:54.133902Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:54.135734Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:54.135764Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:54.135828Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:54.135884Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:54.166037Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:54.209169Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:54.209307Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:54.210871Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:54.211023Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:54.211031Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:54.211041Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:54.211045Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:54.211064Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:54.211123Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:54.211152Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:54.212906Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:32:54.255189Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:54.255253Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:54.255403Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { 
DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:32:54.255448Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:54.303240Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:54.303335Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:54.303468Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120029000 } Timestamp: 120029000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-03T10:32:54.333921Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:54.375212Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:54.375290Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 10 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 
1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-03T10:32:54.375301Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:54.409652Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:54.409695Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:54.409713Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:54.409792Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:54.409802Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:54.409816Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:32:54.409826Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:32:54.409830Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:32:54.409834Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:32:54.409838Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:54.409856Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:32:54.409865Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:54.409875Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:32:54.409920Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.130000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:32:54.420888Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:32:54.420969Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180130000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-03T10:32:54.420979Z node 1 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.130000Z 2025-06-03T10:32:54.442610Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:32:54.442678Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:54.442701Z node 1 :CMS DEBUG: 
cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:54.442716Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:54.442772Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:32:54.442782Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-03T10:32:54.442794Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:32:54.442802Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:32:54.442820Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:32:54.442827Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (2) (permission user-p-2 until 197 ... Actions { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:00.656133Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:00.656260Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } Deadline: 180333560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 27 InterconnectPort: 12003 } } } Permissions { Id: "user-p-4" Action { Type: SHUTDOWN_HOST Host: "28" Duration: 60000000 } Deadline: 180333560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 28 InterconnectPort: 12004 } } } } 2025-06-03T10:33:00.656457Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 30 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 
ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:33:00.656517Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:00.667379Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:00.667449Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 2025-06-03T10:33:00.678435Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12003 (27) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678458Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12004 (28) (permission user-p-4 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678503Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:00.678519Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:00.678531Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:00.678587Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'29\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } Actions { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:00.678596Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'29\' of tenant \'user0\': too many unavailable nodes. 
Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-03T10:33:00.678605Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 29, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:33:00.678608Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 29, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:33:00.678612Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:00.678621Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'30\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-03T10:33:00.678624Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 3, down nodes: 0 2025-06-03T10:33:00.678626Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 3, down nodes: 0 2025-06-03T10:33:00.678629Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:00.678634Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'31\' of tenant \'user0\': too many unavailable nodes. Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-03T10:33:00.678661Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 31, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 4, down nodes: 0 2025-06-03T10:33:00.678663Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 31, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 4, down nodes: 0 2025-06-03T10:33:00.678665Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:00.678671Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 Issue { Type: TENANT_DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'32\' of tenant \'user0\': too many unavailable nodes. 
Locked: 2, down: 0, total: 8, limit: 30%" } 2025-06-03T10:33:00.678674Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 5, down nodes: 0 2025-06-03T10:33:00.678676Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 5, down nodes: 0 2025-06-03T10:33:00.678678Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:00.678693Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-5, requestId# user-r-1, owner# user 2025-06-03T10:33:00.678698Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12005 (29) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678702Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-6, requestId# user-r-1, owner# user 2025-06-03T10:33:00.678705Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12006 (30) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678708Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-7, requestId# user-r-1, owner# user 2025-06-03T10:33:00.678711Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12007 (31) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678714Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-8, requestId# user-r-1, owner# user 2025-06-03T10:33:00.678717Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12008 (32) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:00.678724Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:00.678756Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-5, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 2025-06-03T10:33:00.678763Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-6, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 2025-06-03T10:33:00.678768Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-7, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 2025-06-03T10:33:00.678773Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-8, validity# 1970-01-01T00:03:00.436584Z, action# Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 2025-06-03T10:33:00.678779Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:00.689805Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:00.689943Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-5" Action { Type: SHUTDOWN_HOST Host: "29" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 29 InterconnectPort: 12005 } } } Permissions { Id: "user-p-6" Action { Type: SHUTDOWN_HOST Host: "30" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: 
"::1" State: UP NodeId: 30 InterconnectPort: 12006 } } } Permissions { Id: "user-p-7" Action { Type: SHUTDOWN_HOST Host: "31" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 31 InterconnectPort: 12007 } } } Permissions { Id: "user-p-8" Action { Type: SHUTDOWN_HOST Host: "32" Duration: 60000000 } Deadline: 180436584 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 32 InterconnectPort: 12008 } } } } >> TCmsTest::Notifications [GOOD] >> TCmsTest::Mirror3dcPermissions ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> ReadSessionImplTest::CommonHandler [GOOD] Test command err: 2025-06-03T10:32:29.652942Z :ReadSession INFO: Random seed for debugging is 1748946749652936 2025-06-03T10:32:29.756278Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669092810100141:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.756302Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.762964Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669091499240720:2221];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002500/r3tmp/tmpBhiNMi/pdisk_1.dat 2025-06-03T10:32:29.813883Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.814004Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:29.820145Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:29.851107Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:29.859112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.859142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.864961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17029, node 1 2025-06-03T10:32:29.921505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.921551Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.933843Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:32:29.937263Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:29.978096Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002500/r3tmp/yandexzRAdNL.tmp 2025-06-03T10:32:29.978124Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
/home/runner/.ya/build/build_root/u93c/002500/r3tmp/yandexzRAdNL.tmp 2025-06-03T10:32:29.978209Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002500/r3tmp/yandexzRAdNL.tmp 2025-06-03T10:32:29.978278Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:29.983956Z INFO: TTestServer started on Port 25788 GrpcPort 17029 TClient is connected to server localhost:25788 PQClient connected to localhost:17029 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:30.064388Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-06-03T10:32:30.386687Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095794208142:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.386781Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.386951Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095794208179:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.388925Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:30.406165Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669095794208181:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-03T10:32:30.480173Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669095794208209:2128] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:30.486147Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480 2025-06-03T10:32:30.488300Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669097105068413:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.488436Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=OTFmYzczYTEtMjg0YWNiMGItMmZiYWUwNmEtMjRmODQ3NzE=, ActorId: [1:7511669097105068364:2333], ActorState: ExecuteState, TraceId: 01jwtnk2yp6dbd4wgkz7hm2rhg, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.489069Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.489149Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669095794208217:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.489579Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=NTMwOTEzYzUtNDg1MTdlM2QtZDc0MGYxYTctOGQyYzY5ZWQ=, ActorId: [2:7511669095794208140:2305], ActorState: ExecuteState, TraceId: 01jwtnk2xhbbf0895cjz1tcftw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.489726Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.563087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:30.632252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:17029", true, true, 1000); 2025-06-03T10:32:30.671416Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976720664. Ctx: { TraceId: 01jwtnk35x3xc5zrvtzyca2xav, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OThkYzU3YTMtZTNlYTYwOGItOGExZWI5ZDItM2M0NjBkYTA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511669097105068804:2921] 2025-06-03T10:32:34.756578Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669092810100141:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.756627Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:34.761668Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[2:7511669091499240720:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:34.761708Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeo ... 
p:890: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 after read state TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1) EndOffset 3 ReadOffset 3 ReadGuid 72f95d99-b4659a9e-6eaf824c-d3ce6e2e has messages 1 2025-06-03T10:33:02.025512Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1917: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 read done: guid# 72f95d99-b4659a9e-6eaf824c-d3ce6e2e, partition# TopicId: Topic rt3.dc1--test-topic in dc dc1 in database: Root, partition 0(assignId:1), size# 220 2025-06-03T10:33:02.025536Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2079: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 response to read: guid# 72f95d99-b4659a9e-6eaf824c-d3ce6e2e 2025-06-03T10:33:02.025731Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2122: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 Process answer. Aval parts: 0 2025-06-03T10:33:02.026031Z :DEBUG: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] [dc1] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.026185Z :DEBUG: [/Root] Decompression task done. Partition/PartitionSessionId: 0 (2-2) 2025-06-03T10:33:02.026171Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 grpc read done: success# 1, data# { read { } } 2025-06-03T10:33:02.026237Z :DEBUG: [/Root] Take Data. Partition 0. Read: {0, 0} (2-2) 2025-06-03T10:33:02.026254Z :DEBUG: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] [dc1] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-03T10:33:02.026246Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:1816: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 got read request: guid# 8af6ad69-40b4327-e85d7fdf-82e78712 DataReceived { PartitionStreamId: 1 PartitionId: 0 Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "dc1". Topic: "test-topic" Partition: 0 PartitionKey: "" Information: { Offset: 2 SeqNo: 3 MessageGroupId: "test-message-group-id" CreateTime: 2025-06-03T10:33:00.918000Z WriteTime: 2025-06-03T10:33:00.919000Z Ip: "ipv6:[::1]:44770" UncompressedSize: 8 Meta: { "logtype": "unknown", "ident": "unknown", "server": "ipv6:[::1]:44770" } } } } 2025-06-03T10:33:02.026291Z :INFO: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] Closing read session. Close timeout: 3.000000s 2025-06-03T10:33:02.026301Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-03T10:33:02.026310Z :INFO: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1236 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:33:02.026489Z :INFO: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] Closing read session. 
Close timeout: 0.000000s 2025-06-03T10:33:02.026496Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): dc1:test-topic:0:1:2:2 2025-06-03T10:33:02.026501Z :INFO: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1236 BytesRead: 24 MessagesRead: 3 BytesReadCompressed: 84 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:33:02.026518Z :NOTICE: [/Root] [/Root] [b9346fe4-9f2a0773-b3ac5167-2f72b5af] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:33:02.026564Z node 7 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 grpc read done: success# 0, data# { } 2025-06-03T10:33:02.026575Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 grpc read failed 2025-06-03T10:33:02.026580Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 grpc closed 2025-06-03T10:33:02.026596Z node 7 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer shared/user session shared/user_7_1_18399123803048186955_v1 is DEAD 2025-06-03T10:33:02.026756Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037892] Destroy direct read session shared/user_7_1_18399123803048186955_v1 2025-06-03T10:33:02.026792Z node 8 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [7:7511669224816281562:2526] destroyed 2025-06-03T10:33:02.026825Z node 8 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: shared/user_7_1_18399123803048186955_v1 2025-06-03T10:33:02.026979Z node 7 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [7:7511669224816281559:2523] disconnected; active server actors: 1 2025-06-03T10:33:02.026990Z node 7 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037893][rt3.dc1--test-topic] pipe [7:7511669224816281559:2523] client user disconnected session shared/user_7_1_18399123803048186955_v1 2025-06-03T10:33:02.440086Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440096Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440101Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:33:02.440194Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:33:02.440315Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:33:02.440381Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440452Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: 13. Commit offset: 31 2025-06-03T10:33:02.440736Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440740Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440743Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:33:02.440808Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:33:02.440907Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:33:02.440957Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.440994Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". 
Partition: 1. Read offset: (NULL) 2025-06-03T10:33:02.441597Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 Post function 2025-06-03T10:33:02.441777Z :INFO: Error decompressing data: (TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check) 2025-06-03T10:33:02.441791Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-3) 2025-06-03T10:33:02.441828Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:33:02.441836Z :DEBUG: Take Data. Partition 1. Read: {0, 1} (2-2) 2025-06-03T10:33:02.441841Z :DEBUG: Take Data. Partition 1. Read: {0, 2} (3-3) 2025-06-03T10:33:02.441850Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 3, size 16 bytes DataReceived { PartitionStreamId: 1 PartitionId: 1 Message { DataDecompressionError: "(TZLibDecompressorError) util/stream/zlib.cpp:143: inflate error(incorrect header check)" Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 1 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 2 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } Message { Data: ..8 bytes.. Partition stream id: 1 Cluster: "TestCluster". Topic: "TestTopic" Partition: 1 PartitionKey: "" Information: { Offset: 3 SeqNo: 1 MessageGroupId: "src_id" CreateTime: 1970-01-01T00:00:00.042000Z WriteTime: 1970-01-01T00:00:00.042000Z Ip: "::1" UncompressedSize: 0 Meta: { } } } } 2025-06-03T10:33:02.442343Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.442348Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.442351Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:33:02.442416Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:33:02.442523Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:33:02.442566Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.442595Z :INFO: [db] [sessionid] [cluster] Confirm partition stream create. Partition stream id: 1. Cluster: "TestCluster". Topic: "TestTopic". Partition: 1. Read offset: (NULL) 2025-06-03T10:33:02.442702Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.442742Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:33:02.442774Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:33:02.442781Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes 2025-06-03T10:33:02.442792Z :DEBUG: [db] [sessionid] [cluster] Commit offsets [1, 2). 
Partition stream id: 1 2025-06-03T10:33:02.443132Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.443136Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.443139Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:33:02.443187Z :DEBUG: [db] [sessionid] [cluster] Successfully connected. Initializing session 2025-06-03T10:33:02.443266Z :INFO: [db] [sessionid] [cluster] Server session id: 123-session-id-321 2025-06-03T10:33:02.443302Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.443393Z :DEBUG: [db] [sessionid] [cluster] After sending read request: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:33:02.443438Z :DEBUG: Decompression task done. Partition/PartitionSessionId: 1 (1-1) 2025-06-03T10:33:02.443455Z :DEBUG: Take Data. Partition 1. Read: {0, 0} (1-1) 2025-06-03T10:33:02.443468Z :DEBUG: [db] [sessionid] [cluster] The application data is transferred to the client. Number of messages 1, size 8 bytes >> TCmsTest::TestForceRestartModeScheduled [GOOD] >> TCmsTest::TestForceRestartModeScheduledDisconnects >> TCmsTest::RestartNodeInDownState [GOOD] >> TCmsTest::SamePriorityRequest >> TCmsTenatsTest::TestClusterRatioLimit [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode >> TCmsTest::StateRequest >> TCmsTest::RequestRestartServicesOk [GOOD] >> TCmsTest::RequestRestartServicesReject >> TCmsTest::RacyStartCollecting [GOOD] >> TCmsTest::PriorityRange >> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled >> TxUsage::WriteToTopic_Demo_17_Query [GOOD] |69.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |69.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut |69.9%| [LD] {RESULT} $(B)/ydb/core/tx/long_tx_service/ut/ydb-core-tx-long_tx_service-ut >> TTicketParserTest::BulkAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::BulkAuthorization >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table >> TTicketParserTest::NebiusAuthenticationRetryError [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately >> TTicketParserTest::BulkAuthorization [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 >> TCmsTest::WalleRebootDownNode [GOOD] >> TCmsTest::WalleCleanupTest >> TTicketParserTest::NebiusAuthorizationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAuthorization >> LocalPartition::WithoutPartitionPartitionRelocation [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission >> TTicketParserTest::AuthenticationRetryError [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately >> TCmsTest::RequestReplacePDiskConsecutiveWithDone [GOOD] >> TTicketParserTest::AuthorizationWithUserAccount2 [GOOD] >> TTicketParserTest::BulkAuthorizationModify >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] >> TTicketParserTest::NebiusAuthorization [GOOD] >> TTicketParserTest::NebiusAuthorizationModify >> TCmsTest::WalleTasks [GOOD] >> TCmsTest::WalleTasksWithNodeLimit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::RequestReplacePDiskConsecutiveWithDone [GOOD] Test command err: 2025-06-03T10:32:56.951679Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 
2025-06-03T10:32:56.952238Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:56.954059Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:56.954117Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:56.954403Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:56.954429Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:56.954990Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:56.955062Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:56.955127Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:56.955224Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:56.956765Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:56.956786Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:56.956809Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:56.956842Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:56.988213Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:57.020575Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:57.020671Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.021861Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.021990Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:57.021997Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:57.022004Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:57.022007Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:57.022021Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.022067Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:57.022090Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:57.023646Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 
Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:57.065653Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.065747Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:57.126133Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:57.126171Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:57.126245Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:57.126575Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } 
Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... mplete 2025-06-03T10:33:02.923018Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.923184Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/29/pdisk-58.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false 2025-06-03T10:33:02.923191Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/29/pdisk-58.data" Duration: 60000000 2025-06-03T10:33:02.923227Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:02.923241Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-5, requestId# user-r-5, owner# user 2025-06-03T10:33:02.923248Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 29:58 (::1:/29/pdisk-58.data) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.923258Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:02.923285Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-5, validity# 1970-01-01T00:03:00.442584Z, action# Type: REPLACE_DEVICES Host: "::1" Devices: "/29/pdisk-58.data" Duration: 60000000 2025-06-03T10:33:02.934079Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:02.934162Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/29/pdisk-58.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-5" Permissions { Id: "user-p-5" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/29/pdisk-58.data" Duration: 60000000 } Deadline: 180442584 } } 2025-06-03T10:33:02.934260Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-5 2025-06-03T10:33:02.934269Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:02.934282Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:02.934300Z node 25 :CMS 
NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-5, reason# explicit remove 2025-06-03T10:33:02.945144Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:02.945213Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-5" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:02.956451Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:02.956476Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:02.956490Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.956650Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/30/pdisk-60.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false 2025-06-03T10:33:02.956658Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/30/pdisk-60.data" Duration: 60000000 2025-06-03T10:33:02.956691Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:02.956705Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-6, requestId# user-r-6, owner# user 2025-06-03T10:33:02.956712Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 30:60 (::1:/30/pdisk-60.data) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.956719Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:02.956747Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-6, validity# 1970-01-01T00:03:00.545608Z, action# Type: REPLACE_DEVICES Host: "::1" Devices: "/30/pdisk-60.data" Duration: 60000000 2025-06-03T10:33:02.967564Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:02.967639Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/30/pdisk-60.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-6" Permissions { Id: "user-p-6" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/30/pdisk-60.data" Duration: 60000000 } Deadline: 180545608 } } 2025-06-03T10:33:02.967751Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-6 2025-06-03T10:33:02.967763Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:02.967782Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:02.967803Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-6, reason# explicit remove 2025-06-03T10:33:02.978618Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:02.978679Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-6" DryRun: false }, response# 
NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:03.010795Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.010841Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.010856Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.011020Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/31/pdisk-62.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false 2025-06-03T10:33:03.011028Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/31/pdisk-62.data" Duration: 60000000 2025-06-03T10:33:03.011071Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.011087Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-7, requestId# user-r-7, owner# user 2025-06-03T10:33:03.011096Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 31:62 (::1:/31/pdisk-62.data) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:03.011103Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.011139Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-7, validity# 1970-01-01T00:03:00.648632Z, action# Type: REPLACE_DEVICES Host: "::1" Devices: "/31/pdisk-62.data" Duration: 60000000 2025-06-03T10:33:03.022064Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.022157Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/31/pdisk-62.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-7" Permissions { Id: "user-p-7" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/31/pdisk-62.data" Duration: 60000000 } Deadline: 180648632 } } 2025-06-03T10:33:03.022299Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-7 2025-06-03T10:33:03.022310Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:03.022327Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:03.022356Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-7, reason# explicit remove 2025-06-03T10:33:03.033194Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:03.033260Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-7" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:03.106259Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.106310Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.106325Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.106495Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: 
REPLACE_DEVICES Host: "::1" Devices: "/32/pdisk-64.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false 2025-06-03T10:33:03.106503Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: REPLACE_DEVICES Host: "::1" Devices: "/32/pdisk-64.data" Duration: 60000000 2025-06-03T10:33:03.106550Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.106566Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-8, requestId# user-r-8, owner# user 2025-06-03T10:33:03.106574Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for PDisk 32:64 (::1:/32/pdisk-64.data) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:03.106583Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.106620Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-8, validity# 1970-01-01T00:03:00.751656Z, action# Type: REPLACE_DEVICES Host: "::1" Devices: "/32/pdisk-64.data" Duration: 60000000 2025-06-03T10:33:03.117859Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.117959Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: REPLACE_DEVICES Host: "::1" Devices: "/32/pdisk-64.data" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-8" Permissions { Id: "user-p-8" Action { Type: REPLACE_DEVICES Host: "::1" Devices: "/32/pdisk-64.data" Duration: 60000000 } Deadline: 180751656 } } 2025-06-03T10:33:03.118088Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-8 2025-06-03T10:33:03.118097Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:03.118110Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:03.118134Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-8, reason# explicit remove 2025-06-03T10:33:03.128908Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:03.128960Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-8" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table >> TTicketParserTest::BulkAuthorizationModify [GOOD] >> TCmsTest::SamePriorityRequest [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::TestForceRestartModeScheduledDisconnects [GOOD] Test command err: 2025-06-03T10:32:57.139667Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:57.140402Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:57.142370Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:57.142415Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:57.142466Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:57.142538Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:57.142915Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:57.142969Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:57.143234Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:57.143254Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:57.144607Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:57.144625Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:57.144654Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:57.144701Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:57.172917Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:57.215633Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:57.215740Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.216918Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.217026Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:57.217031Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:57.217038Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:57.217041Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:57.217053Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.217100Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:57.217122Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:57.219393Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:57.261386Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.261448Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:57.310219Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:57.310265Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:57.310334Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:05:00Z 2025-06-03T10:32:57.310631Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 300029000 } Timestamp: 
300029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 300029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 300029000 } Timestamp: 300029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... 1-1-0-7-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 300028000 } Devices { Name: "pdisk-32-32" State: UP Timestamp: 300028000 } Timestamp: 300028000 NodeId: 32 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 300028000 } } 2025-06-03T10:33:03.205801Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "25" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:03.205809Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "25" Duration: 60000000 2025-06-03T10:33:03.205818Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:03.205846Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:33:03.205849Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:33:03.205851Z node 25 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:33:03.205853Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.205860Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 2025-06-03T10:33:03.205863Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:03.205875Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: You cannot get two or more disks from the same group at the same time in partial permissions allowed mode) 2025-06-03T10:33:03.205881Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 2025-06-03T10:33:03.205883Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:03.205890Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: You cannot get two or more disks from the same group at the same time in partial permissions allowed mode) 2025-06-03T10:33:03.205902Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:33:03.205907Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:06:00Z) 
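The check sequence just above shows why hosts 26 and 27 are deferred: all three hosts carry vslots of the same block-4-2 group, and with PartialPermissionAllowed the CMS grants at most one new lock in that group per evaluation, answering DISALLOW_TEMP for the rest and re-checking them on each later TEvCheckRequest. Below is a standalone pass-by-pass model of that behaviour; the single shared group is an assumption made for illustration, not read from the config dump, and none of this is YDB's actual node_checkers.cpp logic.

```cpp
// Standalone model of granting host locks one per storage group per
// evaluation pass; deferred actions are retried on later passes.
#include <deque>
#include <iostream>
#include <set>

int main() {
    std::deque<int> pending = {25, 26, 27}; // SHUTDOWN_HOST actions from the log
    std::set<int> locked;                   // hosts currently holding a lock
    int pass = 0;
    while (!pending.empty()) {
        ++pass; // one pass per TEvPermissionRequest / TEvCheckRequest
        bool granted = false;
        for (std::size_t i = 0; i < pending.size();) {
            int node = pending[i];
            if (!granted) {
                // First pending action of the pass takes the group's slot.
                locked.insert(node);
                pending.erase(pending.begin() + i);
                granted = true;
                std::cout << "pass " << pass << ": ALLOW node " << node << "\n";
            } else {
                // Same group already touched this pass -> DISALLOW_TEMP;
                // the action stays scheduled for the next check.
                std::cout << "pass " << pass << ": DISALLOW_TEMP node "
                          << node << "\n";
                ++i;
            }
        }
    }
}
```

Run, this prints ALLOW for node 25 and DISALLOW_TEMP for 26 and 27 on pass 1, then ALLOW 26 on pass 2 and ALLOW 27 on pass 3, mirroring permissions user-p-1 through user-p-3 issued across the three requests in this trace.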
2025-06-03T10:33:03.205916Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.205947Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:06:00.028000Z, action# Type: SHUTDOWN_HOST Host: "25" Duration: 60000000 2025-06-03T10:33:03.205971Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:03.246635Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.288367Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.288504Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "25" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "25" Duration: 60000000 } Deadline: 360028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-03T10:33:03.288519Z node 25 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:08:00.028000Z 2025-06-03T10:33:03.313113Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:06:00Z) 2025-06-03T10:33:03.313239Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.313351Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:05:00Z 2025-06-03T10:33:03.313543Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:03.313557Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } 2025-06-03T10:33:03.313568Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio 
limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:03.313616Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.313631Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } 2025-06-03T10:33:03.313636Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:33:03.313653Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: You cannot get two or more disks from the same group at the same time in partial permissions allowed mode) 2025-06-03T10:33:03.313675Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-03T10:33:03.313683Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:06:00Z) 2025-06-03T10:33:03.313697Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.313730Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:06:00.131512Z, action# Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 2025-06-03T10:33:03.313757Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:03.324595Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.324618Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.324690Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Deadline: 360131512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:33:03.346343Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:06:00Z) 2025-06-03T10:33:03.346367Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:06:00Z) 2025-06-03T10:33:03.346427Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.346494Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:05:00Z 2025-06-03T10:33:03.346603Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } } PartialPermissionAllowed: true Schedule: true Reason: "" 
TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:03.346611Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: GENERIC Message: "You cannot get two or more disks from the same group at the same time in partial permissions allowed mode" } 2025-06-03T10:33:03.346620Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 2, down nodes: 0 2025-06-03T10:33:03.346652Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.346671Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-03T10:33:03.346678Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12003 (27) (permission user-p-3 until 1970-01-01T00:06:00Z) 2025-06-03T10:33:03.346690Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.346714Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:06:00.233024Z, action# Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 2025-06-03T10:33:03.346722Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:03.357596Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.357622Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.357693Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } Deadline: 360233024 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 27 InterconnectPort: 12003 } } } }
>> TCmsTest::RequestRestartServicesReject [GOOD]
>> TCmsTest::RequestRestartServicesPartial
>> TCmsTest::StateRequest [GOOD]
>> TCmsTest::ScheduledEmergencyDuringRollingRestart
>> TCmsTenatsTest::RequestRestartServices [GOOD]
>> TCmsTest::PriorityRange [GOOD]
>> TTicketParserTest::NebiusAuthorizationModify [GOOD]
>> TCmsTenatsTest::TestClusterRatioLimitForceRestartMode [GOOD]
>> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::BulkAuthorizationModify [GOOD]
Test command err: 2025-06-03T10:32:49.453532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669175358522723:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:49.453622Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f77/r3tmp/tmpqYPO5n/pdisk_1.dat 2025-06-03T10:32:49.503122Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669175358522703:2079] 1748946769453404 != 1748946769453407 2025-06-03T10:32:49.505969Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were
not loaded TServer::EnableGrpc on GrpcPort 14183, node 1 2025-06-03T10:32:49.513823Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:49.513839Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:49.513841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:49.513883Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23508 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:49.579260Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:49.579286Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:49.580148Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:49.580227Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:49.583764Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:32:49.583790Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [336a7e5f08b0] Connect to grpc://localhost:9070 2025-06-03T10:32:49.584680Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7e5f08b0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:32:49.587052Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7e5f08b0] Status 14 Service Unavailable 2025-06-03T10:32:49.587131Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:49.587145Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:49.587162Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:32:49.587253Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7e5f08b0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:32:49.587903Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7e5f08b0] Status 14 Service Unavailable 2025-06-03T10:32:49.587941Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:49.587951Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:50.454500Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:50.454540Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:32:50.454630Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7e5f08b0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:32:50.455631Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7e5f08b0] Status 14 Service Unavailable 2025-06-03T10:32:50.455687Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:50.455701Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:51.454992Z node 1 
:TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:51.455038Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:32:51.455169Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7e5f08b0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:32:51.455949Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7e5f08b0] Status 14 Service Unavailable 2025-06-03T10:32:51.455983Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket AKIA****MPLE (B3EDC139) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:51.455991Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:54.453744Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669175358522723:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:54.453789Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:54.456282Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:54.456313Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:32:54.456406Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7e5f08b0] Request BulkAuthorizeRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:32:54.457091Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [336a7e5f08b0] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:54.457148Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-03T10:33:01.755672Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669230542547890:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:01.756042Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f77/r3tmp/tmpdjBLFa/pdisk_1.dat 2025-06-03T10:33:01.767121Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:01.767259Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669230542547869:2079] 1748946781755578 != 1748946781755581 TServer::EnableGrpc on GrpcPort 15777, node 2 
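The retry pattern in the records above is driven by error classification: a gRPC status 14 ("Service Unavailable") marks the ticket with a retryable error, and each refresh tick (roughly once per second in this trace) replays the BulkAuthorizeRequest until one succeeds and the ticket flips to a valid token. A small illustrative model of that loop follows, with scripted responses standing in for the real AccessService client; the real classification lives in ticket_parser_impl.h and is more involved.

```cpp
// Illustrative model of the refresh loop: scripted responses replace the
// AccessService client; classification is reduced to "status 14 is
// retryable, any other non-zero status is permanent".
#include <iostream>
#include <string>
#include <vector>

struct Status { int code; std::string message; }; // 0 = success

bool IsRetryable(const Status& s) { return s.code == 14; }

int main() {
    // Matches the trace: three "Service Unavailable" replies across refresh
    // ticks, then a BulkAuthorizeResponse carrying subject user1.
    std::vector<Status> replies = {
        {14, "Service Unavailable"},
        {14, "Service Unavailable"},
        {14, "Service Unavailable"},
        {0, "subject user1"},
    };
    for (const Status& s : replies) {
        if (s.code == 0) {
            std::cout << "ticket now has valid token of user1@as\n";
            break;
        }
        if (IsRetryable(s)) {
            std::cout << "retryable error '" << s.message
                      << "', refresh scheduled\n";
        } else {
            std::cout << "permanent error '" << s.message << "', giving up\n";
            break;
        }
    }
}
```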
2025-06-03T10:33:01.777106Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:01.777123Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:01.777124Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:01.777160Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65162 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_De ... o.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:04.506482Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:04.506956Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:04.507548Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:04.509374Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.509390Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.509392Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.509405Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.read) 2025-06-03T10:33:04.509426Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(somewhere.sleep) 2025-06-03T10:33:04.509429Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.list) 2025-06-03T10:33:04.509434Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.write) 2025-06-03T10:33:04.509438Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:487: Ticket **** (8E120919) asking for AccessServiceAuthorization(something.eat) 2025-06-03T10:33:04.509448Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [336a7d1f28b0] Connect to grpc://localhost:2572 2025-06-03T10:33:04.509970Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f28b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.read" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:33:04.510611Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f28b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "somewhere.sleep" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:33:04.510666Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f28b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.list" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:33:04.510727Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f28b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.write" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:33:04.510771Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f28b0] Request AuthorizeRequest { iam_token: "**** (8E120919)" permission: "something.eat" resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } } 2025-06-03T10:33:04.512014Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7d1f28b0] Status 16 Access Denied 2025-06-03T10:33:04.512072Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission somewhere.sleep now has a permanent error "Access Denied" retryable:0 2025-06-03T10:33:04.512084Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7d1f28b0] Status 16 Access Denied 2025-06-03T10:33:04.512093Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** 
(8E120919) permission something.read now has a permanent error "Access Denied" retryable:0 2025-06-03T10:33:04.512109Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7d1f28b0] Status 16 Access Denied 2025-06-03T10:33:04.512123Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.list now has a permanent error "Access Denied" retryable:0 2025-06-03T10:33:04.512145Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [336a7d1f28b0] Status 16 Access Denied 2025-06-03T10:33:04.512170Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1407: Ticket **** (8E120919) permission something.eat now has a permanent error "Access Denied" retryable:0 2025-06-03T10:33:04.512187Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [336a7d1f28b0] Response AuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:33:04.512203Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:1392: Ticket **** (8E120919) permission something.write now has a valid subject "user1@as" 2025-06-03T10:33:04.512205Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:997: Ticket **** (8E120919) asking for UserAccount(user1@as) 2025-06-03T10:33:04.512466Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [336a7d1f2e30] Connect to grpc://localhost:7866 2025-06-03T10:33:04.512568Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d1f2e30] Request GetUserAccountRequest { user_account_id: "user1" } 2025-06-03T10:33:04.513938Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [336a7d1f2e30] Response UserAccount { yandex_passport_user_account { login: "login1" } } 2025-06-03T10:33:04.514037Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of login1@passport 2025-06-03T10:33:04.790186Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669243181046496:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:04.790201Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f77/r3tmp/tmpDJvvWR/pdisk_1.dat 2025-06-03T10:33:04.801854Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:04.801916Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669243181046476:2079] 1748946784790075 != 1748946784790078 TServer::EnableGrpc on GrpcPort 21780, node 5 2025-06-03T10:33:04.813462Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:04.813473Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:04.813474Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:04.813518Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20006 WaitRootIsUp 'Root'... 
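In the node 4 trace above, the parser fans out one AuthorizeRequest per permission; each settles independently ("Access Denied" is a permanent error, a returned subject means granted), and the single granted permission (something.write) supplies the subject that the user-account service then resolves to login1@passport. The following is a self-contained model of that aggregation under those scripted results; Authorize and ResolveLogin are hypothetical helper names, not the service APIs.

```cpp
// Self-contained model of the per-permission fan-out; Authorize and
// ResolveLogin are hypothetical stand-ins for the AccessService and
// user-account service calls seen in the trace.
#include <iostream>
#include <map>
#include <optional>
#include <string>

std::optional<std::string> Authorize(const std::string& permission) {
    if (permission == "something.write")
        return "user1";  // granted: the response carries the subject id
    return std::nullopt; // permanent "Access Denied" for the rest
}

std::string ResolveLogin(const std::string& subjectId) {
    // GetUserAccountRequest analogue: subject id -> passport login.
    static const std::map<std::string, std::string> accounts = {
        {"user1", "login1"},
    };
    return accounts.at(subjectId) + "@passport";
}

int main() {
    const char* permissions[] = {"something.read", "somewhere.sleep",
                                 "something.list", "something.write",
                                 "something.eat"};
    std::optional<std::string> subject;
    for (const char* p : permissions) {
        if (auto s = Authorize(p)) {
            std::cout << p << ": now has a valid subject " << *s << "\n";
            subject = s; // a granted permission pins the ticket's subject
        } else {
            std::cout << p << ": permanent error \"Access Denied\"\n";
        }
    }
    if (subject)
        std::cout << "ticket has now valid token of "
                  << ResolveLogin(*subject) << "\n"; // login1@passport
}
```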
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:04.893673Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:04.893707Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:04.894165Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:04.894704Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:04.896198Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.896207Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.896210Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.896230Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) asking for AccessServiceBulkAuthorization( something.read) 2025-06-03T10:33:04.896244Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [336a7d043670] Connect to grpc://localhost:23367 2025-06-03T10:33:04.896422Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d043670] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } } result_filter: ALL_FAILED } 2025-06-03T10:33:04.898089Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [336a7d043670] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:33:04.898162Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:04.898309Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.898316Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.898318Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.898326Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:508: Ticket **** (8E120919) 
asking for AccessServiceBulkAuthorization( something.read something.write) 2025-06-03T10:33:04.898362Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [336a7d043670] Request BulkAuthorizeRequest { iam_token: "**** (8E120919)" actions { items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.read" } items { resource_path { id: "bbbb4554" type: "ydb.database" } resource_path { id: "aaaa1234" type: "resource-manager.folder" } permission: "something.write" } } result_filter: ALL_FAILED } 2025-06-03T10:33:04.898725Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [336a7d043670] Response BulkAuthorizeResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:33:04.898762Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as
>> TTicketParserTest::LoginRefreshGroupsGood [GOOD]
>> TTicketParserTest::LoginCheckRemovedUser
>> TCmsTest::VDisksEvictionShouldFailWhileSentinelIsDisabled [GOOD]
>> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest [GOOD]
Test command err: 2025-06-03T10:32:57.661678Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:57.662576Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:57.664599Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:57.664633Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:57.664680Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-03T10:32:57.664743Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:57.665031Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:57.665081Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:57.665367Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:57.665398Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:57.666941Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:57.666967Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:57.667018Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:57.667070Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:57.695422Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:57.727866Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:57.727953Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.729111Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.729239Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:57.729245Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:57.729253Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:57.729256Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:57.729270Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:57.729311Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:57.729332Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.731045Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:57.773062Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.773125Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:57.832281Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:57.832330Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:57.832405Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:57.832640Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 
120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... 33:03.406091Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.406200Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Deadline: 180028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-03T10:33:03.406212Z node 25 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.028000Z 2025-06-03T10:33:03.429689Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:03.429814Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.429843Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.429858Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.430035Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:03.430045Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.430058Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:03.430088Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: ) 2025-06-03T10:33:03.430113Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.430180Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# -80, body# User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:03.441029Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.441107Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } RequestId: "user-r-2" Deadline: 420130512 } 2025-06-03T10:33:03.441228Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:33:03.441241Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:03.441254Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:03.441280Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:33:03.452059Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:03.452137Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:03.473697Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.473735Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.473749Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.473856Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:03.473865Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-03T10:33:03.473874Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:03.473899Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.473917Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-03T10:33:03.473922Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:03.473931Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.473967Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.233536Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.473977Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:03.495217Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.495333Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180233536 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:33:03.495491Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:33:03.495503Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:03.495523Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:03.495558Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:33:03.506406Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:03.506480Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:03.580123Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.580178Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.580198Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.580379Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: 
RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:03.580394Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-03T10:33:03.580407Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:03.580450Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.580476Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-2, owner# user 2025-06-03T10:33:03.580484Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:03.580498Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:03.580544Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.336560Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.580558Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, owner# user 2025-06-03T10:33:03.591471Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:03.591559Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180336560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAuthorizationModify [GOOD] Test command err: 2025-06-03T10:32:49.590438Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669178850399196:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:49.590549Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f76/r3tmp/tmp2LbPEd/pdisk_1.dat 2025-06-03T10:32:49.643431Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:49.643576Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: 
Notification cookie mismatch for subscription [1:7511669178850399172:2079] 1748946769590210 != 1748946769590213 TServer::EnableGrpc on GrpcPort 8225, node 1 2025-06-03T10:32:49.650750Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:49.650772Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:49.650774Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:49.650806Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15617 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:49.693147Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:49.693175Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:49.694317Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:49.721576Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:49.724267Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:49.724280Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:49.724282Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:49.724516Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:49.724533Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [70893e3fe070] Connect to grpc://localhost:15485 2025-06-03T10:32:49.725009Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893e3fe070] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-03T10:32:49.727313Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70893e3fe070] Status 14 Service Unavailable 2025-06-03T10:32:49.727369Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:49.727380Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:49.727421Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893e3fe070] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-03T10:32:49.727951Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70893e3fe070] Status 1 CANCELLED 2025-06-03T10:32:49.727983Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'CANCELLED' 2025-06-03T10:32:49.935177Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669178185506211:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:49.935199Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f76/r3tmp/tmpJ9W1CK/pdisk_1.dat 2025-06-03T10:32:49.947368Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:49.947574Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669178185506192:2079] 1748946769935097 != 1748946769935100 TServer::EnableGrpc on GrpcPort 17061, node 2 2025-06-03T10:32:49.957257Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:49.957270Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:49.957272Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:49.957336Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19946 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:50.039347Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:50.039375Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:50.039701Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:50.040330Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:50.041446Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:50.041462Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:50.041465Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:50.041482Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-03T10:32:50.041492Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [70893d1fb670] Connect to grpc://localhost:19150 2025-06-03T10:32:50.041696Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893d1fb670] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-03T10:32:50.043361Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70893d1fb670] Status 14 Service Unavailable 2025-06-03T10:32:50.043406Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:50.043410Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:50.043416Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 
2025-06-03T10:32:50.043466Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893d1fb670] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response 14: "Service Unavailable" 2025-06-03T10:32:50.043858Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [70893d1fb670] Status 14 Service Unavailable 2025-06-03T10:32:50.043895Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:1139: Ticket **** (8E120919) permission something.read now has a retryable error "Service Unavailable" retryable: 1 2025-06-03T10:32:50.043902Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:50.936160Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-03T10:32:50.936197Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-03T10:32:50.936294Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893d1fb670] Request AuthorizeRequest { checks { key: 0 value { permission { name: "somet ... 5-06-03T10:33:04.648600Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.648601Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.648604Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-03T10:33:04.648622Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893c5b70f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "XXXXXXXX" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-03T10:33:04.648831Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70893c5b70f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-03T10:33:04.648869Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:04.648937Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.648944Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.648945Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.648948Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-03T10:33:04.648966Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893c5b70f0] Request 
AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "XXXXXXXX" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-03T10:33:04.649172Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70893c5b70f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-03T10:33:04.649212Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:04.649278Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.649285Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.649286Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.649289Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( monitoring.view) 2025-06-03T10:33:04.649323Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893c5b70f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "monitoring.view" } container_id: "folder" iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-03T10:33:04.649940Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70893c5b70f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } } 2025-06-03T10:33:04.649992Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:04.940469Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669242045099384:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:04.940491Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f76/r3tmp/tmpIpxI6z/pdisk_1.dat 2025-06-03T10:33:04.951990Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:04.952212Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669242045099364:2079] 1748946784940377 != 1748946784940380 TServer::EnableGrpc on GrpcPort 27100, node 5 2025-06-03T10:33:04.962060Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:04.962072Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:04.962073Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: 
failed to initialize from file: (empty maybe) 2025-06-03T10:33:04.962144Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13896 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:05.043968Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:05.044001Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:05.044326Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:05.044942Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:05.045108Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:05.045980Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:05.045990Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:05.045992Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:05.046011Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read) 2025-06-03T10:33:05.046019Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [70893d1a30f0] Connect to grpc://localhost:4049 2025-06-03T10:33:05.046205Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893d1a30f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-03T10:33:05.047894Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70893d1a30f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" 
} } } } } 2025-06-03T10:33:05.047968Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:05.048091Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:05.048097Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:05.048099Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:05.048108Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:526: Ticket **** (8E120919) asking for AccessServiceAuthorization( something.read something.write) 2025-06-03T10:33:05.048174Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [70893d1a30f0] Request AuthorizeRequest { checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "**** (8E120919)" } } } NebiusAccessService::Authorize request checks { key: 0 value { permission { name: "something.read" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } checks { key: 1 value { permission { name: "something.write" } container_id: "aaaa1234" resource_path { path { id: "bbbb4554" } } iam_token: "user1" } } NebiusAccessService::Authorize response results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } 0: "OK" 2025-06-03T10:33:05.048583Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [70893d1a30f0] Response AuthorizeResponse { results { key: 0 value { account { user_account { id: "user1" } } } } results { key: 1 value { account { user_account { id: "user1" } } } } } 2025-06-03T10:33:05.048624Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as >> PersQueueSdkReadSessionTest::ReadSessionWithAbort [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithClose ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::PriorityRange [GOOD] Test command err: 2025-06-03T10:32:58.189069Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:58.189965Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:58.192261Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:58.192301Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:58.192351Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:58.192452Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:58.192817Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:58.192876Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:58.193141Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:58.193162Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:58.194552Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:58.194571Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:58.194602Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:58.194660Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:58.222267Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:58.254722Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:58.254819Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:58.256283Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:58.256415Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:58.256420Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:58.256426Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:58.256429Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:58.256442Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:58.256463Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:58.256477Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:58.258175Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:58.300145Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:58.300200Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:58.360196Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:58.360341Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:58.360621Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: DOWN Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } 
StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1 ... tate for unknown PDisk 24:24 2025-06-03T10:33:02.146757Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:02.146808Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.146856Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:02.146863Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 2025-06-03T10:33:02.146875Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 17, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:02.146883Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW (reason: Affected group 0 has no parity parts) 2025-06-03T10:33:02.146905Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true Duration: 60000000 AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW Reason: "Affected group 0 has no parity parts" } } 2025-06-03T10:33:03.700308Z node 25 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:03.701156Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:03.702970Z node 25 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:03.703049Z node 25 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:03.703080Z node 25 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:33:03.703134Z node 25 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:03.703442Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:03.703506Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:03.703807Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.703908Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.704998Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:03.705026Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.705062Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:03.705083Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.736680Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:03.768646Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.768756Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.768781Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.768859Z node 25 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:03.768864Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:03.768870Z node 25 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:03.768873Z node 25 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:03.768884Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.768925Z node 25 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:03.768936Z node 25 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:03.769148Z node 25 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 25 PDiskId: 25 Path: "/25/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 26 PDiskId: 26 Path: "/26/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 27 PDiskId: 27 Path: "/27/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 28 PDiskId: 28 Path: "/28/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 29 PDiskId: 29 Path: "/29/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 30 PDiskId: 30 Path: "/30/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 31 PDiskId: 31 Path: "/31/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 32 PDiskId: 32 Path: "/32/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } 
VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } VSlotId { 
NodeId: 26 PDiskId: 26 VSlotId: 1001 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:03.811251Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.811310Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:03.811474Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -101 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Priority value is out of range" } } 2025-06-03T10:33:03.811528Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 101 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Priority value is out of range" } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::RequestRestartServices [GOOD] Test command err: 2025-06-03T10:32:59.861021Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:59.861687Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:59.863253Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:59.863281Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:59.863316Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:59.863363Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:59.863634Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:59.863673Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:59.863924Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:59.863942Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:59.865082Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:59.865098Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:59.865125Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:59.865168Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:59.892621Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:59.924913Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:59.924996Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:59.926167Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:59.926294Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:59.926300Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:59.926307Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:59.926310Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:59.926324Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:59.926334Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:59.926346Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:59.928169Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:59.970355Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:59.970414Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:01.609376Z node 9 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:01.610213Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:01.611709Z node 9 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:01.611795Z node 9 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:01.611821Z node 9 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
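Note on the BaseConfig dump above: each Group lists eight VSlotIds with ErasureSpecies "block-4-2", i.e. four data and two parity parts spread over eight fail domains (FailDomainIdx 0 through 7, one per node/PDisk pair). The erasure property is that a block-4-2 group stays readable with up to two unavailable vdisks, which is what the permission checks later in this log are guarding. A small illustrative sketch of that availability rule; the layout mirrors the dump, but the code is not lifted from the CMS sources.

#include <array>
#include <iostream>

// block-4-2: 4 data + 2 parity parts; any 2 of the 8 vdisks may be lost.
constexpr int kMaxUnavailableBlock42 = 2;

bool GroupStaysAvailable(const std::array<bool, 8>& vdiskDown) {
    int down = 0;
    for (bool d : vdiskDown) down += d ? 1 : 0;
    return down <= kMaxUnavailableBlock42;
}

int main() {
    std::array<bool, 8> two{};   two[0] = two[1] = true;
    std::array<bool, 8> three{}; three[0] = three[1] = three[2] = true;
    std::cout << GroupStaysAvailable(two)   << '\n';  // 1: still readable
    std::cout << GroupStaysAvailable(three) << '\n';  // 0: too many lost
}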
2025-06-03T10:33:01.611870Z node 9 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:01.612297Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:01.612352Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:01.612622Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:01.612750Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:01.613701Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:01.613728Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:01.613771Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:01.613790Z node 9 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:01.625463Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:01.657236Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:01.657345Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.657367Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.657444Z node 9 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:01.657450Z node 9 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:01.657456Z node 9 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:01.657459Z node 9 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:01.657473Z node 9 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:01.657482Z node 9 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:01.657493Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Ex ... 
"dynnode" Duration: 60000000 2025-06-03T10:33:02.003205Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 24, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 7, down nodes: 0 2025-06-03T10:33:02.003209Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 24, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 7, down nodes: 0 2025-06-03T10:33:02.003215Z node 9 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:02.003232Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-5, requestId# user-r-4, owner# user 2025-06-03T10:33:02.003239Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12013 (21) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.003248Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-6, requestId# user-r-4, owner# user 2025-06-03T10:33:02.003254Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12014 (22) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.003261Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-7, requestId# user-r-4, owner# user 2025-06-03T10:33:02.003267Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12015 (23) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.003273Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-8, requestId# user-r-4, owner# user 2025-06-03T10:33:02.003279Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12016 (24) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.003289Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:02.003328Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-5, validity# 1970-01-01T00:03:00.337048Z, action# Type: RESTART_SERVICES Host: "21" Services: "dynnode" Duration: 60000000 2025-06-03T10:33:02.003340Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-6, validity# 1970-01-01T00:03:00.337048Z, action# Type: RESTART_SERVICES Host: "22" Services: "dynnode" Duration: 60000000 2025-06-03T10:33:02.003350Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-7, validity# 1970-01-01T00:03:00.337048Z, action# Type: RESTART_SERVICES Host: "23" Services: "dynnode" Duration: 60000000 2025-06-03T10:33:02.003359Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-8, validity# 1970-01-01T00:03:00.337048Z, action# Type: RESTART_SERVICES Host: "24" Services: "dynnode" Duration: 60000000 2025-06-03T10:33:02.014402Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:02.014522Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "21" Services: "dynnode" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "22" Services: "dynnode" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "23" Services: "dynnode" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "24" Services: "dynnode" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-4" Permissions { Id: 
"user-p-5" Action { Type: RESTART_SERVICES Host: "21" Services: "dynnode" Duration: 60000000 } Deadline: 180337048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 21 InterconnectPort: 12013 } } } Permissions { Id: "user-p-6" Action { Type: RESTART_SERVICES Host: "22" Services: "dynnode" Duration: 60000000 } Deadline: 180337048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 22 InterconnectPort: 12014 } } } Permissions { Id: "user-p-7" Action { Type: RESTART_SERVICES Host: "23" Services: "dynnode" Duration: 60000000 } Deadline: 180337048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 23 InterconnectPort: 12015 } } } Permissions { Id: "user-p-8" Action { Type: RESTART_SERVICES Host: "24" Services: "dynnode" Duration: 60000000 } Deadline: 180337048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 24 InterconnectPort: 12016 } } } } 2025-06-03T10:33:02.026165Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12013 (21) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026196Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12009 (17) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026222Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12014 (22) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026230Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12010 (18) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026237Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12012 (20) (permission user-p-4 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026244Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12011 (19) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026250Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12016 (24) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026256Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12015 (23) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026327Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:02.026343Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:02.026355Z node 9 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.026464Z node 9 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "9" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:02.026471Z node 9 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "9" Services: "storage" Duration: 60000000 2025-06-03T10:33:02.026478Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 9, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 8, down nodes: 0 2025-06-03T10:33:02.026509Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:33:02.026512Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:33:02.026514Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:33:02.026517Z node 9 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:02.026530Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-9, requestId# user-r-5, owner# user 2025-06-03T10:33:02.026534Z node 9 
:CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (9) (permission user-p-9 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.026541Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:02.026574Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-9, validity# 1970-01-01T00:03:00.438560Z, action# Type: RESTART_SERVICES Host: "9" Services: "storage" Duration: 60000000 2025-06-03T10:33:02.037692Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:02.037790Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "9" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-5" Permissions { Id: "user-p-9" Action { Type: RESTART_SERVICES Host: "9" Services: "storage" Duration: 60000000 } Deadline: 180438560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 9 InterconnectPort: 12001 } } } } 2025-06-03T10:33:02.049443Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12013 (21) (permission user-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049468Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12009 (17) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049473Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (9) (permission user-p-9 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049480Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12014 (22) (permission user-p-6 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049484Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12010 (18) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049488Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12012 (20) (permission user-p-4 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049492Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12011 (19) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049496Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12016 (24) (permission user-p-8 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049500Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12015 (23) (permission user-p-7 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:02.049561Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:02.049576Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:02.049586Z node 9 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.049703Z node 9 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:02.049713Z node 9 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 60000000 2025-06-03T10:33:02.049723Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 10, with state: Up, with limit: 0, 
with ratio limit: 0, locked nodes: 9, down nodes: 0 2025-06-03T10:33:02.049750Z node 9 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (9) has planned shutdown (permission user-p-9 owned by user), VDisk [0:1:0:1:0] (::1:/10/pdisk-10.data) is locked by this request. Down: ) 2025-06-03T10:33:02.049761Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:02.060579Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:02.060642Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "10" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (9) has planned shutdown (permission user-p-9 owned by user), VDisk [0:1:0:1:0] (::1:/10/pdisk-10.data) is locked by this request. Down: " } RequestId: "user-r-6" Deadline: 420540072 } |69.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |70.0%| [LD] {RESULT} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut |70.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/http/ut/ydb-core-ymq-http-ut >> TCmsTest::ManagePermissions >> TxUsage::WriteToTopic_Demo_20_RestartBeforeCommit_Query [GOOD] >> TxUsage::WriteToTopic_Demo_34_Query [GOOD] >> TCmsTest::TestForceRestartMode >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table >> TDowntimeTest::SetIgnoredDowntimeGap [GOOD] >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup >> TxUsage::WriteToTopic_Demo_35_Table >> TStorageBalanceTest::TestScenario3 [GOOD] >> TTicketParserTest::NebiusAuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported >> KqpScan::RightJoinSimple >> TCmsTest::WalleCleanupTest [GOOD] >> TCmsTest::WalleRequestDuringRollingRestart >> KqpScan::UnionThree >> KqpSplit::AfterResolve+Unspecified >> KqpSplit::ChoosePartition+Ascending >> TCmsTest::Mirror3dcPermissions [GOOD] >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] >> TCmsTest::RequestRestartServicesPartial [GOOD] >> TCmsTest::RequestRestartServicesNoUser ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::Mirror3dcPermissions [GOOD] Test command err: 2025-06-03T10:32:56.975307Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:56.975901Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:56.978017Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:56.978091Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:56.978427Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:56.978461Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 
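Note on the DISALLOW_TEMP for host "10" above: with AvailabilityMode MODE_MAX_AVAILABILITY the CMS keeps every storage group at full redundancy, so once permission user-p-9 pins down one vdisk of group 0 (host 9), a second lock in the same group (VDisk [0:1:0:1:0] on host 10) is refused until the first permission expires. A hedged sketch of that per-group limit; the one-unavailable-vdisk rule is inferred from this ALLOW/DISALLOW pair, and the type and field names below are hypothetical.

#include <iostream>

struct GroupState {
    int lockedVDisks = 0;  // vdisks held by granted permissions
    int downVDisks = 0;    // vdisks already down
};

// MODE_MAX_AVAILABILITY (as the log suggests): never let a group have
// more than one unavailable vdisk at a time.
bool MayLockOneMore(const GroupState& g) {
    return g.lockedVDisks + g.downVDisks + 1 <= 1;
}

int main() {
    std::cout << MayLockOneMore({0, 0}) << '\n';  // 1: user-p-9 granted
    std::cout << MayLockOneMore({1, 0}) << '\n';  // 0: user-r-6 deferred
}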
2025-06-03T10:32:56.978883Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:56.978915Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:56.978962Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:32:56.979069Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:56.980517Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:56.980545Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:56.980580Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:56.980625Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:57.008371Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:57.040787Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:57.040883Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.042015Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.042129Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:57.042135Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:57.042141Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:57.042145Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:57.042158Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:57.042199Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:57.042220Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:57.043633Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { 
VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 
ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:57.085752Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:57.085813Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:57.145070Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:32:57.145103Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:32:57.145172Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:32:57.145485Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 
120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... EvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.087505Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.087571Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:03.087587Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.087640Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.087684Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:33:03.109112Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.176267Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.176407Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.177846Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:03.177858Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.177872Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 3 2025-06-03T10:33:03.177915Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:1:5:0] (::1:/30/pdisk-270.data) is locked by this request. Down: Host ::1:12008 (32) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-03T10:33:03.177973Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:1:5:0] (::1:/30/pdisk-270.data) is locked by this request. 
Down: Host ::1:12008 (32) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420242000 } 2025-06-03T10:33:03.178166Z node 25 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-03T10:33:03.178174Z node 25 :CMS DEBUG: cms.cpp:1209: TCms::Cleanup 2025-06-03T10:33:03.179911Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:03.180498Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:03.180542Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.180851Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:03.180926Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:03.181026Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.181092Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:03.181108Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.181164Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.181236Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:33:03.202809Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.268313Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.268410Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.269735Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:03.269748Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.269761Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 32, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-03T10:33:03.269805Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:2:7:0] (::1:/32/pdisk-288.data) is locked by this request. 
Down: Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-03T10:33:03.269862Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "32" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Number of data centers with unavailable vdisks: 3. Locked: VDisk [0:1:2:7:0] (::1:/32/pdisk-288.data) is locked by this request. Down: Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420348000 } 2025-06-03T10:33:03.280833Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.296184Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.296307Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.297845Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:03.297869Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.297887Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 30, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 2 2025-06-03T10:33:03.298387Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:03.298500Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "30" Services: "storage" Duration: 60000000 } Deadline: 180448000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 30 InterconnectPort: 12006 } } } } 2025-06-03T10:33:03.298784Z node 25 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-03T10:33:03.298794Z node 25 :CMS DEBUG: cms.cpp:1209: TCms::Cleanup 2025-06-03T10:33:03.300716Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:03.301352Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:03.301402Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.301739Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:03.301826Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:03.301939Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.302003Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 
} SentinelConfig { Enable: false } 2025-06-03T10:33:03.302023Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.302077Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.302108Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:33:03.323746Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.347530Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.347599Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.348808Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:03.348817Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 2025-06-03T10:33:03.348826Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 3 2025-06-03T10:33:03.348865Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/27/pdisk-243.data) is locked by this request. Down: Host ::1:12006 (30) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down) 2025-06-03T10:33:03.348920Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/27/pdisk-243.data) is locked by this request. 
Down: Host ::1:12006 (30) is down, Host ::1:12005 (29) is down, Host ::1:12002 (26) is down" } Deadline: 420554000 } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::AuthenticationRetryErrorImmediately [GOOD] Test command err: 2025-06-03T10:32:50.998807Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669181398271033:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:50.998824Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f6f/r3tmp/tmp0DD87e/pdisk_1.dat 2025-06-03T10:32:51.051264Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669181398271013:2079] 1748946770998672 != 1748946770998675 2025-06-03T10:32:51.052727Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18692, node 1 2025-06-03T10:32:51.061637Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.061648Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.061649Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.061690Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:51.128748Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.128774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.129479Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:51.129721Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
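Note on the Mirror3dcPermissions verdicts above: under MODE_KEEP_AVAILABLE the requests are refused whenever the locked and down vdisks of the affected group would span three data centers ("Number of data centers with unavailable vdisks: 3"), while the later request for host "30" is allowed once only two are involved. A sketch of that reading; the exact quorum rule is inferred from the ALLOW/DISALLOW_TEMP pairs in the log, not taken from the CMS sources.

#include <iostream>
#include <set>

// mirror-3-dc keeps replicas in three data centers; losing vdisks in all
// three at once is what MODE_KEEP_AVAILABLE refuses (assumed rule).
bool KeepAvailableAllows(const std::set<int>& dcsWithUnavailableVDisks) {
    return dcsWithUnavailableVDisks.size() < 3;
}

int main() {
    std::cout << KeepAvailableAllows({1, 2})    << '\n';  // 1: ALLOW
    std::cout << KeepAvailableAllows({1, 2, 3}) << '\n';  // 0: DISALLOW_TEMP
}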
2025-06-03T10:32:51.132418Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:51.132455Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [33ce3e3f0330] Connect to grpc://localhost:25986 2025-06-03T10:32:51.132990Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3e3f0330] Request AuthenticateRequest { iam_token: "**** (8E120919)" } 2025-06-03T10:32:51.134820Z node 1 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [33ce3e3f0330] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:51.134886Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:51.396342Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669185843228330:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.396391Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f6f/r3tmp/tmpk2g5n7/pdisk_1.dat 2025-06-03T10:32:51.410758Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.411234Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669185843228310:2079] 1748946771396183 != 1748946771396186 TServer::EnableGrpc on GrpcPort 7909, node 2 2025-06-03T10:32:51.424761Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.424774Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.424776Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.424834Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29955 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
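Note on the masked tickets in the TICKET_PARSER entries: long credentials are logged with their middle starred out plus a short parenthesised checksum, e.g. "ApiK****alid (AB5B5EA8)" for "ApiKey-value-valid" and, further below, "AKIA****MPLE (B3EDC139)" for "AKIAIOSFODNN7EXAMPLE" (first four and last four characters kept), while IAM tokens are starred entirely ("**** (8E120919)"). A sketch of that convention under stated assumptions: the keep-4/star/keep-4 rule matches the two visible examples, but the length cut-over and the hash behind the parenthesised value are guesses (FNV-1a stands in below).

#include <cstdint>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

std::string MaskTicket(const std::string& ticket) {
    // FNV-1a as a stand-in; the real hash behind "(AB5B5EA8)" is not
    // stated anywhere in the log.
    std::uint32_t h = 2166136261u;
    for (unsigned char c : ticket) { h ^= c; h *= 16777619u; }

    // Guessed cut-over: short or highly sensitive tickets are fully
    // starred, longer identifiers keep their first and last 4 chars.
    std::string masked = ticket.size() > 16
        ? ticket.substr(0, 4) + "****" + ticket.substr(ticket.size() - 4)
        : "****";

    std::ostringstream out;
    out << masked << " (" << std::uppercase << std::hex
        << std::setw(8) << std::setfill('0') << h << ')';
    return out.str();
}

int main() {
    std::cout << MaskTicket("AKIAIOSFODNN7EXAMPLE") << '\n';  // AKIA****MPLE (...)
    std::cout << MaskTicket("ApiKey-value-valid")   << '\n';  // ApiK****alid (...)
}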
2025-06-03T10:32:51.500893Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.500915Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.501316Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:51.501917Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:51.503413Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket ApiK****alid (AB5B5EA8) asking for AccessServiceAuthentication 2025-06-03T10:32:51.503447Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [33ce3d04e5f0] Connect to grpc://localhost:64575 2025-06-03T10:32:51.503634Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3d04e5f0] Request AuthenticateRequest { api_key: "ApiK****alid (AB5B5EA8)" } 2025-06-03T10:32:51.505420Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [33ce3d04e5f0] Response AuthenticateResponse { subject { user_account { id: "ApiKey-value-valid" } } } 2025-06-03T10:32:51.505524Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket ApiK****alid (AB5B5EA8) () has now valid token of ApiKey-value-valid@as 2025-06-03T10:32:51.819363Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669187691640222:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.819395Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f6f/r3tmp/tmpkvRvlM/pdisk_1.dat 2025-06-03T10:32:51.830525Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.830695Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669187691640201:2079] 1748946771819241 != 1748946771819244 TServer::EnableGrpc on GrpcPort 11498, node 3 2025-06-03T10:32:51.840821Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.840834Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.840835Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.840883Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30905 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:51.922760Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.922796Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.923202Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:51.923845Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:51.925341Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:32:51.925367Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:51.925370Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:32:51.925387Z node 3 :TICKET_PARSER TRACE: ticket_pa ... or_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:52.248538Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:52.250033Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511669191764461405:2079] 1748946772233740 != 1748946772233743 TServer::EnableGrpc on GrpcPort 17455, node 4 2025-06-03T10:32:52.260840Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:52.260853Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:52.260857Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:52.260915Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20019 WaitRootIsUp 'Root'... 
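Every node registration in these tests walks the same Hive VolatileState path: Unknown -> Disconnected -> Connecting -> Connected. A small sketch of that transition sequence, assuming the four states named in the warnings are the only ones involved (the real enum in node_info.cpp may define more):

#include <cstdio>

// States exactly as they appear in the HIVE warnings above.
enum class EVolatileState { Unknown, Disconnected, Connecting, Connected };

static const char* Name(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return "Unknown";
        case EVolatileState::Disconnected: return "Disconnected";
        case EVolatileState::Connecting:   return "Connecting";
        case EVolatileState::Connected:    return "Connected";
    }
    return "?";
}

// Advances one step along the registration path seen in the log.
static EVolatileState Next(EVolatileState s) {
    switch (s) {
        case EVolatileState::Unknown:      return EVolatileState::Disconnected;
        case EVolatileState::Disconnected: return EVolatileState::Connecting;
        case EVolatileState::Connecting:   return EVolatileState::Connected;
        case EVolatileState::Connected:    return EVolatileState::Connected;
    }
    return s;
}

int main() {
    EVolatileState s = EVolatileState::Unknown;
    while (s != EVolatileState::Connected) {
        EVolatileState to = Next(s);
        std::printf("VolatileState: %s -> %s\n", Name(s), Name(to));
        s = to;
    }
}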
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:52.339013Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:52.339056Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:52.339478Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:52.340053Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:52.341477Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:32:52.341497Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [33ce3bc865f0] Connect to grpc://localhost:3107 2025-06-03T10:32:52.341759Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3bc865f0] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:32:52.343559Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [33ce3bc865f0] Status 14 Service Unavailable 2025-06-03T10:32:52.343606Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:52.343610Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:32:52.343652Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3bc865f0] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:32:52.344138Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [33ce3bc865f0] Status 14 Service Unavailable 2025-06-03T10:32:52.344177Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:53.235909Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:53.235927Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:32:53.236008Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3bc865f0] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" 
v4_parameters { signed_at { } } } } 2025-06-03T10:32:53.236894Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [33ce3bc865f0] Status 14 Service Unavailable 2025-06-03T10:32:53.236931Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:54.236363Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:54.236391Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:32:54.236471Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3bc865f0] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:32:54.237275Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [33ce3bc865f0] Status 14 Service Unavailable 2025-06-03T10:32:54.237380Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:56.237328Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket AKIA****MPLE (B3EDC139) 2025-06-03T10:32:56.237367Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:32:56.237519Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3bc865f0] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:32:56.238361Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [33ce3bc865f0] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:32:56.238457Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as 2025-06-03T10:32:57.235163Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7511669191764461564:2198];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:57.235214Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:33:04.572756Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669241579988164:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:04.572780Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f6f/r3tmp/tmpMiNW8M/pdisk_1.dat 2025-06-03T10:33:04.590720Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:04.591443Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669241579988144:2079] 1748946784572664 != 1748946784572667 TServer::EnableGrpc on GrpcPort 8040, node 5 2025-06-03T10:33:04.603573Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:04.603591Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: 
(empty maybe) 2025-06-03T10:33:04.603594Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:04.603651Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30386 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:04.677873Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:04.677910Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:04.678280Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:04.678750Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:04.680299Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:33:04.680334Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [33ce3d110070] Connect to grpc://localhost:27340 2025-06-03T10:33:04.680602Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3d110070] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:33:04.682820Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [33ce3d110070] Status 14 Service Unavailable 2025-06-03T10:33:04.682898Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket AKIA****MPLE (B3EDC139) () has now retryable error message 'Service Unavailable' 2025-06-03T10:33:04.682914Z node 5 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket AKIA****MPLE (B3EDC139) asking for AccessServiceAuthentication 2025-06-03T10:33:04.682984Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [33ce3d110070] Request AuthenticateRequest { signature { access_key_id: "AKIAIOSFODNN7EXAMPLE" v4_parameters { signed_at { } } } } 2025-06-03T10:33:04.683587Z node 5 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [33ce3d110070] Response AuthenticateResponse { subject { user_account { id: "user1" } } } 2025-06-03T10:33:04.683650Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket AKIA****MPLE (B3EDC139) () has now valid token of user1@as ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::NebiusAccessKeySignatureUnsupported [GOOD] Test command err: 2025-06-03T10:32:41.274195Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669144806730564:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:41.274617Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8d/r3tmp/tmpT6IZUy/pdisk_1.dat 2025-06-03T10:32:41.321950Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:41.322546Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669144806730543:2079] 1748946761273990 != 1748946761273993 TServer::EnableGrpc on GrpcPort 4962, node 1 2025-06-03T10:32:41.337085Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:41.337101Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:41.337103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:41.337166Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5691 WaitRootIsUp 'Root'... 
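The AKIA****MPLE sequences above show how the ticket parser handles retryable errors: after "Status 14 Service Unavailable" it retries once almost immediately, then schedules refreshes roughly one second apart twice and two seconds apart the third time (10:32:53.235, 10:32:54.236, 10:32:56.237) until the AuthenticateResponse finally succeeds. A minimal sketch of that retry discipline, with a stub standing in for the AccessService client and a plain doubling backoff as an approximation of the observed cadence (the actual schedule and cap in ticket_parser_impl.h are not visible here):

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <optional>
#include <string>
#include <thread>

// Stand-in for the AccessService call; returns the subject id on success or
// std::nullopt on a retryable failure. Fails twice to mimic the log above.
static std::optional<std::string> Authenticate(int attempt) {
    return attempt < 3 ? std::nullopt : std::optional<std::string>("user1");
}

int main() {
    using namespace std::chrono;
    auto delay = seconds(1);           // initial refresh delay (assumed)
    const auto maxDelay = seconds(60); // cap (assumed; not visible in the log)
    for (int attempt = 1;; ++attempt) {
        if (auto subject = Authenticate(attempt)) {
            std::printf("ticket now valid for %s@as\n", subject->c_str());
            return 0;
        }
        std::printf("retryable error 'Service Unavailable', refreshing in %llds\n",
                    static_cast<long long>(delay.count()));
        std::this_thread::sleep_for(delay);
        delay = std::min(delay * 2, maxDelay);
    }
}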
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:41.376774Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:41.376833Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:41.377955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:41.402405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:41.437047Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 2025-06-03T10:32:41.440050Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:41.440062Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:41.440326Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****HMYw (CBE72E8D) () has now retryable error message 'Security state is empty' 2025-06-03T10:32:41.440346Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:41.440352Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:41.440366Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****HMYw (CBE72E8D) () has now retryable error message 'Security state is empty' 2025-06-03T10:32:41.440370Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-03T10:32:41.440374Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-03T10:32:41.440377Z node 1 :TICKET_PARSER ERROR: ticket_parser_impl.h:963: Ticket eyJh****HMYw (CBE72E8D): Security state is empty 2025-06-03T10:32:43.275695Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****HMYw (CBE72E8D) 2025-06-03T10:32:43.275763Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:43.275767Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database 
candidates(1): /Root 2025-06-03T10:32:43.275794Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket eyJh****HMYw (CBE72E8D) () has now retryable error message 'Security state is empty' 2025-06-03T10:32:43.275808Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:773: CanInitLoginToken, database /Root, A2 error Security state is empty 2025-06-03T10:32:44.440618Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:46.274315Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669144806730564:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:46.274363Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:47.277358Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****HMYw (CBE72E8D) 2025-06-03T10:32:47.277443Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:47.277455Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:47.277737Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****HMYw (CBE72E8D) () has now valid token of user1 2025-06-03T10:32:47.277749Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-03T10:32:51.565063Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669186511843009:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.565403Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8d/r3tmp/tmpVgCIIG/pdisk_1.dat 2025-06-03T10:32:51.580658Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.581565Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669186511842988:2079] 1748946771564911 != 1748946771564914 TServer::EnableGrpc on GrpcPort 4923, node 2 2025-06-03T10:32:51.591429Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:51.591448Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:51.591451Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:51.591508Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8889 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:51.668966Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:51.668993Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:51.669330Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:51.669903Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:51.671159Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:51.671185Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [13347d0ef0f0] Connect to grpc://localhost:4435 2025-06-03T10:32:51.671626Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d0ef0f0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-03T10:32:51.673992Z node 2 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13347d0ef0f0] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-03T10:32:51.674093Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:32:51.936305Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669184617601132:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:51.936328Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8d/r3tmp/tmpZRcP5d/pdisk_1.dat 2025-06-03T10:32:51.949815Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:51.950058Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669184617601113:2079] 1748946771936181 != 1748946771936184 TServer::EnableGrpc on GrpcPort 2799, node 3 2025-06-03T10:32:51.958 ... 
impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:52.937511Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-03T10:32:52.937529Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:52.937576Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d11b670] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-06-03T10:32:52.938139Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13347d11b670] Status 14 Service Unavailable 2025-06-03T10:32:52.938186Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:53.937934Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-03T10:32:53.937950Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:53.937991Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d11b670] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" 2025-06-03T10:32:53.938829Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13347d11b670] Status 14 Service Unavailable 2025-06-03T10:32:53.938898Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:32:56.936755Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7511669184617601132:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:56.936792Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:56.939290Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket **** (8E120919) 2025-06-03T10:32:56.939308Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:32:56.939356Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d11b670] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-03T10:32:56.940328Z node 3 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13347d11b670] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-03T10:32:56.940378Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:04.255995Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511669242583924881:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:04.256028Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8d/r3tmp/tmpdkO2tY/pdisk_1.dat 2025-06-03T10:33:04.266437Z node 4 :IMPORT WARN: 
schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:04.266589Z node 4 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511669242583924861:2079] 1748946784255897 != 1748946784255900 TServer::EnableGrpc on GrpcPort 14112, node 4 2025-06-03T10:33:04.276947Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:04.276957Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:04.276958Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:04.277002Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5563 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:04.359396Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:04.359420Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:04.359772Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:04.360351Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:04.361379Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db , DomainLoginOnly 1 2025-06-03T10:33:04.361389Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:04.361391Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:810: CanInitLoginToken, database /Root, A6 error 2025-06-03T10:33:04.361402Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:33:04.361412Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:81: [13347d0ef3b0] Connect to grpc://localhost:24033 2025-06-03T10:33:04.361603Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d0ef3b0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response 14: "Service Unavailable" 2025-06-03T10:33:04.363254Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:109: [13347d0ef3b0] Status 14 Service Unavailable 2025-06-03T10:33:04.363300Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1796: Ticket **** (8E120919) () has now retryable error message 'Service Unavailable' 2025-06-03T10:33:04.363304Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:557: Ticket **** (8E120919) asking for AccessServiceAuthentication 2025-06-03T10:33:04.363335Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:120: [13347d0ef3b0] Request AuthenticateRequest { iam_token: "**** (8E120919)" } NebiusAccessService::Authenticate request iam_token: "user1" NebiusAccessService::Authenticate response account { user_account { id: "user1" } } 0: "" 2025-06-03T10:33:04.363813Z node 4 :GRPC_CLIENT DEBUG: grpc_service_client.h:107: [13347d0ef3b0] Response AuthenticateResponse { account { user_account { id: "user1" } } } 2025-06-03T10:33:04.363884Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket **** (8E120919) () has now valid token of user1@as 2025-06-03T10:33:06.539652Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669250357958327:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:06.539667Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f8d/r3tmp/tmpypnafq/pdisk_1.dat 2025-06-03T10:33:06.553718Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:06.553964Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669250357958307:2079] 1748946786539525 != 1748946786539528 TServer::EnableGrpc on GrpcPort 28812, node 5 2025-06-03T10:33:06.560023Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:06.560034Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:06.560038Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:06.560085Z 
node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62039 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:06.643429Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:06.643463Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:06.643801Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.644418Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:06.644716Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:06.645553Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:908: Ticket AKIA****MPLE (B3EDC139): Access key signature is not supported >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestCreateCleanManyTables [GOOD] >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp >> TCmsTest::ScheduledEmergencyDuringRollingRestart [GOOD] >> TCmsTest::ScheduledWalleRequestDuringRollingRestart >> TCmsTest::VDisksEvictionShouldFailOnUnsupportedAction [GOOD] >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions >> KqpSplit::AfterResolve+Unspecified [GOOD] >> KqpSplit::AfterResult+Ascending >> TxUsage::WriteToTopic_Demo_24_Table [GOOD] >> TCmsTenatsTest::TestClusterLimitForceRestartModeScheduled [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled >> KqpScan::RightJoinSimple [GOOD] >> KqpScan::RightOnlyJoinSimple >> KqpScan::UnionThree [GOOD] >> KqpScan::UnionSameTable >> TxUsage::WriteToTopic_Demo_24_Query >> TTablesWithReboots::ChainedCopyTableAndDropWithReboots [GOOD] >> TCmsTest::ManagePermissions [GOOD] >> TCmsTest::ManagePermissionWrongRequest >> TCmsTest::TestForceRestartMode [GOOD] >> TCmsTest::TestForceRestartModeDisconnects >> KqpSplit::ChoosePartition+Ascending [GOOD] >> KqpSplit::BorderKeys+Unspecified >> KqpScan::PureExpr >> TMaintenanceApiTest::CompositeActionGroupSameStorageGroup [GOOD] >> TMaintenanceApiTest::ActionReason >> KqpScan::RightOnlyJoinSimple [GOOD] >> KqpScan::UnionSameTable [GOOD] >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] ------- [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/core/mind/hive/ut/unittest >> TStorageBalanceTest::TestScenario3 [GOOD] Test command err: c[def1] ---------------------------------------------------------------------------------------------------- (0) 2025-06-03T10:30:31.231289Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:31.232214Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:31.232281Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path# "/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:31.232474Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:31.232789Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:31.232807Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:31.233014Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:27:2074] ControllerId# 72057594037932033 2025-06-03T10:30:31.233022Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:31.233057Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:31.233079Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:31.237701Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:31.237722Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:31.238135Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:35:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238183Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:36:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238219Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:37:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238260Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:38:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238294Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:39:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238324Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:40:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:31.238346Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:26:2073] Create Queue# [1:41:2085] targetNodeId# 1 Marker# DSP01 
2025-06-03T10:30:31.238350Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:31.238363Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:27:2074] 2025-06-03T10:30:31.238368Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:27:2074] 2025-06-03T10:30:31.238376Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:31.238384Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:31.238513Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:31.238617Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:31.242263Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:31.242297Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:31.242305Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:31.242620Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:31.242663Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:31.242669Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:31.243348Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:31.243429Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:31.243435Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:31.243470Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:31.243476Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:31.243887Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:31.243898Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:31.243901Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:31.243922Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:52:2092] 2025-06-03T10:30:31.243935Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:31.244188Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 0} 2025-06-03T10:30:31.244208Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 1} 2025-06-03T10:30:31.244215Z node 1 :STATESTORAGE 
DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037932033 Cookie: 2} 2025-06-03T10:30:31.244223Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:31.245612Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037936129] ::Bootstrap [1:31:2063] 2025-06-03T10:30:31.245633Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037936129] lookup [1:31:2063] 2025-06-03T10:30:31.245672Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037936129 entry.State: StInit ev: {EvForward TabletID: 72057594037936129 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:31.245710Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 1 2025-06-03T10:30:31.245718Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:31.245749Z node 1 :BS_NODE DEBUG: {NWDC18@distconf_binding.cpp:322} UpdateBound RefererNodeId# 1 NodeId# ::1:12001/1 Meta# {Fingerprint: "\371$\224\316I\335\243.)W\014\261m\013\346Osy\0160" } 2025-06-03T10:30:31.245792Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037927937] ::Bootstrap [1:52:2092] 2025-06-03T10:30:31.245800Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037927937] lookup [1:52:2092] 2025-06-03T10:30:31.245811Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:31.245867Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# true Origin# distconf ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:31.245903Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435072 StorageConfigLoaded# true NodeListObtained# true PendingEvents.size# 1 2025-06-03T10:30:31.245913Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 2146435075 Sender# [1:47:2090] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:31.245923Z node 1 :BS_NODE DEBUG: {NWDC36@distconf_persistent_storage.cpp:205} TEvStorageConfigStored NumOk# 0 NumError# 0 Passed# 0.003228s 2025-06-03T10:30:31.246536Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037927937] queue send [1:52:2092] 2025-06-03T10:30:31.246667Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 1 TabletID: 72057594037932033} 2025-06-03T10:30:31.246713Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037927937 entry.State: StInit ev: {EvForward TabletID: 72057594037927937 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:31.246756Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72057594037932033 entry.State: StInitResolve success: false ev: {EvInfo Status: 5 TabletID: 72057594037932033 Cookie: 0 CurrentLeader: [0:0:0] 
CurrentLeaderTablet: [0:0:0] CurrentGeneration: 0 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 3 Signature: {{[1:24343667:0] : 2}, {[1:2199047599219:0] : 8}, {[1:1099535971443:0] : 5}}}} 2025-06-03T10:30:31.246764Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:361: DropEntry tabletId: 72057594037932033 followers: 0 2025-06-03T10:30:31.246896Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:172: TClient[72057594037932033] forward result error, check reconnect [1:27:2074] 2025-06-03T10:30:31.246907Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:562: TClient[72057594037932033] schedule retry [1:27:2074] 2025-06-03T10:30:31.246919Z node 1 :BS_NODE DEBUG: {NWDC15@distconf.cpp:361} StateFunc Type# 268639248 Sender# [1:12:2059] SessionId# [0:0:0] Cookie# 0 2025-06-03T10:30:31.247807Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037936129 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:31.247839Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:27:2074] 2025-06-03T10:30:31.247884Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 0} 2025-06-03T10:30:31.247893Z node 1 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72057594037936129 Cookie: 1} 2025-06-03T10:30:31.24789 ... 6371002f6c] restore Id# [72057594037927937:2:489:0:0:246:0] optimisticReplicas# 1 optimisticState# EBS_FULL Marker# BPG55 2025-06-03T10:33:04.537792Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [e2543b6371002f6c] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72057594037927937:2:489:0:0:246:1] Marker# BPG33 2025-06-03T10:33:04.537798Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [e2543b6371002f6c] Sending missing VPut part# 0 to# 0 blob Id# [72057594037927937:2:489:0:0:246:1] Marker# BPG32 2025-06-03T10:33:04.537829Z node 12 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [12:489:2089] NKikimr::TEvBlobStorage::TEvVPut# {ID# [72057594037927937:2:489:0:0:246:1] FDS# 246 HandleClass# TabletLog {MsgQoS ExtQueueId# PutTabletLog} DataSize# 0 Data# } cookie# 0 2025-06-03T10:33:04.538900Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [e2543b6371002f6c] received {EvVPutResult Status# OK ID# [72057594037927937:2:489:0:0:246:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 505 } Cost# 81937 ExtQueueId# PutTabletLog IntQueueId# IntPutLog Window# { Status# Processed ActualWindowSize# 0 MaxWindowSize# 150000000 ExpectedMsgId# { SequenceId: 1 MsgId: 506 }}}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:33:04.538938Z node 12 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [e2543b6371002f6c] Result# TEvPutResult {Id# [72057594037927937:2:489:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} GroupId# 0 Marker# BPP12 2025-06-03T10:33:04.538947Z node 12 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [e2543b6371002f6c] SendReply putResult# TEvPutResult {Id# [72057594037927937:2:489:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:33:04.538975Z node 12 :BS_PROXY_PUT DEBUG: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# MinLatency History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.206 sample PartId# 
[72057594037927937:2:489:0:0:246:1] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 12 } TEvVPutResult{ TimestampMs# 1.294 VDiskId# [0:1:0:0:0] NodeId# 12 Status# OK } ] } 2025-06-03T10:33:04.539131Z node 12 :TABLET_MAIN DEBUG: tablet_req_writelog.cpp:54: Put Result: TEvPutResult {Id# [72057594037927937:2:489:0:0:246:0] Status# OK StatusFlags# { Valid } ApproximateFreeSpaceShare# 0.998955} 2025-06-03T10:33:04.539240Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} commited cookie 1 for step 489 2025-06-03T10:33:04.539254Z node 12 :HIVE DEBUG: tx__reassign_groups.cpp:56: HIVE#72057594037927937 THive::TTxReassignGroups(72075186224037902)::Complete 2025-06-03T10:33:04.539275Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:332: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{124426255136704}(72075186224037902)::Complete SideEffects: {Notifications: 0x7FF0000F [12:5495:2604] NKikimr::NHive::TEvPrivate::TEvRestartCancelled} 2025-06-03T10:33:04.539379Z node 12 :HIVE DEBUG: storage_balancer.cpp:115: HIVE#72057594037927937 StorageBalancer received RestartCancelled for tablet (72075186224037902,0) 2025-06-03T10:33:04.539389Z node 12 :HIVE DEBUG: storage_balancer.cpp:92: HIVE#72057594037927937 StorageBalancer initiating reassign for tablet 72075186224037971 2025-06-03T10:33:04.539523Z node 12 :HIVE DEBUG: hive_impl.cpp:964: HIVE#72057594037927937 THive::TEvReassignTablet TabletID: 72075186224037971 Channels: 1 Channels: 2 Channels: 0 ReassignReason: HIVE_REASSIGN_REASON_BALANCE 2025-06-03T10:33:04.539544Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1481, NKikimr::NHive::TTxReassignGroups} queued, type NKikimr::NHive::TTxReassignGroups 2025-06-03T10:33:04.539551Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1481, NKikimr::NHive::TTxReassignGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:33:04.539559Z node 12 :HIVE DEBUG: tx__reassign_groups.cpp:30: HIVE#72057594037927937 THive::TTxReassignGroups(72075186224037971,[0,1,2])::Execute 2025-06-03T10:33:04.539651Z node 12 :HIVE DEBUG: hive_impl.cpp:1065: HIVE#72057594037927937 THive::AssignTabletGroups TEvControllerSelectGroups tablet 72075186224037971 GroupParameters { StoragePoolSpecifier { Name: "def1" } } ReturnAllMatchingGroups: true 2025-06-03T10:33:04.539672Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1481, NKikimr::NHive::TTxReassignGroups} hope 1 -> done Change{990, redo 303b alter 0b annex 0, ~{ 1, 2 } -{ }, 0 gb} 2025-06-03T10:33:04.539681Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:490} Tx{1481, NKikimr::NHive::TTxReassignGroups} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:33:04.539713Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037932033] send [12:1309:2642] 2025-06-03T10:33:04.539736Z node 12 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037932033] push event to server [12:1309:2642] 2025-06-03T10:33:04.539745Z node 12 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037932033] HandleSend Sender# [12:1243:2604] EventType# 268637702 c[def1] *****----------------------------------------------------------------------------------------------- (0.048) *****----------------------------------------------------------------------------------------------- (0.054) ******---------------------------------------------------------------------------------------------- (0.058) 
******---------------------------------------------------------------------------------------------- (0.056) *****----------------------------------------------------------------------------------------------- (0.052) ******---------------------------------------------------------------------------------------------- (0.056) *****----------------------------------------------------------------------------------------------- (0.052) *****----------------------------------------------------------------------------------------------- (0.052) *******--------------------------------------------------------------------------------------------- (0.066) *****----------------------------------------------------------------------------------------------- (0.05) ******---------------------------------------------------------------------------------------------- (0.056) 2025-06-03T10:33:04.640817Z node 12 :HIVE DEBUG: hive_impl.cpp:433: HIVE#72057594037927937 THive::Handle TEvControllerSelectGroupsResult: success Status: OK MatchingGroups { Groups { GroupID: 2147483649 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2400000000 Occupancy: 0.048 } AllocatedSize: 2400000000 } Groups { GroupID: 2147483650 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2700000000 Occupancy: 0.054 } AllocatedSize: 2700000000 } Groups { GroupID: 2147483651 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2900000000 Occupancy: 0.058 } AllocatedSize: 2900000000 } Groups { GroupID: 2147483652 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2800000000 Occupancy: 0.056 } AllocatedSize: 2800000000 } Groups { GroupID: 2147483653 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2600000000 Occupancy: 0.052 } AllocatedSize: 2600000000 } Groups { GroupID: 2147483654 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2800000000 Occupancy: 0.056 } AllocatedSize: 2800000000 } Groups { GroupID: 2147483655 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2600000000 Occupancy: 0.052 } AllocatedSize: 2600000000 } Groups { GroupID: 2147483656 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2600000000 Occupancy: 0.052 } AllocatedSize: 2600000000 } Groups { GroupID: 2147483657 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 3300000000 Occupancy: 0.066 } AllocatedSize: 3300000000 } Groups { GroupID: 2147483658 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2500000000 Occupancy: 0.05 } AllocatedSize: 2500000000 } Groups { GroupID: 2147483659 StoragePoolName: "def1" AssuredResources { Space: 50000000000 Occupancy: 0 } CurrentResources { Space: 2800000000 Occupancy: 0.056 } AllocatedSize: 2800000000 } } 2025-06-03T10:33:04.640900Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:491} Tx{1482, NKikimr::NHive::TTxUpdateTabletGroups} queued, type NKikimr::NHive::TTxUpdateTabletGroups 2025-06-03T10:33:04.640912Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:491} Tx{1482, NKikimr::NHive::TTxUpdateTabletGroups} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:33:04.640927Z node 12 
:HIVE DEBUG: tx__update_tablet_groups.cpp:63: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}(72075186224037971,HIVE_REASSIGN_REASON_BALANCE,[]) 2025-06-03T10:33:04.640948Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 channel 0 assigned to group 2147483654 2025-06-03T10:33:04.640953Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped reassign of channel 0 2025-06-03T10:33:04.640960Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 channel 1 assigned to group 2147483656 2025-06-03T10:33:04.640963Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped reassign of channel 1 2025-06-03T10:33:04.640969Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:151: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 channel 2 assigned to group 2147483656 2025-06-03T10:33:04.640973Z node 12 :HIVE DEBUG: tx__update_tablet_groups.cpp:171: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped reassign of channel 2 2025-06-03T10:33:04.640981Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:272: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 wasn't changed 2025-06-03T10:33:04.640986Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped channel 0 2025-06-03T10:33:04.641014Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped channel 1 2025-06-03T10:33:04.641020Z node 12 :HIVE WARN: tx__update_tablet_groups.cpp:281: HIVE#72057594037927937 THive::TTxUpdateTabletGroups::Execute{124426255136704}: tablet 72075186224037971 skipped channel 2 2025-06-03T10:33:04.641040Z node 12 :HIVE NOTICE: tx__update_tablet_groups.cpp:326: HIVE#72057594037927937 THive::TTxUpdateTabletGroups{124426255136704}(72075186224037971)::Execute - TryToBoot was not successfull 2025-06-03T10:33:04.641058Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:491} Tx{1482, NKikimr::NHive::TTxUpdateTabletGroups} hope 1 -> done Change{991, redo 257b alter 0b annex 0, ~{ 2, 1 } -{ }, 0 gb} 2025-06-03T10:33:04.641066Z node 12 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:491} Tx{1482, NKikimr::NHive::TTxUpdateTabletGroups} release 4194304b of static, Memory{0 dyn 0} >> TCmsTest::RequestRestartServicesNoUser [GOOD] >> BasicUsage::TWriteSession_WriteAndReadAndCommitRandomMessagesNoClusterDiscovery [GOOD] >> BasicUsage::TWriteSession_WriteEncoded >> KqpSplit::AfterResult+Ascending [GOOD] >> TxUsage::WriteToTopic_Demo_7_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::RightOnlyJoinSimple [GOOD] Test command err: Trying to start YDB, gRPC: 29975, MsgBus: 28895 2025-06-03T10:33:06.541974Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: 
fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669252329568283:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:06.541992Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fac/r3tmp/tmpj0Nv5o/pdisk_1.dat 2025-06-03T10:33:06.595790Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669252329568265:2079] 1748946786541836 != 1748946786541839 2025-06-03T10:33:06.597939Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29975, node 1 2025-06-03T10:33:06.609570Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:06.609585Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:06.609587Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:06.609629Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28895 TClient is connected to server localhost:28895 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:06.672087Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:06.672119Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:06.672885Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.673132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:33:06.676809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
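
The HIVE/TABLET_EXECUTOR trace at the top of this excerpt is one balancer iteration: the Hive receives the matching storage groups from the BS controller (TEvControllerSelectGroupsResult), compares each group's CurrentResources.Occupancy against the tablet's current assignment, and runs TTxUpdateTabletGroups for tablet 72075186224037971 with reason HIVE_REASSIGN_REASON_BALANCE. With all groups between 0.048 and 0.066 occupancy, no move is worth making, which matches the per-channel "skipped reassign" lines and the final "tablet ... wasn't changed" warning. A minimal sketch of that selection idea follows (illustrative only; the struct and function names are assumptions, not the actual THive code):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct GroupInfo {
        uint64_t GroupId;
        double Occupancy;  // CurrentResources.Occupancy reported by the BS controller
    };

    // Pick the least-occupied candidate, but keep the current group unless the
    // improvement clears a threshold (hysteresis avoids reassignment churn).
    uint64_t PickGroup(const std::vector<GroupInfo>& candidates,
                       uint64_t currentGroup, double minImprovement = 0.1) {
        auto best = *std::min_element(
            candidates.begin(), candidates.end(),
            [](const GroupInfo& a, const GroupInfo& b) { return a.Occupancy < b.Occupancy; });
        auto cur = std::find_if(candidates.begin(), candidates.end(),
                                [&](const GroupInfo& g) { return g.GroupId == currentGroup; });
        if (cur != candidates.end() && cur->Occupancy - best.Occupancy < minImprovement)
            return currentGroup;  // corresponds to "skipped reassign of channel"
        return best.GroupId;
    }

    int main() {
        std::vector<GroupInfo> groups = {
            {2147483654, 0.056}, {2147483656, 0.052}, {2147483657, 0.066}};
        std::cout << PickGroup(groups, 2147483656) << "\n";  // stays put: 2147483656
    }
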
2025-06-03T10:33:06.694700Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.715673Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.728121Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.866956Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669252329569899:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:06.866998Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:06.913526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.921698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.931684Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.945632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.959918Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.974379Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.988492Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.004920Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669256624537846:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.004950Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.004976Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669256624537851:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.006006Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:07.015448Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669256624537853:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:07.115125Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669256624537904:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:07.231949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.329693Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669256624538448:2531] TxId: 281474976715675. Ctx: { TraceId: 01jwtnm6y3ecmztpvpaz35j2hj, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YmE3ZDU1ZWEtYzI3MDQ5OS1jMDIxMGIyMi1hZDkzYmM3ZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:07.333566Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946787371, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 15084, MsgBus: 14850 2025-06-03T10:33:07.627644Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669253863073800:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:07.627666Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fac/r3tmp/tmpgpXqml/pdisk_1.dat 2025-06-03T10:33:07.644284Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:07.644751Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669253863073781:2079] 1748946787627544 != 1748946787627547 TServer::EnableGrpc on GrpcPort 15084, node 2 2025-06-03T10:33:07.655424Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:07.655438Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:07.655442Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:07.655488Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14850 TClient is connected to server localhost:14850 WaitRootIsUp 'Root'... 
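
The block above is the lazy bootstrap of the default workload-manager resource pool, and it repeats verbatim in every test case in this run: TPoolFetcherActor reports NOT_FOUND for /Root/.metadata/workload_manager/pools/default, a TPoolCreatorActor proposes ESchemeOpCreateResourcePool, re-checks after the transaction ("completed, doublechecking"), and a concurrent creator is answered with "path exist, request accepts it". The TX_PROXY ERROR line is therefore noise from a race the flow is designed to tolerate. A sketch of that create-if-absent pattern, assuming hypothetical fetch/create callbacks rather than the actual WorkloadService API:

    #include <functional>
    #include <iostream>

    enum class Status { Ok, NotFound, AlreadyExists, Retry };

    // Idempotent "ensure the default pool exists": losing the creation race
    // ("path exist, request accepts it") counts as success.
    Status EnsureDefaultPool(const std::function<Status()>& fetchPool,
                             const std::function<Status()>& createPool,
                             int maxAttempts = 3) {
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            if (fetchPool() == Status::Ok)
                return Status::Ok;              // someone else already created it
            Status st = createPool();
            if (st == Status::Ok || st == Status::AlreadyExists)
                return Status::Ok;
            // st == Status::Retry: transaction completed, double-check via re-fetch
        }
        return Status::Retry;
    }

    int main() {
        int calls = 0;
        auto fetch = [&] { return calls++ ? Status::Ok : Status::NotFound; };
        auto create = [] { return Status::Retry; };  // forces the doublecheck path
        std::cout << (EnsureDefaultPool(fetch, create) == Status::Ok) << "\n";  // 1
    }
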
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:07.733471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:07.733501Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:07.733574Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.734471Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:07.754215Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.765775Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.792712Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.806685Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.978084Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669253863075412:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.978129Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.985447Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.994720Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.003002Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.016622Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.030348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.044641Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.058560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.074243Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669258158043362:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.074275Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.074293Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669258158043367:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.075144Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:08.078434Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669258158043369:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:08.142107Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669258158043420:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:08.237230Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.325911Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946788365, txId: 281474976715674] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::UnionSameTable [GOOD] Test command err: Trying to start YDB, gRPC: 26549, MsgBus: 9418 2025-06-03T10:33:06.553360Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669250910644440:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:06.553379Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fa5/r3tmp/tmp0TvatZ/pdisk_1.dat 2025-06-03T10:33:06.601628Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669250910644416:2079] 1748946786553169 != 1748946786553172 2025-06-03T10:33:06.601958Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26549, node 1 2025-06-03T10:33:06.616372Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:06.616392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:06.616394Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:06.616460Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:9418 TClient is connected to server localhost:9418 WaitRootIsUp 'Root'... 
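
Each test case above closes with the KQP_RESOURCE_MANAGER warning "KqpSnapshotManager: discarding snapshot": the MVCC snapshot taken for the scan, identified by its [step, txId] pair, is released when the session shuts down instead of being handed off to anything. The shape of this is plain scope-bound resource management; a generic RAII sketch, not the KqpSnapshotManager implementation:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct SnapshotId { uint64_t Step; uint64_t TxId; };

    class SnapshotGuard {
        std::optional<SnapshotId> Id;
    public:
        explicit SnapshotGuard(SnapshotId id) : Id(id) {}
        SnapshotGuard(const SnapshotGuard&) = delete;
        SnapshotGuard& operator=(const SnapshotGuard&) = delete;
        ~SnapshotGuard() {
            if (Id)  // nothing took ownership of the snapshot: discard on scope exit
                std::cout << "discarding snapshot [step: " << Id->Step
                          << ", txId: " << Id->TxId << "]\n";
        }
        void Commit() { Id.reset(); }  // keep the snapshot; skip the discard
    };

    int main() {
        SnapshotGuard guard({1748946788421, 281474976715672});
    }   // prints the discard line, mirroring the shutdown message above
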
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:06.674855Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.679573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:06.679618Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:06.680705Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:06.684173Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.748838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.770377Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.782405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.886992Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669250910646046:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:06.887041Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:06.931073Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.987206Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:06.996619Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.051780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.106755Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.120602Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.135081Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.150308Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669255205614000:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.150337Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.150360Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669255205614005:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.151110Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:07.154607Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669255205614007:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:07.245690Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669255205614058:3396] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:07.464321Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946787504, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 32328, MsgBus: 17555 2025-06-03T10:33:07.651561Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669256437567217:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:07.651583Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fa5/r3tmp/tmpBXqJ9J/pdisk_1.dat 2025-06-03T10:33:07.668534Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:07.668826Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669256437567199:2079] 1748946787651431 != 1748946787651434 TServer::EnableGrpc on GrpcPort 32328, node 2 2025-06-03T10:33:07.675581Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:07.675594Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:07.675596Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:07.675647Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17555 TClient is connected to server localhost:17555 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
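
The recurring "WaitRootIsUp 'Root'..." blocks are a readiness poll: the test client repeats TClient::Ls on the scheme root until the reply carries StatusCode: SUCCESS, and the (TRUNCATED) PathDescription dump is that successful reply. A generic deadline-bounded poll of the same shape (a sketch; the real TClient API differs):

    #include <algorithm>
    #include <chrono>
    #include <functional>
    #include <iostream>
    #include <thread>

    // Poll `probe` until it succeeds or the deadline passes, doubling the delay.
    bool WaitUntilUp(const std::function<bool()>& probe,
                     std::chrono::milliseconds deadline = std::chrono::seconds(10)) {
        auto start = std::chrono::steady_clock::now();
        auto delay = std::chrono::milliseconds(50);
        while (std::chrono::steady_clock::now() - start < deadline) {
            if (probe())
                return true;  // e.g. Ls("Root") returned StatusCode: SUCCESS
            std::this_thread::sleep_for(delay);
            delay = std::min(delay * 2, std::chrono::milliseconds(1000));
        }
        return false;
    }

    int main() {
        int attempts = 0;
        bool up = WaitUntilUp([&] { return ++attempts >= 3; });
        std::cout << (up ? "Root is up after " : "timeout after ")
                  << attempts << " attempts\n";
    }
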
2025-06-03T10:33:07.755920Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:07.755952Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:07.756254Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.756807Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:07.766444Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.779573Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.840012Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.849753Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.005987Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669260732536128:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.006017Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.016799Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.024226Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.037343Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.092697Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.100987Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.115627Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.129078Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.144266Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669260732536784:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.144299Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.144308Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669260732536789:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.144963Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:08.148220Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669260732536791:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:08.224576Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669260732536842:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:08.375645Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946788421, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::WalleRequestDuringRollingRestart [GOOD] Test command err: 2025-06-03T10:33:02.544026Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:02.544581Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:02.546461Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:02.546541Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:02.546905Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:02.546935Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:02.547175Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:02.547200Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:02.547237Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
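
The TCmsTest trace that follows shows the CMS tablet bootstrap ordering: TTxInitScheme and TTxLoadState run first ("Using default config"), then the Sentinel starts its ConfigUpdater, and a state update requested while the config fetch is still in flight is explicitly deferred ("StateUpdater was delayed") and replayed once the blobstorage config arrives. A compact sketch of that deferral, illustrative rather than the CMS source:

    #include <iostream>

    class Sentinel {
        bool ConfigInFlight = false;
        bool StateUpdateDelayed = false;
    public:
        void UpdateConfig() {
            ConfigInFlight = true;
            std::cout << "Start ConfigUpdater\n";
        }
        void UpdateState() {
            if (ConfigInFlight) {
                StateUpdateDelayed = true;
                std::cout << "StateUpdater was delayed\n";
                return;
            }
            std::cout << "Run StateUpdater\n";
        }
        void OnConfigReceived() {
            ConfigInFlight = false;
            if (StateUpdateDelayed) {
                StateUpdateDelayed = false;
                UpdateState();  // replay the deferred state update
            }
        }
    };

    int main() {
        Sentinel s;
        s.UpdateConfig();      // Start ConfigUpdater
        s.UpdateState();       // StateUpdater was delayed
        s.OnConfigReceived();  // Run StateUpdater
    }
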
2025-06-03T10:33:02.547307Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:02.548311Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:02.548328Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:02.548347Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:02.548380Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:02.577091Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:02.609126Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:02.609214Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:02.610361Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:02.610484Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:02.610490Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:02.610497Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:02.610500Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:02.610512Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:02.610554Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:02.610579Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:02.612062Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:02.653837Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:02.653884Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:02.653996Z node 1 :CMS INFO: walle_create_task_adapter.cpp:30: Processing Wall-E request: TaskId: "task-1" Type: "automated" Issuer: "UT" Action: "reboot" Hosts: "1" DryRun: false 2025-06-03T10:33:02.712303Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:02.712341Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:02.712432Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:02.712728Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP 
Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 In ... -p-1" Action { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } Deadline: 180028512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 17 InterconnectPort: 12001 } } } } 2025-06-03T10:33:06.858046Z node 17 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.028512Z 2025-06-03T10:33:06.859548Z node 17 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:33:06.859567Z node 17 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:06.859577Z node 17 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:06.859592Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:33:06.882359Z node 17 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:06.882444Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:06.882607Z node 17 :CMS INFO: walle_create_task_adapter.cpp:30: Processing Wall-E request: TaskId: "task-1" Type: "automated" Issuer: "UT" Action: "reboot" Hosts: "18" DryRun: false 2025-06-03T10:33:06.893768Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:06.893806Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:06.893820Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:06.915507Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:06.915550Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:06.915570Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:06.915735Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 } Schedule: true DryRun: false Priority: 20 2025-06-03T10:33:06.915746Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 2025-06-03T10:33:06.915756Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Locked, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:06.915765Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '18': node state: 'Locked') 2025-06-03T10:33:06.915787Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:06.915841Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# Wall-E-r-2, owner# Wall-E, order# 2, priority# 20, body# User: "Wall-E" Actions { Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'18\': node state: \'Locked\'" } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 20 
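
The Wall-E exchange above is CMS's lock-aware admission at work: the REBOOT_HOST request for host 18 gets DISALLOW_TEMP ("Cannot lock node '18': node state: 'Locked'") while an earlier permission holds the node, the request is stored with its priority, and a later TEvCheckRequest is answered ALLOW once the lock is released (the "user is done with permissions" step further down). The unbounded Duration 18446744073709551615 is uint64 max, which is why the eventual permission's validity lands in the year 586524. A sketch of the lock check, with illustrative names rather than the CMS cluster-state code:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    enum class Verdict { Allow, DisallowTemp };

    // An action on a locked node is rejected with a retriable status; the stored
    // request succeeds after the earlier permission is released.
    class NodeLocks {
        std::unordered_map<uint32_t, std::string> Locks;  // node -> permission id
    public:
        Verdict Check(uint32_t node) const {
            if (auto it = Locks.find(node); it != Locks.end()) {
                std::cout << "Cannot lock node '" << node
                          << "': node state: 'Locked' (held by " << it->second << ")\n";
                return Verdict::DisallowTemp;  // caller stores the request and retries
            }
            return Verdict::Allow;
        }
        void Lock(uint32_t node, std::string permission) { Locks[node] = std::move(permission); }
        void Release(uint32_t node) { Locks.erase(node); }
    };

    int main() {
        NodeLocks locks;
        locks.Lock(18, "user-p-1");
        locks.Check(18);            // DISALLOW_TEMP, as for Wall-E-r-2 above
        locks.Release(18);          // "user is done with permissions user-p-1"
        std::cout << (locks.Check(18) == Verdict::Allow ? "ALLOW\n" : "?\n");
    }
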
2025-06-03T10:33:06.937081Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:06.937150Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "Wall-E" Actions { Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 } Schedule: true DryRun: false Priority: 20 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'18\': node state: \'Locked\'" } RequestId: "Wall-E-r-2" Deadline: 420234024 } 2025-06-03T10:33:06.937198Z node 17 :CMS DEBUG: cms_tx_store_walle_task.cpp:23: TTxStoreWalleTask Execute 2025-06-03T10:33:06.937220Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store wall-e task: id# task-1, requestId# Wall-E-r-2 2025-06-03T10:33:06.947983Z node 17 :CMS DEBUG: cms_tx_store_walle_task.cpp:53: TTxStoreWalleTask Complete 2025-06-03T10:33:06.948018Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvStoreWalleTask { Task: { TaskId: task-1 RequestId: Wall-E-r-2 Owner: Permissions: [] HasSingleCompositeActionGroup: 0 CreateTime: 1970-01-01T00:00:00.000000Z LastRefreshTime: 1970-01-01T00:00:00.000000Z } }, response# NKikimr::NCms::TEvCms::TEvWalleTaskStored { TaskId: task-1 } 2025-06-03T10:33:06.948077Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCreateTaskRequest { TaskId: "task-1" Type: "automated" Issuer: "UT" Action: "reboot" Hosts: "18" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvWalleCreateTaskResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'18\': node state: \'Locked\'" } TaskId: "task-1" Hosts: "18" } 2025-06-03T10:33:07.021491Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:07.021531Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:07.021549Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:07.021717Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:07.021731Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. 
Down: " } 2025-06-03T10:33:07.021742Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:07.021786Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:07.021810Z node 17 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-03T10:33:07.021819Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (18) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:07.021831Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:07.021876Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.337048Z, action# Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 2025-06-03T10:33:07.021886Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:07.032863Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:07.032945Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } Deadline: 180337048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12002 } } } } 2025-06-03T10:33:07.033085Z node 17 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:33:07.033096Z node 17 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:07.033112Z node 17 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:07.033135Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:33:07.044048Z node 17 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:07.044114Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:07.044242Z node 17 :CMS INFO: walle_check_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-1" 2025-06-03T10:33:07.055585Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:07.055624Z node 17 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:07.055641Z node 17 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:07.055806Z node 17 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'18\': node state: \'Locked\'" } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 20 2025-06-03T10:33:07.055817Z node 17 :CMS DEBUG: cms.cpp:379: Checking action: Type: REBOOT_HOST Host: "18" Duration: 
18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'18\': node state: \'Locked\'" } 2025-06-03T10:33:07.055827Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:07.055867Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:07.055888Z node 17 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# Wall-E-p-3, requestId# Wall-E-r-2, owner# Wall-E 2025-06-03T10:33:07.055896Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (18) (permission Wall-E-p-3 until 586524-01-19T08:01:49Z) 2025-06-03T10:33:07.055908Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:07.055945Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# Wall-E-p-3, validity# 586524-01-19T08:01:49.551615Z, action# Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 2025-06-03T10:33:07.055955Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# Wall-E-r-2, owner# Wall-E 2025-06-03T10:33:07.066920Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:07.067004Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "Wall-E" RequestId: "Wall-E-r-2" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "Wall-E-p-3" Action { Type: REBOOT_HOST Host: "18" Duration: 18446744073709551615 } Deadline: 18446744073709551615 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12002 } } } } 2025-06-03T10:33:07.067051Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCheckTaskRequest { TaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvWalleCheckTaskResponse { Status { Code: ALLOW } Task { TaskId: "task-1" Hosts: "18" } }
>> TDSProxyGetTest::TestBlock42WipedErrorWithTwoBlobs [GOOD]
>> TDSProxyPatchTest::NaiveErrorOnPut_Erasure4Plus2Block
>> TxUsage::WriteToTopic_Demo_7_Query
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::RequestRestartServicesNoUser [GOOD]
Test command err:
2025-06-03T10:33:01.683280Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:01.684152Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:01.686474Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:01.686550Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:01.686869Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:01.687117Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:01.687146Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:01.687187Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:01.687243Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-03T10:33:01.687316Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:01.688695Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:01.688717Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:01.688739Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:01.688780Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:01.716755Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:01.749633Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:01.749752Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.751348Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.751482Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:01.751490Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:01.751500Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:01.751505Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:01.751522Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.751583Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:01.751611Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:01.753968Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:01.796370Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.796447Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:01.855864Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:01.855900Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:01.855960Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:01.856200Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 
120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... ge" Duration: 60000000 2025-06-03T10:33:05.508408Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:05.508426Z node 17 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (17) has temporary lock, VDisk [0:1:0:1:0] (::1:/18/pdisk-18.data) is locked by this request. Down: ) 2025-06-03T10:33:05.508440Z node 17 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:33:05.508448Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (17) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:05.508460Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:05.508503Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.029000Z, action# Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 2025-06-03T10:33:05.559354Z node 17 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:05.601198Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:05.601343Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "17" Services: "storage" Duration: 60000000 } Deadline: 180029000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 17 InterconnectPort: 12001 } } } } 2025-06-03T10:33:05.601361Z node 17 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.029000Z 2025-06-03T10:33:07.214944Z node 25 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:07.215757Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:07.217432Z node 25 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:07.217467Z node 25 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:07.217496Z node 25 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:33:07.217552Z node 25 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:07.217911Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:07.217962Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:07.218221Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:07.218242Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:07.220240Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:07.220273Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:07.220326Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:07.220357Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:07.242492Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:07.275366Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:07.275487Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:07.275528Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:07.275656Z node 25 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:07.275664Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:07.275673Z node 25 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:07.275679Z node 25 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:07.275713Z node 25 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:07.275730Z node 25 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:07.275745Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:07.276212Z node 25 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 25 PDiskId: 25 Path: "/25/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 26 PDiskId: 26 Path: "/26/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 27 PDiskId: 27 Path: "/27/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 28 PDiskId: 28 Path: "/28/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 29 PDiskId: 29 Path: "/29/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 30 PDiskId: 30 Path: "/30/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 31 PDiskId: 31 Path: "/31/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 32 PDiskId: 32 Path: "/32/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } 
VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } VSlotId { 
NodeId: 26 PDiskId: 26 VSlotId: 1001 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:07.319228Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:07.319316Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:07.319567Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "" Actions { Type: RESTART_SERVICES Host: "::1" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Missing user in request" } }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResult+Ascending [GOOD]
Test command err:
Trying to start YDB, gRPC: 24686, MsgBus: 1601
2025-06-03T10:33:06.663755Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669250442002235:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:06.663775Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f95/r3tmp/tmpibt3po/pdisk_1.dat 2025-06-03T10:33:06.721814Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669250442002213:2079] 1748946786663601 != 1748946786663604 2025-06-03T10:33:06.723725Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24686, node 1 2025-06-03T10:33:06.733368Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:06.733384Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:06.733386Z node 1 :NET_CLASSIFIER WARN:
net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:06.733427Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1601 TClient is connected to server localhost:1601 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:06.780156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.790874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.797188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:06.797215Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:06.798336Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:06.853196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.871155Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.883267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.987427Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669250442003847:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:06.987479Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.033311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.040889Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.096069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.106427Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.120577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.135068Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.148601Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.164402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669254736971797:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.164426Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.164445Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669254736971802:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.165198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:07.168323Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669254736971804:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:07.253805Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669254736971855:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:07.413334Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669254736972146:2506] TxId: 281474976710673. Ctx: { TraceId: 01jwtnm7244m1q9t2fdk3c6b03, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQxNDYxMDEtY2MyMDIwNDYtNjhmYTAzZjQtNmMzZGY1ZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:33:07.413426Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jwtnm7244m1q9t2fdk3c6b03, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ODQxNDYxMDEtY2MyMDIwNDYtNjhmYTAzZjQtNmMzZGY1ZWY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976710674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:07.425751Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946787455, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 20272, MsgBus: 30132 2025-06-03T10:33:07.540272Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669252778093279:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:07.540300Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f95/r3tmp/tmpJ92mW4/pdisk_1.dat 2025-06-03T10:33:07.554442Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669252778093259:2079] 1748946787540157 != 1748946787540160 2025-06-03T10:33:07.554467Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20272, node 2 2025-06-03T10:33:07.563460Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:07.563481Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:07.563484Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:07.563541Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30132 TClient is connected to server localhost:30132 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:07.645565Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:07.645588Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:07.646348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.646649Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:33:07.658597Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.668716Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.687407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.699669Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.862973Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669252778094884:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.862995Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.868221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.877209Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.890937Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.945782Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.953619Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.967914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.981914Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.997910Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669252778095539:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.997934Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669252778095544:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.997941Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.998752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:08.001339Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669252778095546:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:08.089203Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669257073062893:3392] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:08.213181Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnm7v5dkd85kgqh8fsb6az, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=ZjZlNDA5MWItYWE2NDkxZWMtNGI1NTA4NjgtMmNmMzU2ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:08.596777Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946788260, txId: 281474976715672] shutting down
>> KqpSplit::BorderKeys+Unspecified [GOOD]
>> TDSProxyPatchTest::NaiveErrorOnPut_Erasure4Plus2Block [GOOD]
>> TDSProxyPutTest::TestBlock42PutAllOk
>> KqpScan::PureExpr [GOOD]
>> KqpScan::RestrictSqlV0
>> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD]
>> TDSProxyPutTest::TestBlock42PutAllOk [GOOD]
>> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Block [GOOD]
>> TCmsTest::ScheduledWalleRequestDuringRollingRestart [GOOD]
>> TCmsTest::SamePriorityRequest2
>> KqpScanLogs::GraceJoin-EnabledLogs [GOOD]
>> CheckIntegrityBlock42::PlacementOk
>> CheckIntegrityMirror3dc::PlacementOk
>> TTabletPipeTest::TestTwoNodes
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::BorderKeys+Unspecified [GOOD]
Test command err:
Trying to start YDB, gRPC: 62265, MsgBus: 16494
2025-06-03T10:33:06.704335Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669249316698055:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:06.704353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f88/r3tmp/tmpySzzWK/pdisk_1.dat 2025-06-03T10:33:06.755302Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669249316698032:2079] 1748946786704170 != 1748946786704173 2025-06-03T10:33:06.755755Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62265, node 1 2025-06-03T10:33:06.768989Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:06.769002Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:33:06.769004Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:06.769041Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16494 TClient is connected to server localhost:16494 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:06.834272Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:06.834303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:06.835290Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:06.835847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.847727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.865581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.924565Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:06.935682Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:07.038771Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669253611666969:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.038800Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.071355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.079070Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.092840Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.106528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.161846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.169390Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.184041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:07.199480Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669253611667624:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.199502Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.199504Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669253611667629:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:07.200164Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:07.203064Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669253611667631:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:07.269895Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669253611667682:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:07.423545Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669253611667974:2506] TxId: 281474976715673. Ctx: { TraceId: 01jwtnm72b4jw9xjh2fztmr82z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY0NzliMjctZjEyNTFlN2UtNmZjNDk2MDgtYzcxODJmOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:33:07.423634Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnm72b4jw9xjh2fztmr82z, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZDY0NzliMjctZjEyNTFlN2UtNmZjNDk2MDgtYzcxODJmOWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:07.694356Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946787469, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 23221, MsgBus: 13246 2025-06-03T10:33:07.986247Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669254349542279:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:07.986292Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f88/r3tmp/tmp8aPjYJ/pdisk_1.dat 2025-06-03T10:33:08.000810Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:08.000977Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669254349542260:2079] 1748946787986122 != 1748946787986125 TServer::EnableGrpc on GrpcPort 23221, node 2 2025-06-03T10:33:08.009282Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:08.009309Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:08.009311Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:08.009356Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13246 TClient is connected to server localhost:13246 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:08.090702Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:08.090738Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:08.090989Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.091735Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:08.091863Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:08.095958Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.108167Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.123951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.136068Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.289536Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669258644511187:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.289583Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.300960Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.310880Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.329255Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.338529Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.353095Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.367335Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.381190Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.397286Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669258644511839:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.397337Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669258644511844:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.397358Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.398133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:08.400035Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669258644511846:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:08.486518Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669258644511897:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:08.621878Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnm87see63sx5sjvxj64sa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=OTIzY2I0YWMtNDkwNTYyZWItYWYyYzY5Y2EtZjk3ZjQ4NDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715675 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:09.121750Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946788666, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::VDisksEvictionShouldFailOnMultipleActions [GOOD] Test command err: 2025-06-03T10:33:03.849749Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:03.850284Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:03.851982Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:03.852031Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:03.852295Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.852318Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.852523Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:03.852546Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:03.852588Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:33:03.852650Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:03.853690Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:03.853707Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.853726Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:03.853759Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.881119Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:03.913165Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.913261Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.914429Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.914541Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:03.914547Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:03.914553Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:03.914556Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:03.914570Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.914619Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:03.914644Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:03.916138Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:03.958099Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.958161Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:04.018777Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:04.018823Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:04.018903Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:04.019249Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 
120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... imestamp: 120029000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 21 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 22 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120029000 } } 2025-06-03T10:33:07.698766Z node 17 :CMS DEBUG: sentinel.cpp:486: [Sentinel] [ConfigUpdater] Handle TEvCms::TEvClusterStateResponse: response# Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-17-17" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 17 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-18-18" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 18 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: 
"::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-19-19" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 19 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-20-20" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 20 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-21-21" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 21 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-22-22" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 22 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-23-23" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 23 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-24-24" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 24 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120029000 } 2025-06-03T10:33:07.698798Z node 17 :CMS DEBUG: sentinel.cpp:944: [Sentinel] [Main] Config was 
updated in 120.003000s 2025-06-03T10:33:07.698808Z node 17 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start StateUpdater 2025-06-03T10:33:07.698841Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:07.698903Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 17, wbId# [17:8388350642965737326:1634689637] 2025-06-03T10:33:07.698908Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 18, wbId# [18:8388350642965737326:1634689637] 2025-06-03T10:33:07.698911Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 19, wbId# [19:8388350642965737326:1634689637] 2025-06-03T10:33:07.698914Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 20, wbId# [20:8388350642965737326:1634689637] 2025-06-03T10:33:07.698917Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 21, wbId# [21:8388350642965737326:1634689637] 2025-06-03T10:33:07.698920Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 22, wbId# [22:8388350642965737326:1634689637] 2025-06-03T10:33:07.698923Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 23, wbId# [23:8388350642965737326:1634689637] 2025-06-03T10:33:07.698943Z node 17 :CMS DEBUG: sentinel.cpp:683: [Sentinel] [StateUpdater] Request pdisks state: nodeId# 24, wbId# [24:8388350642965737326:1634689637] 2025-06-03T10:33:07.699007Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 17, response# PDiskStateInfo { PDiskId: 17 CreateTime: 0 ChangeTime: 0 Path: "/17/pdisk-17.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699080Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 20, response# PDiskStateInfo { PDiskId: 20 CreateTime: 0 ChangeTime: 0 Path: "/20/pdisk-20.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699114Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 21, response# PDiskStateInfo { PDiskId: 21 CreateTime: 0 ChangeTime: 0 Path: "/21/pdisk-21.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699128Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 22, response# PDiskStateInfo { PDiskId: 22 CreateTime: 0 ChangeTime: 0 Path: "/22/pdisk-22.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699137Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 18, response# PDiskStateInfo { PDiskId: 18 CreateTime: 0 ChangeTime: 0 Path: "/18/pdisk-18.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699144Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 19, response# PDiskStateInfo { PDiskId: 19 CreateTime: 0 ChangeTime: 0 Path: "/19/pdisk-19.data" Guid: 1 AvailableSize: 107374182400 
TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699154Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 23, response# PDiskStateInfo { PDiskId: 23 CreateTime: 0 ChangeTime: 0 Path: "/23/pdisk-23.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699162Z node 17 :CMS DEBUG: sentinel.cpp:693: [Sentinel] [StateUpdater] Handle TEvWhiteboard::TEvPDiskStateResponse: nodeId# 24, response# PDiskStateInfo { PDiskId: 24 CreateTime: 0 ChangeTime: 0 Path: "/24/pdisk-24.data" Guid: 1 AvailableSize: 107374182400 TotalSize: 214748364800 State: Normal } ResponseTime: 120029 2025-06-03T10:33:07.699169Z node 17 :CMS DEBUG: sentinel.cpp:960: [Sentinel] [Main] State was updated in 0.000000s 2025-06-03T10:33:07.750122Z node 17 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:07.792022Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:07.792146Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "17" Duration: 60000000 } Actions { Type: SHUTDOWN_HOST Host: "18" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false EvictVDisks: true }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Cannot perform several actions and evict vdisks" } RequestId: "user-r-1" } >> CheckIntegrityBlock42::PlacementOk [GOOD] >> CheckIntegrityBlock42::PlacementOkHandoff >> CheckIntegrityMirror3dc::PlacementOk [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff >> TCmsTest::TestForceRestartModeDisconnects [GOOD] >> TCmsTest::StateStorageTwoRings >> TCmsTest::ManagePermissionWrongRequest [GOOD] >> TCmsTest::ManageRequests >> CheckIntegrityBlock42::PlacementOkHandoff [GOOD] >> CheckIntegrityBlock42::PlacementMissingParts |70.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDsProxyQuorumTracker::CheckFailModelErasure3Plus1Block [GOOD] >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] >> KqpScan::IsNull >> TResourceBrokerInstant::TestMerge >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] >> TMaintenanceApiTest::ActionReason [GOOD] >> CheckIntegrityMirror3dc::PlacementOkHandoff [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts >> TTabletPipeTest::TestTwoNodes [GOOD] >> KqpScan::NullInKey >> KqpScan::DqSourceFullScan >> KqpScan::RestrictSqlV0 [GOOD] >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTenatsTest::TestClusterRatioLimitForceRestartModeScheduled [GOOD] Test command err: 2025-06-03T10:33:00.957712Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:00.958333Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:00.960392Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:00.960452Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:00.960762Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:00.960795Z node 1 :CMS DEBUG: 
cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:00.961034Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:00.961061Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:00.961110Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 2025-06-03T10:33:00.961175Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:00.962331Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:00.962356Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:00.962383Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:00.962417Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:00.990237Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:01.022679Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:01.022813Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.024321Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.024471Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:01.024477Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:01.024485Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:01.024488Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:01.024504Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.024562Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:01.024588Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:01.025899Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { } } Success: true 2025-06-03T10:33:01.067992Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.068061Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:01.068228Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvGetConfigRequest { }, response# NKikimr::NCms::TEvCms::TEvGetConfigResponse { Status { Code: OK } Config { DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 
90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } } } 2025-06-03T10:33:01.068289Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:01.119942Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:01.120070Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:01.120253Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 5 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 6 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 7 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "dynnode" State: UP Version: "-1" Timestamp: 120028000 } Timestamp: 120028000 NodeId: 8 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120028000 } } 2025-06-03T10:33:01.161041Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:01.202390Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:01.202476Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: DefaultRetryTime: 300000000 DefaultPermissionDuration: 300000000 TenantLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesLimit: 0 DisabledNodesRatioLimit: 10 } InfoCollectionTimeout: 15000000 LogConfig { DefaultLevel: ENABLED TTL: 1209600000000 } SentinelConfig { Enable: false UpdateConfigInterval: 3600000000 RetryUpdateConfig: 60000000 UpdateStateInterval: 60000000 UpdateStateTimeout: 45000000 RetryChangeStatus: 10000000 ChangeStatusRetries: 5 DefaultStateLimit: 60 DataCenterRatio: 50 RoomRatio: 70 RackRatio: 90 DryRun: false EvictVDisksStatus: FAULTY GoodStateLimit: 5 FaultyPDisksThresholdPerNode: 0 } 
2025-06-03T10:33:01.202488Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:01.236568Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:01.236600Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:01.236615Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:01.236687Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:01.236694Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:33:01.236704Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 1, with state: Up, with limit: 0, with ratio limit: 10, locked nodes: 0, down nodes: 0 2025-06-03T10:33:01.236711Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:33:01.236714Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:33:01.236716Z node 1 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:33:01.236719Z node 1 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:01.236731Z node 1 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-1, requestId# user-r-1, owner# user 2025-06-03T10:33:01.236736Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:01.236744Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:01.236782Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.129000Z, action# Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 2025-06-03T10:33:01.247670Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:01.247769Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: SHUTDOWN_HOST Host: "1" Duration: 60000000 } Deadline: 180129000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 1 InterconnectPort: 12001 } } } } 2025-06-03T10:33:01.247778Z node 1 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.129000Z 2025-06-03T10:33:01.268928Z node 1 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (1) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:01.268981Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:01.268999Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:01.269010Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:01.269053Z node 1 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: false TenantPolicy: NONE AvailabilityMode: 
MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:01.269060Z node 1 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "2" Duration: 60000000 2025-06-03T10:33:01.269069Z node 1 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 2, with state: Up, with limit: 0, with ratio limit: 10, locked nodes: 1, down nodes: 0 2025-06-03T10:33:01.269078Z node 1 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '2': too many unavailable nodes. Locked: 1, down: 0, total: 8, limit: 10%) 2025-06-03T10:33:01.269087Z node 1 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:01.279966Z node 1 :CMS DEBU ... ed: 1, down: 0, total: 8, limit: 20%" } 2025-06-03T10:33:08.148772Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 20, locked nodes: 0, down nodes: 0 2025-06-03T10:33:08.148778Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:08.148784Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:08.148799Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 1, down: 0, total: 8, limit: 20%" } 2025-06-03T10:33:08.148804Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 20, locked nodes: 1, down nodes: 0 2025-06-03T10:33:08.148815Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27': too many unavailable nodes. Locked: 1, down: 0, total: 8, limit: 20%) 2025-06-03T10:33:08.148836Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-1, owner# user 2025-06-03T10:33:08.148844Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:08.148855Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:08.148896Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.537560Z, action# Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 2025-06-03T10:33:08.148926Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. 
Locked: 1, down: 0, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:08.159827Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:08.159920Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-2" Action { Type: SHUTDOWN_HOST Host: "26" Duration: 60000000 } Deadline: 180537560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:33:08.160059Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:33:08.160071Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:08.160084Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:08.160109Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:33:08.170911Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:08.170975Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:08.192492Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:08.192576Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:08.192632Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 1, down: 0, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:08.192643Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 1, down: 0, total: 8, limit: 20%" } 2025-06-03T10:33:08.192654Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 20, locked nodes: 0, down nodes: 1 2025-06-03T10:33:08.192663Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%) 2025-06-03T10:33:08.192682Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:08.192715Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. 
Locked: 0, down: 1, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:08.203685Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:08.203728Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:08.203791Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } RequestId: "user-r-1" Deadline: 420640584 } 2025-06-03T10:33:08.277269Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:08.277412Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:08.277493Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:08.277509Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } 2025-06-03T10:33:08.277524Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 20, locked nodes: 0, down nodes: 1 2025-06-03T10:33:08.277537Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%) 2025-06-03T10:33:08.277564Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:08.277610Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# 0, body# User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:08.291029Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:08.291067Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:08.291141Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_KEEP_AVAILABLE }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'27\': too many unavailable nodes. 
Locked: 0, down: 1, total: 8, limit: 20%" } RequestId: "user-r-1" Deadline: 420742096 } 2025-06-03T10:33:08.313253Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:08.313377Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:08.313457Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_FORCE_RESTART EvictVDisks: false 2025-06-03T10:33:08.313474Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 Issue { Type: DISABLED_NODES_LIMIT_REACHED Message: "Cannot lock node \'27\': too many unavailable nodes. Locked: 0, down: 1, total: 8, limit: 20%" } 2025-06-03T10:33:08.313489Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 20, locked nodes: 0, down nodes: 1 2025-06-03T10:33:08.313495Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 1 2025-06-03T10:33:08.313503Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:08.313529Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-03T10:33:08.313539Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12003 (27) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:08.313554Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:08.313584Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.843608Z, action# Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 2025-06-03T10:33:08.313600Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:08.324661Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:08.324692Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:08.324803Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_FORCE_RESTART }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: SHUTDOWN_HOST Host: "27" Duration: 60000000 } Deadline: 180843608 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 27 InterconnectPort: 12003 } } } } |70.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |70.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot |70.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/ydb-core-tx-datashard-ut_snapshot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityBlock42::PlacementMissingParts [GOOD] Test command err: RandomSeed# 3490268011795939296 *** PUT BLOB 
[72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:5:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:6:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:0:3:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:0:4:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:7:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:4] TO [82000000:1:0:0:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:5] TO [82000000:1:0:1:0] FINISHED WITH OK ***
*** PUT BLOB [72075186270680851:57:3905:6:786432:1024:6] TO [82000000:1:0:2:0] FINISHED WITH OK ***
>> TResourceBrokerInstant::TestMerge [GOOD]
>> TTabletCountersAggregator::ColumnShardCounters
>> KqpScan::JoinSimple
>> TCmsTest::WalleTasksWithNodeLimit [GOOD]
>> TCmsTest::WalleTasksDifferentPriorities
>> TTabletCountersAggregator::ColumnShardCounters [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TMaintenanceApiTest::ActionReason [GOOD]
Test command err:
2025-06-03T10:33:06.453977Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute
2025-06-03T10:33:06.454567Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot
2025-06-03T10:33:06.456762Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete
2025-06-03T10:33:06.456804Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute
2025-06-03T10:33:06.456853Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-03T10:33:06.456928Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:06.457358Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:06.457413Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:06.457702Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:06.457721Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:06.459238Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:06.459257Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:06.459289Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:06.459335Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:06.489213Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:06.542359Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:06.542443Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.543514Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.543610Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:06.543615Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:06.543622Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:06.543625Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:06.543636Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.543681Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:06.543720Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:06.545532Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:06.588032Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.588102Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:06.629025Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:06.629076Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:06.629156Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:06.629437Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 
120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... otId { NodeId: 15 PDiskId: 15 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1000 } VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1000 } VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1000 } VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1000 } VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1000 } VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1000 } VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1000 } VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1001 } VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1001 } VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1001 } VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1001 } VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1001 } VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1001 } VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1001 } VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1002 } VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1002 } VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1002 } VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1002 } VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1002 } VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1002 } VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1002 } VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 9 PDiskId: 9 VSlotId: 1003 } VSlotId { NodeId: 10 PDiskId: 10 VSlotId: 1003 } VSlotId { NodeId: 11 PDiskId: 11 VSlotId: 1003 } VSlotId { NodeId: 12 PDiskId: 12 VSlotId: 1003 } VSlotId { NodeId: 13 PDiskId: 13 VSlotId: 1003 } VSlotId { NodeId: 14 PDiskId: 14 VSlotId: 1003 } VSlotId { NodeId: 15 PDiskId: 15 VSlotId: 1003 } VSlotId { NodeId: 16 PDiskId: 16 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:08.415049Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:08.415134Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: 
TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:08.448328Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:08.448367Z node 9 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:08.448385Z node 9 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:08.448614Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-9-9" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 9 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-10-10" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 10 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-11-11" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 11 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-12-12" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 12 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-4-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-13-13" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 13 InterconnectPort: 12005 Location { DataCenter: "1" Module: "5" Rack: "5" Unit: "5" } StartTimeSeconds: 0 } Hosts { 
Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-5-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-14-14" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 14 InterconnectPort: 12006 Location { DataCenter: "1" Module: "6" Rack: "6" Unit: "6" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-6-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-15-15" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 15 InterconnectPort: 12007 Location { DataCenter: "1" Module: "7" Rack: "7" Unit: "7" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-7-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-16-16" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 16 InterconnectPort: 12008 Location { DataCenter: "1" Module: "8" Rack: "8" Unit: "8" } StartTimeSeconds: 0 } Timestamp: 120028000 } } 2025-06-03T10:33:08.448663Z node 9 :CMS INFO: cms.cpp:347: Check request: User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "9" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "10" Duration: 600000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "task-1" 2025-06-03T10:33:08.448670Z node 9 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "9" Duration: 600000000 2025-06-03T10:33:08.448680Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 9, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:08.448706Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 0; State: Ok 2025-06-03T10:33:08.448709Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 1; State: Ok 2025-06-03T10:33:08.448711Z node 9 :CMS DEBUG: cms.cpp:729: Ring: 2; State: Ok 2025-06-03T10:33:08.448713Z node 9 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:08.448720Z node 9 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "10" Duration: 600000000 2025-06-03T10:33:08.448723Z node 9 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 10, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:08.448737Z node 9 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (9) has temporary lock, VDisk [0:1:0:1:0] (::1:/10/pdisk-10.data) is locked by this request. 
Down: ) 2025-06-03T10:33:08.448750Z node 9 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# test-user-p-1, requestId# test-user-r-1, owner# test-user 2025-06-03T10:33:08.448756Z node 9 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (9) (permission test-user-p-1 until 1970-01-01T00:12:00Z) 2025-06-03T10:33:08.448764Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:08.448807Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# test-user-p-1, validity# 1970-01-01T00:12:00.028000Z, action# Type: SHUTDOWN_HOST Host: "9" Duration: 600000000 2025-06-03T10:33:08.448827Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# test-user-r-1, owner# test-user, order# 1, priority# 0, body# User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "10" Duration: 600000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (9) has temporary lock, VDisk [0:1:0:1:0] (::1:/10/pdisk-10.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:08.489603Z node 9 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:08.531422Z node 9 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:08.531551Z node 9 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "test-user" Actions { Type: SHUTDOWN_HOST Host: "9" Duration: 600000000 } Actions { Type: SHUTDOWN_HOST Host: "10" Duration: 600000000 } PartialPermissionAllowed: true Schedule: true DryRun: false Reason: "" AvailabilityMode: MODE_MAX_AVAILABILITY MaintenanceTaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "test-user-r-1" Permissions { Id: "test-user-p-1" Action { Type: SHUTDOWN_HOST Host: "9" Duration: 600000000 } Deadline: 720028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 9 InterconnectPort: 12001 } } } } 2025-06-03T10:33:08.531564Z node 9 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:32:00.028000Z |70.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestTwoNodes [GOOD] >> LocalPartition::DirectWriteWithoutDescribeResourcesPermission [GOOD] >> LocalPartition::WithoutPartitionWithSplit >> LocalPartition::DescribeBadPartition [GOOD] >> LocalPartition::DescribeHang >> KqpScan::TwoAggregatesOneFullFrameWindow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::RestrictSqlV0 [GOOD] Test command err: Trying to start YDB, gRPC: 22602, MsgBus: 22732 2025-06-03T10:33:08.204881Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669258828701097:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:08.204917Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f84/r3tmp/tmp6hpVwu/pdisk_1.dat 2025-06-03T10:33:08.255189Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification 
cookie mismatch for subscription [1:7511669258828701077:2079] 1748946788204742 != 1748946788204745 2025-06-03T10:33:08.256432Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22602, node 1 2025-06-03T10:33:08.267163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:08.267178Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:08.267181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:08.267233Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22732 TClient is connected to server localhost:22732 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:08.334755Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:08.334784Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:08.335862Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:08.336636Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.350616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.415819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.433874Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:08.444021Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:08.548042Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669258828702708:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.548072Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.608523Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.619411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.632953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.689522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.745775Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.758821Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.773208Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:08.789332Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669258828703363:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.789348Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669258828703368:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.789360Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:08.790349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:08.800285Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669258828703370:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:08.900931Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669258828703421:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:09.082576Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669263123670998:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtnm8pbd9g85yg0ney9aqgh, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzY1NGM0ZDktZWUxMjY4ZWUtMjA1OTdjYzUtNzQ4YjUyMmQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:09.084423Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946789082, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 9396, MsgBus: 14220 2025-06-03T10:33:09.286766Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669263824837977:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:09.286805Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f84/r3tmp/tmpiX5xpO/pdisk_1.dat 2025-06-03T10:33:09.300692Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:09.300902Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669263824837957:2079] 1748946789286661 != 1748946789286664 TServer::EnableGrpc on GrpcPort 9396, node 2 2025-06-03T10:33:09.312262Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:09.312278Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:09.312280Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:09.312336Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14220 TClient is connected to server localhost:14220 WaitRootIsUp 'Root'... 
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:33:09.390607Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:33:09.390640Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:33:09.391119Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:33:09.391633Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected
2025-06-03T10:33:09.397802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:33:09.409439Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:33:09.433139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:33:09.445643Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:33:09.625709Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669263824839587:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:09.625743Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:09.634993Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.643502Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.655221Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.669053Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.683577Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.698114Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.711722Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:09.727593Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669263824840240:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:09.727637Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:09.727653Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669263824840245:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:09.728695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:09.738081Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669263824840247:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:09.815320Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669263824840298:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:09.926795Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669263824840569:2511], status: GENERIC_ERROR, issues:
:1:0: Error: V0 syntax is disabled 2025-06-03T10:33:09.926923Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=ZGM5NjY3MmItN2QwODViY2QtZTU2YmE0YmEtNWEwMDdiYjA=, ActorId: [2:7511669263824840562:2507], ActorState: ExecuteState, TraceId: 01jwtnm9h30xn07pfw61k8ngfv, ReplyQueryCompileError, status GENERIC_ERROR remove tx with tx_id:
:1:0: Error: V0 syntax is disabled ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/unittest >> CheckIntegrityMirror3dc::PlacementMissingParts [GOOD] Test command err: RandomSeed# 2010547040883369356 *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:0:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:2] TO [82000000:1:2:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:3] TO [82000000:1:0:1:0] FINISHED WITH OK *** *** PUT BLOB [72075186270680851:57:3905:6:786432:1024:1] TO [82000000:1:1:0:0] FINISHED WITH OK *** >> KqpSplit::AfterResultMultiRange+Ascending ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersAggregator::ColumnShardCounters [GOOD] Test command err: 2025-06-03T10:33:10.096197Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:1115: TResourceBrokerActor bootstrap 2025-06-03T10:33:10.096341Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-1 (1 by [1:100:2134]) priority=0 resources={100, 200} 2025-06-03T10:33:10.096351Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:33:10.096358Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 200} for task task-1 (1 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:33:10.096362Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:33:10.096371Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 0.000000 to 400.000000 (insert task task-1 (1 by [1:100:2134])) 2025-06-03T10:33:10.097089Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction0 task task-2 (2 by [1:100:2134]) priority=0 resources={100, 100} 2025-06-03T10:33:10.097094Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-2 (2 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:33:10.097098Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {100, 100} for task task-2 (2 by [1:100:2134]) from queue queue_compaction0 2025-06-03T10:33:10.097101Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-2 (2 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:33:10.097106Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 400.000000 to 600.000000 (insert task task-2 (2 by [1:100:2134])) 2025-06-03T10:33:10.097118Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:441: Update task task-1 (1 by [1:100:2134]) (priority=0 type=compaction0 resources={200, 300} resubmit=0) 2025-06-03T10:33:10.097121Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-1 (1 by [1:100:2134]) to queue queue_compaction0 2025-06-03T10:33:10.097125Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction0 from 200.000000 to 
800.000000 (insert task task-1 (1 by [1:100:2134]))
2025-06-03T10:33:10.097129Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:528: Finish task task-2 (2 by [1:100:2134]) (release resources {100, 100})
2025-06-03T10:33:10.097134Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:577: Updated planned resource usage for queue queue_compaction0 from 800.000000 to 600.000000 (remove task task-2 (2 by [1:100:2134]))
2025-06-03T10:33:10.097144Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:393: Submitted new compaction1 task task-3 (3 by [1:100:2134]) priority=0 resources={10, 20}
2025-06-03T10:33:10.097147Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning waiting task task-3 (3 by [1:100:2134]) to queue queue_compaction1
2025-06-03T10:33:10.097150Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:637: Allocate resources {10, 20} for task task-3 (3 by [1:100:2134]) from queue queue_compaction1
2025-06-03T10:33:10.097153Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:679: Assigning in-fly task task-3 (3 by [1:100:2134]) to queue queue_compaction1
2025-06-03T10:33:10.097157Z node 1 :RESOURCE_BROKER DEBUG: resource_broker.cpp:711: Updated planned resource usage for queue queue_compaction1 from 0.000000 to 40.000000 (insert task task-3 (3 by [1:100:2134]))
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> TTablesWithReboots::ChainedCopyTableAndDropWithReboots [GOOD]
Test command err:
==== RunWithTabletReboots
=========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142]
Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142]
2025-06-03T10:31:28.209649Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:31:28.209682Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:31:28.209691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:31:28.209708Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:31:28.209724Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:31:28.209729Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429:
OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:28.209740Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:28.209756Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:28.209898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:28.209980Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:28.245603Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:28.245635Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:28.245769Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:31:28.254606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:28.254658Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:28.254693Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:28.256193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:28.256271Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:28.256401Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:28.256505Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:28.262664Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:28.262745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:28.263070Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:28.263083Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:28.263126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:28.263137Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 
72057594046678944, LocalPathId: 1] 2025-06-03T10:31:28.263143Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:28.263168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:31:28.265001Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:31:28.300967Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:28.301063Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.301135Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:28.301191Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:28.301201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.302219Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:28.302263Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:28.302340Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.302353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:28.302360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:28.302368Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:28.303144Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.303168Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts 
operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:28.303178Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:28.303707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.303724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:28.303731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:28.303740Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:28.304638Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:28.305165Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:28.305214Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:28.305470Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:28.305509Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:28.305518Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:28.305599Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ate: 4 2025-06-03T10:33:07.723949Z node 167 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409546, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:33:07.724711Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 694 RawX2: 717259541071 } TabletId: 72075186233409549 State: 4 2025-06-03T10:33:07.724728Z node 167 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409549, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:33:07.724901Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:3 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:33:07.724955Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:33:07.725047Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 437 RawX2: 717259540838 } TabletId: 72075186233409547 State: 4 2025-06-03T10:33:07.725056Z node 167 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:33:07.725105Z node 167 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 TabletID: 72075186233409548 2025-06-03T10:33:07.725157Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:33:07.725209Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:33:07.725256Z node 167 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 Forgetting tablet 72075186233409548 2025-06-03T10:33:07.725363Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:33:07.725371Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:33:07.725385Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 5 2025-06-03T10:33:07.725892Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:33:07.725937Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for 
pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409546 2025-06-03T10:33:07.727065Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:4 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:33:07.727119Z node 167 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 4 TxId_Deprecated: 4 TabletID: 72075186233409549 Forgetting tablet 72075186233409549 2025-06-03T10:33:07.727217Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 4 ShardOwnerId: 72057594046678944 ShardLocalIdx: 4, at schemeshard: 72057594046678944 2025-06-03T10:33:07.727261Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 1 2025-06-03T10:33:07.728356Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:33:07.728403Z node 167 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 Forgetting tablet 72075186233409547 2025-06-03T10:33:07.747676Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:33:07.747713Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:3 tabletId 72075186233409548 2025-06-03T10:33:07.747785Z node 167 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 2 candidates, at schemeshard: 72057594046678944 2025-06-03T10:33:07.748497Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:33:07.748510Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:33:07.748575Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:33:07.748592Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 2 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:33:07.748601Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 6], at schemeshard: 72057594046678944 2025-06-03T10:33:07.748632Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 4 2025-06-03T10:33:07.748640Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:33:07.748647Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId 
[OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:33:07.748904Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:33:07.749813Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:4 2025-06-03T10:33:07.749833Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:4 tabletId 72075186233409549 2025-06-03T10:33:07.750280Z node 167 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 2 paths, skipped 0, left 1 candidates, at schemeshard: 72057594046678944 2025-06-03T10:33:07.750317Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:33:07.750328Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:33:07.750343Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:33:07.750352Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:33:07.750378Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:33:07.751024Z node 167 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1009, wait until txId: 1009 TestWaitNotification wait txId: 1009 2025-06-03T10:33:07.751121Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1009: send EvNotifyTxCompletion 2025-06-03T10:33:07.751131Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1009 2025-06-03T10:33:07.751216Z node 167 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1009, at schemeshard: 72057594046678944 2025-06-03T10:33:07.751240Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1009: got EvNotifyTxCompletionResult 2025-06-03T10:33:07.751245Z node 167 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1009: satisfy waiter [167:1076:3012] TestWaitNotification: OK eventTxId 1009 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted wait until 72075186233409550 is deleted wait until 72075186233409551 is deleted wait until 72075186233409552 is deleted wait until 72075186233409553 is deleted wait until 72075186233409554 is deleted 2025-06-03T10:33:07.751349Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:33:07.751360Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] 
TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:33:07.751369Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-03T10:33:07.751379Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 2025-06-03T10:33:07.751387Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409550 2025-06-03T10:33:07.751396Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409551 2025-06-03T10:33:07.751408Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409552 2025-06-03T10:33:07.751417Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409553 2025-06-03T10:33:07.751423Z node 167 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409554 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 Deleted tabletId 72075186233409550 Deleted tabletId 72075186233409551 Deleted tabletId 72075186233409552 Deleted tabletId 72075186233409553 Deleted tabletId 72075186233409554 >> KqpScan::Join >> TCmsTest::StateStorageTwoRings [GOOD] >> TCmsTest::SysTabletsNode >> KqpScan::IsNull [GOOD] >> KqpScan::IsNullPartial |70.1%| [TA] $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... results_accumulator.log} >> KqpScan::NullInKey [GOOD] >> KqpScan::NullInKeySuffix >> TCmsTest::SamePriorityRequest2 [GOOD] >> KqpScan::JoinSimple [GOOD] >> KqpScan::JoinWithParams >> KqpScan::DecimalColumn >> KqpScan::UnionAggregate >> KqpScan::DqSourceFullScan [GOOD] >> KqpScan::DqSource >> KqpSplit::AfterResolve+Ascending >> TxUsage::WriteToTopic_Demo_18_RestartNo_Table [GOOD] >> KqpSplit::AfterResult+Descending >> KqpScan::TaggedScalar >> KqpScan::RightSemiJoinSimple >> TDSProxyGetTest::TestBlock42GetIntervalsWipedError [GOOD] >> TDSProxyPatchTest::SecuredOk_Erasure4Plus2Block ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::SamePriorityRequest2 [GOOD] Test command err: 2025-06-03T10:33:03.503973Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:03.504880Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:03.506492Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:03.506558Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:03.506599Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
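Aside on the RESOURCE_BROKER lines at the start of this section: they trace a task lifecycle — a task is submitted to a queue with a {CPU, memory} pair, assigned, allocated, and the queue's planned resource usage moves up on insert and back down on finish. The following is a minimal standalone sketch of that accounting pattern; the class, the names, and the weighting rule (2 * max(cpu, memory) per task, which happens to match the deltas in this trace but is only a guess at the real policy) are all assumptions for illustration, not YDB's actual resource_broker.cpp.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct TTaskRes { uint64_t Cpu = 0; uint64_t Memory = 0; };

class TToyBroker {
public:
    // Submit: remember the task and bump the queue's planned usage,
    // like "Updated planned resource usage ... (insert task ...)" above.
    void Submit(const std::string& queue, const std::string& task, TTaskRes res) {
        Tasks[task] = {queue, res};
        Bump(queue, task, +Weight(res), "insert");
    }
    // Finish: release the resources and lower the planned usage again,
    // like "Finish task ... (release resources ...)" above.
    void Finish(const std::string& task) {
        auto it = Tasks.find(task);
        if (it == Tasks.end()) return;
        Bump(it->second.Queue, task, -Weight(it->second.Res), "remove");
        Tasks.erase(it);
    }
private:
    struct TEntry { std::string Queue; TTaskRes Res; };
    // Assumed weighting; it reproduces 800->600 for a {100,100} task and
    // 0->40 for a {10,20} task as seen in this trace.
    static double Weight(TTaskRes r) { return 2.0 * std::max(r.Cpu, r.Memory); }
    void Bump(const std::string& queue, const std::string& task,
              double delta, const char* verb) {
        double before = Planned[queue];
        Planned[queue] += delta;
        std::cout << "Updated planned usage for " << queue << " from " << before
                  << " to " << Planned[queue] << " (" << verb << " " << task << ")\n";
    }
    std::map<std::string, TEntry> Tasks;
    std::map<std::string, double> Planned;
};

int main() {
    TToyBroker broker;
    broker.Submit("queue_compaction0", "task-2", {100, 100}); // 0 -> 200
    broker.Submit("queue_compaction1", "task-3", {10, 20});   // 0 -> 40
    broker.Finish("task-2");                                  // 200 -> 0
}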
2025-06-03T10:33:03.506661Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:03.506994Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:03.507047Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:03.507308Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:03.507397Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:03.508965Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:03.508985Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:03.509015Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:03.509068Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:03.536787Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:03.568700Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:03.568781Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.569990Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.570111Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:03.570116Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:03.570123Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:03.570126Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:03.570138Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:03.570158Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:03.570224Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:03.571758Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } 
GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } 
VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:03.613671Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:03.613730Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:03.672236Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:03.672274Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:03.672354Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:03.672590Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120029000 } Timestamp: 
120029000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120029000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120029000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 120029000 } Timestamp: 120029000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... 33:09.680224Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:09.680348Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Deadline: 180028000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 25 InterconnectPort: 12001 } } } } 2025-06-03T10:33:09.680360Z node 25 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.028000Z 2025-06-03T10:33:09.705131Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12001 (25) (permission user-p-1 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:09.705262Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:09.705311Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:09.705332Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:09.705482Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:09.705490Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:09.705501Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:09.705526Z node 25 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: ) 2025-06-03T10:33:09.705544Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:09.705602Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-2, owner# user, order# 2, priority# -80, body# User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:09.716952Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:09.717085Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } RequestId: "user-r-2" Deadline: 420130512 } 2025-06-03T10:33:09.717262Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:33:09.717278Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:09.717320Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:09.717360Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:33:09.728689Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:09.728798Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:09.750939Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:09.751009Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:09.751033Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:09.751228Z node 25 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:09.751244Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has planned shutdown (permission user-p-1 owned by user), VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-03T10:33:09.751257Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:09.751300Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:09.751326Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:33:09.751335Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:09.751348Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:09.751402Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.233536Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:09.751415Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-2, owner# user 2025-06-03T10:33:09.773189Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:09.773327Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-2" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180233536 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } 2025-06-03T10:33:09.773495Z node 25 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:33:09.773506Z node 25 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:09.773523Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:09.773554Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:33:09.784570Z node 25 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:09.784650Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:09.858203Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:09.858245Z node 25 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:09.858263Z node 25 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:09.858426Z node 25 :CMS INFO: cms.cpp:347: Check 
request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:09.858440Z node 25 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (25) has temporary lock, VDisk [0:1:0:1:0] (::1:/26/pdisk-26.data) is locked by this request. Down: " } 2025-06-03T10:33:09.858452Z node 25 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:09.858496Z node 25 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:09.858519Z node 25 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-03T10:33:09.858528Z node 25 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (26) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:09.858541Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:09.858584Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.336560Z, action# Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:09.858596Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:09.869549Z node 25 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:09.869644Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180336560 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12002 } } } } |70.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |70.1%| [TA] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_check_integrity/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TCmsTest::ManageRequests [GOOD] >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag >> KqpScan::StreamLookupByPkPrefix >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query |70.1%| [LD] {RESULT} $(B)/ydb/core/tx/coordinator/ut/ydb-core-tx-coordinator-ut |70.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut |70.1%| [LD] {RESULT} $(B)/ydb/core/kqp/gateway/ut/ydb-core-kqp-gateway-ut >> KqpScan::Offset >> TDSProxyPatchTest::SecuredOk_Erasure4Plus2Block [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGetItem_ErasureMirror3dc [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_0_0_VdiskErrors >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_0_0_VdiskErrors [GOOD] >> KqpScan::Join [GOOD] >> KqpScan::Join2 |70.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |70.1%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results >> KqpScan::IsNullPartial [GOOD] >> KqpScan::GrepRange >> KqpSplit::AfterResultMultiRange+Ascending [GOOD] >> TxUsage::WriteToTopic_Demo_35_Table [GOOD] >> TCmsTest::WalleTasksDifferentPriorities [GOOD] >> KqpSplit::AfterResultMultiRange+Descending >> KqpScan::NullInKeySuffix [GOOD] |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/large_results/ydb-core-kqp-ut-federated_query-large_results |70.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/ydbd/ydbd |70.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/ydbd/ydbd >> KqpScan::JoinWithParams [GOOD] >> KqpScan::JoinLeftOnly >> TxUsage::WriteToTopic_Demo_35_Query >> TCmsTest::SysTabletsNode [GOOD] |70.1%| [LD] {RESULT} $(B)/ydb/apps/ydbd/ydbd >> KqpScan::DqSource [GOOD] >> KqpScan::DqSourceLiteralRange >> KqpSplit::AfterResolve+Ascending [GOOD] >> KqpSplit::AfterResolve+Descending ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_3_0_0_VdiskErrors [GOOD] Test command err: 2025-06-03T10:33:12.147276Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:33:12.147362Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:33:12.147373Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:33:12.147379Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:12.147384Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:12.147390Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# 
[72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:33:12.147395Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:33:12.152198Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:33:12.152279Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:12.152290Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:12.152381Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:33:12.152401Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:33:12.152452Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:33:12.152462Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:12.152467Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:12.152503Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:33:12.152511Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:33:12.152519Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 3 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:33:12.152524Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:33:12.152529Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:33:12.152580Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-03T10:33:12.152593Z node 3 :BS_PROXY_PUT 
DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:2:0] Marker# BPP01 2025-06-03T10:33:12.152612Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:33:12.152621Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:33:12.152681Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.462 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.462 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.462 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.288 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.351 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.421 VDiskId# [0:1:1:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.439 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.492 VDiskId# [0:1:0:2:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.509 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.542 VDiskId# [0:1:0:0:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 5.574 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 3 } TEvVPut{ TimestampMs# 5.574 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 5.619 VDiskId# [0:1:1:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 5.631 VDiskId# [0:1:2:2:0] NodeId# 3 Status# OK } ] } >> TCmsTest::DisabledEvictVDisks [GOOD] >> TCmsTest::EmergencyDuringRollingRestart >> KqpScan::DecimalColumn [GOOD] >> KqpScan::CustomWindow ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::WalleTasksDifferentPriorities [GOOD] Test command err: 2025-06-03T10:32:58.155472Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:58.156243Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:58.158322Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:58.158370Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:58.158406Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
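Aside on the BS_PROXY_PUT trace just above: it shows why the test is named for three VDisk errors. Mirror-3dc keeps a full replica of the blob in each of three fail realms, and when every disk the proxy tries in one realm answers ERROR, it compensates by placing extra replicas on spare disks of the healthy realms, so the put still ends in TEvPutResult OK. The following is a minimal standalone sketch of that shape under stated assumptions (a 3x3 realm/disk layout, replicas treated as full copies, and invented names such as PutMirror3dc and TVDiskPut); it illustrates the retry pattern only and is not the actual dsproxy strategy code.

#include <array>
#include <cstddef>
#include <functional>
#include <iostream>

enum class EStatus { OK, ERROR };

// A pluggable "VDisk": takes (realm, disk) and reports whether the replica
// was stored, standing in for the TEvVPut/TEvVPutResult exchange above.
using TVDiskPut = std::function<EStatus(std::size_t realm, std::size_t disk)>;

// Try to land one replica per realm; if a realm is fully down, fall back
// to spare disks in the surviving realms until enough replicas exist.
bool PutMirror3dc(const TVDiskPut& put) {
    constexpr std::size_t Realms = 3, DisksPerRealm = 3, NeededReplicas = 3;
    std::array<std::size_t, Realms> nextDisk{};  // next untried disk per realm
    std::size_t stored = 0;
    // First pass: one replica per realm, retrying within the realm on ERROR
    // (the "Sending missing VPut part# ..." lines above).
    for (std::size_t realm = 0; realm < Realms; ++realm) {
        while (nextDisk[realm] < DisksPerRealm) {
            if (put(realm, nextDisk[realm]++) == EStatus::OK) { ++stored; break; }
        }
    }
    // Handoff pass: a realm may be completely unavailable, as in this trace;
    // place the missing replicas on unused disks of the healthy realms.
    for (std::size_t realm = 0; realm < Realms && stored < NeededReplicas; ++realm) {
        while (nextDisk[realm] < DisksPerRealm && stored < NeededReplicas) {
            if (put(realm, nextDisk[realm]++) == EStatus::OK) ++stored;
        }
    }
    return stored >= NeededReplicas;
}

int main() {
    // Every disk in realm 0 fails, matching the three ERRORs in the trace;
    // the put still succeeds via extra replicas in realms 1 and 2.
    bool ok = PutMirror3dc([](std::size_t realm, std::size_t disk) {
        if (realm == 0) return EStatus::ERROR;
        std::cout << "replica stored on realm " << realm
                  << " disk " << disk << "\n";
        return EStatus::OK;
    });
    std::cout << (ok ? "put result: OK\n" : "put result: ERROR\n");
}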
2025-06-03T10:32:58.158451Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:58.159027Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:58.159079Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:58.159372Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:58.159414Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:58.160314Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:58.160470Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:58.160491Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:58.160557Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:58.190199Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:58.223242Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:58.223364Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:58.225043Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:58.225193Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:58.225203Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:58.225215Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:58.225220Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:58.225239Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:58.225314Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:58.225344Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:58.229324Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 4 Path: "/1/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 5 Path: "/1/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 6 Path: "/1/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 1 PDiskId: 7 Path: "/1/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 8 Path: "/2/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 9 Path: "/2/pdisk-9.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 10 Path: "/2/pdisk-10.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 11 Path: "/2/pdisk-11.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 12 Path: "/3/pdisk-12.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 13 Path: "/3/pdisk-13.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 14 Path: "/3/pdisk-14.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 15 Path: 
"/3/pdisk-15.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 16 Path: "/4/pdisk-16.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 17 Path: "/4/pdisk-17.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 18 Path: "/4/pdisk-18.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 19 Path: "/4/pdisk-19.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 20 Path: "/5/pdisk-20.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 21 Path: "/5/pdisk-21.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 22 Path: "/5/pdisk-22.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 23 Path: "/5/pdisk-23.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 24 Path: "/6/pdisk-24.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 25 Path: "/6/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 26 Path: "/6/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 27 Path: "/6/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 28 Path: "/7/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 29 Path: "/7/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 30 Path: "/7/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 31 Path: "/7/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 32 Path: "/8/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 33 Path: "/8/pdisk-33.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 34 Path: "/8/pdisk-34.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 35 Path: "/8/pdisk-35.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 36 Path: "/9/pdisk-36.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 37 Path: "/9/pdisk-37.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 38 Path: "/9/pdisk-38.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 9 PDiskId: 39 Path: "/9/pdisk-39.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 40 Path: "/10/pdisk-40.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 41 Path: "/10/pdisk-41.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 42 Path: "/10/pdisk-42.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 10 PDiskId: 43 Path: "/10/pdisk-43.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 44 Path: "/11/pdisk-44.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 45 Path: "/11/pdisk-45.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 46 Path: "/11/pdisk-46.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 11 PDiskId: 47 Path: "/11/pdisk-47.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 48 Path: "/12/pdisk-48.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 49 Path: "/12/pdisk-49.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 50 Path: "/12/pdisk-50.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 12 PDiskId: 51 Path: "/12/pdisk-51.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 52 Path: "/13/pdisk-52.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 53 Path: "/13/pdisk-53.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 54 Path: "/13/pdisk-54.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 13 PDiskId: 55 Path: "/13/pdisk-55.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 56 Path: "/14/pdisk-56.data" Guid: 1 
DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 57 Path: "/14/pdisk-57.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 58 Path: "/14/pdisk-58.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 14 PDiskId: 59 Path: "/14/pdisk-59.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 60 Path: "/15/pdisk-60.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 61 Path: "/15/pdisk-61.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 62 Path: "/15/pdisk-62.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 15 PDiskId: 63 Path: "/15/pdisk-63.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 64 Path: "/16/pdisk-64.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 65 Path: "/16/pdisk-65.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 66 Path: "/16/pdisk-66.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 16 PDiskId: 67 Path: "/16/pdisk-67.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 68 Path: "/17/pdisk-68.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 69 Path: "/17/pdisk-69.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 70 Path: "/17/pdisk-70.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 17 PDiskId: 71 Path: "/17/pdisk-71.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 72 Path: "/18/pdisk-72.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 73 Path: "/18/pdisk-73.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 74 Path: "/18/pdisk-74.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 18 PDiskId: 75 Path: "/18/pdisk-75.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 76 Path: "/19/pdisk-76.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 77 Path: "/19/pdisk-77.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 78 Path: "/19/pdisk-78.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 19 PDiskId: 79 Path: "/19/pdisk-79.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 80 Path: "/20/pdisk-80.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 81 Path: "/20/pdisk-81.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 82 Path: "/20/pdisk-82.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 20 PDiskId: 83 Path: "/20/pdisk-83.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 84 Path: "/21/pdisk-84.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 85 Path: "/21/pdisk-85.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 86 Path: "/21/pdisk-86.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 21 PDiskId: 87 Path: "/21/pdisk-87.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 88 Path: "/22/pdisk-88.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 89 Path: "/22/pdisk-89.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 90 Path: "/22/pdisk-90.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 22 PDiskId: 91 Path: "/22/pdisk-91.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 92 Path: "/23/pdisk-92.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 93 Path: "/23/pdisk-93.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 94 Path: "/23/pdisk-94.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 23 PDiskId: 95 Path: "/23/pdisk-95.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 96 Path: "/24/pdisk-96.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 97 Path: 
"/24/pdisk-97.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 98 Path: "/24/pdisk-98.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 24 PDiskId: 99 Path: "/24/pdisk-99.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1000 } GroupId: 4 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1001 } GroupId: 5 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 5 VSlotId: 1002 } GroupId: 6 GroupGeneration: 1 } VSl ... } } 2025-06-03T10:33:11.049881Z node 49 :CMS INFO: walle_check_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-1" 2025-06-03T10:33:11.138949Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.139008Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:11.139030Z node 49 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.139206Z node 49 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has planned shutdown (permission user-p-2 owned by user). Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 50 2025-06-03T10:33:11.139221Z node 49 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has planned shutdown (permission user-p-2 owned by user). Down: " } 2025-06-03T10:33:11.139234Z node 49 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 51, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:11.139268Z node 49 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Issue in affected group with id '0': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). Down: ) 2025-06-03T10:33:11.139298Z node 49 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:11.139365Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# Wall-E-r-2, owner# Wall-E, order# 2, priority# 50, body# User: "Wall-E" Actions { Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). 
Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 50 2025-06-03T10:33:11.152458Z node 49 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:11.152550Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "Wall-E" RequestId: "Wall-E-r-2" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). Down: " } RequestId: "Wall-E-r-2" Deadline: 420742096 } 2025-06-03T10:33:11.152621Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCheckTaskRequest { TaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvWalleCheckTaskResponse { Status { Code: DISALLOW_TEMP Reason: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). Down: " } Task { TaskId: "task-1" Hosts: "51" } } 2025-06-03T10:33:11.152765Z node 49 :CMS INFO: walle_check_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-2" 2025-06-03T10:33:11.164256Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.164298Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:11.164316Z node 49 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.164467Z node 49 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: REBOOT_HOST Host: "50" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'50\': node state: \'Locked\'" } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 20 2025-06-03T10:33:11.164478Z node 49 :CMS DEBUG: cms.cpp:379: Checking action: Type: REBOOT_HOST Host: "50" Duration: 18446744073709551615 Issue { Type: GENERIC Message: "Cannot lock node \'50\': node state: \'Locked\'" } 2025-06-03T10:33:11.164489Z node 49 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 50, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:11.164534Z node 49 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:11.164559Z node 49 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# Wall-E-p-3, requestId# Wall-E-r-3, owner# Wall-E 2025-06-03T10:33:11.164567Z node 49 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (50) (permission Wall-E-p-3 until 586524-01-19T08:01:49Z) 2025-06-03T10:33:11.164580Z node 49 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:11.164624Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# Wall-E-p-3, validity# 586524-01-19T08:01:49.551615Z, action# Type: REBOOT_HOST Host: "50" Duration: 18446744073709551615 2025-06-03T10:33:11.164636Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# Wall-E-r-3, owner# Wall-E 2025-06-03T10:33:11.179014Z node 49 :CMS 
DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:11.179098Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "Wall-E" RequestId: "Wall-E-r-3" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "Wall-E-p-3" Action { Type: REBOOT_HOST Host: "50" Duration: 18446744073709551615 } Deadline: 18446744073709551615 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 50 InterconnectPort: 12002 } } } } 2025-06-03T10:33:11.179142Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCheckTaskRequest { TaskId: "task-2" }, response# NKikimr::NCms::TEvCms::TEvWalleCheckTaskResponse { Status { Code: ALLOW } Task { TaskId: "task-2" Hosts: "50" } } 2025-06-03T10:33:11.179265Z node 49 :CMS INFO: walle_remove_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-2" 2025-06-03T10:33:11.179296Z node 49 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:11.179324Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# Wall-E-p-3, reason# explicit remove 2025-06-03T10:33:11.197774Z node 49 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:11.197838Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvRemoveWalleTask { TaskId: task-2 }, response# NKikimr::NCms::TEvCms::TEvWalleTaskRemoved { TaskId: task-2 } 2025-06-03T10:33:11.197853Z node 49 :CMS DEBUG: cms.cpp:1191: Found empty task task-2 2025-06-03T10:33:11.197931Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleRemoveTaskRequest { TaskId: "task-2" }, response# NKikimr::NCms::TEvCms::TEvWalleRemoveTaskResponse { Status { Code: OK } } 2025-06-03T10:33:11.197958Z node 49 :CMS DEBUG: cms_tx_remove_task.cpp:22: TTxRemoveTask Execute 2025-06-03T10:33:11.197989Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove task: id# task-2 2025-06-03T10:33:11.198188Z node 49 :CMS INFO: walle_check_task_adapter.cpp:29: Processing Wall-E request: TaskId: "task-1" 2025-06-03T10:33:11.209670Z node 49 :CMS DEBUG: cms_tx_remove_task.cpp:42: TTxRemoveTask Complete 2025-06-03T10:33:11.221257Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.221324Z node 49 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:11.221344Z node 49 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.221536Z node 49 :CMS INFO: cms.cpp:347: Check request: User: "Wall-E" Actions { Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). 
Down: " } } PartialPermissionAllowed: false Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: 50 2025-06-03T10:33:11.221552Z node 49 :CMS DEBUG: cms.cpp:379: Checking action: Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: VDisk [0:1:0:2:0] (::1:/51/pdisk-51.data) is locked by this request, Host ::1:12002 (50) has scheduled action Wall-E-r-3 owned by Wall-E (priority 20 vs 50). Down: " } 2025-06-03T10:33:11.221564Z node 49 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 51, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:11.221609Z node 49 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:11.221634Z node 49 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# Wall-E-p-4, requestId# Wall-E-r-2, owner# Wall-E 2025-06-03T10:33:11.221643Z node 49 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12003 (51) (permission Wall-E-p-4 until 586524-01-19T08:01:49Z) 2025-06-03T10:33:11.221656Z node 49 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:11.221719Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# Wall-E-p-4, validity# 586524-01-19T08:01:49.551615Z, action# Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 2025-06-03T10:33:11.221731Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# Wall-E-r-2, owner# Wall-E 2025-06-03T10:33:11.232825Z node 49 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:11.232906Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "Wall-E" RequestId: "Wall-E-r-2" }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "Wall-E-p-4" Action { Type: SHUTDOWN_HOST Host: "51" Duration: 18446744073709551615 } Deadline: 18446744073709551615 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 51 InterconnectPort: 12003 } } } } 2025-06-03T10:33:11.232949Z node 49 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [Wall-E adapter] Reply: request# NKikimr::NCms::TEvCms::TEvWalleCheckTaskRequest { TaskId: "task-1" }, response# NKikimr::NCms::TEvCms::TEvWalleCheckTaskResponse { Status { Code: ALLOW } Task { TaskId: "task-1" Hosts: "51" } } >> KqpScan::UnionAggregate [GOOD] >> KqpScan::UdfFailure >> KqpScan::TaggedScalar [GOOD] >> KqpScan::TooManyComputeActors >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Table [GOOD] >> KqpScan::RightSemiJoinSimple [GOOD] >> KqpScan::SecondaryIndex ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::NullInKeySuffix [GOOD] Test command err: Trying to start YDB, gRPC: 4115, MsgBus: 30702 2025-06-03T10:33:10.163387Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669266130702997:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.163405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f7f/r3tmp/tmpvw3zcX/pdisk_1.dat 
2025-06-03T10:33:10.233065Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:10.233540Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669266130702977:2079] 1748946790163218 != 1748946790163221 TServer::EnableGrpc on GrpcPort 4115, node 1 2025-06-03T10:33:10.249204Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.249216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.249218Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.249265Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30702 TClient is connected to server localhost:30702 WaitRootIsUp 'Root'... TClient::Ls request: Root 2025-06-03T10:33:10.304088Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.304127Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:33:10.305044Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.317549Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.322655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.390947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.453325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:33:10.511871Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.548725Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669266130704635:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.548756Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.605839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.614629Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.627798Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.641494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.655980Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.670403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.685693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.700243Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669266130705286:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.700267Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.700312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669266130705291:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.701099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:10.703579Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669266130705293:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:10.768996Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669266130705344:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:10.909967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.981526Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669266130705727:2523] TxId: 281474976715675. Ctx: { TraceId: 01jwtnmah800gwtajfsqmhhdwd, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjNiOTlhZGItNTY3N2ExYS04N2Y2ZmZjYy1jZTcwZGIyZQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:10.983881Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946791025, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 22049, MsgBus: 26936 2025-06-03T10:33:11.295033Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669271006867645:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.295349Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f7f/r3tmp/tmpZEWXSI/pdisk_1.dat 2025-06-03T10:33:11.308812Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.309079Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669271006867620:2079] 1748946791294776 != 1748946791294779 TServer::EnableGrpc on GrpcPort 22049, node 2 2025-06-03T10:33:11.320307Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.320322Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.320326Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.320383Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26936 TClient is connected to server localhost:26936 WaitRootIsUp 'Root'...
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.399176Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.399213Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.399560Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.400134Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:11.404610Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.416510Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.440151Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.454824Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.695633Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669271006869258:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.695665Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.704550Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.712957Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.732018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.742767Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.755183Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.770793Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.828348Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.842993Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669271006869913:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.843028Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.843134Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669271006869918:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.844148Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:11.851998Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669271006869920:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:11.936929Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669271006869971:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.115652Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.206717Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792250, txId: 281474976715674] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::SysTabletsNode [GOOD] Test command err: 2025-06-03T10:33:06.194783Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:06.195412Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:06.197652Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:06.197736Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:06.198066Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:06.198096Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:06.198392Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:06.198421Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:06.198475Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config.
2025-06-03T10:33:06.198597Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:06.199784Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:06.199805Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:06.199828Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:06.199866Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:06.228003Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:06.260376Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:06.260471Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.261572Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.261675Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:06.261680Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:06.261687Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:06.261690Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:06.261703Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.261750Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:06.261775Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:06.263269Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:06.305543Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.305622Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:06.365550Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:06.365596Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:06.365662Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:05:00Z 2025-06-03T10:33:06.365905Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 300028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 300028000 } Timestamp: 300028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 300028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 300028000 } Timestamp: 300028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 300028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 300028000 } Timestamp: 300028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 300028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 300028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 300028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 
300028000 } Timestamp: 300028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... : 24, with state: Down, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.543802Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: CMS, on node: 24, with state: Down, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.543806Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: NODE_BROKER, on node: 24, with state: Down, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.543810Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: TENANT_SLOT_BROKER, on node: 24, with state: Down, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.543817Z node 22 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:11.543885Z node 22 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "24" Services: "storage" Duration: 60000000 } Deadline: 180242000 Extentions { Type: HostInfo Hosts { Name: "::1" State: DOWN NodeId: 24 InterconnectPort: 12003 } } } } 2025-06-03T10:33:11.554997Z node 22 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:11.587106Z node 22 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.587236Z node 22 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.587308Z node 22 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false 2025-06-03T10:33:11.587324Z node 22 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 2025-06-03T10:33:11.587346Z node 22 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 25, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.587359Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: FLAT_BS_CONTROLLER, on node: 25, with state: Up, locked nodes: 0, down nodes: 3 2025-06-03T10:33:11.587371Z node 22 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '25': tablet 'FLAT_BS_CONTROLLER' has too many unavailable nodes. Locked: 0, down: 3, limit: 3) 2025-06-03T10:33:11.587412Z node 22 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'25\': tablet \'FLAT_BS_CONTROLLER\' has too many unavailable nodes. 
Locked: 0, down: 3, limit: 3" } Deadline: 420342000 } 2025-06-03T10:33:11.587606Z node 22 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-03T10:33:11.587616Z node 22 :CMS DEBUG: cms.cpp:1209: TCms::Cleanup 2025-06-03T10:33:11.589895Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:11.591047Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:11.591116Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:11.591609Z node 22 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:11.591768Z node 22 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:11.591955Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:11.592063Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:11.592098Z node 22 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:11.592201Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:11.592245Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:33:11.614594Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:11.636185Z node 22 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.636300Z node 22 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.636372Z node 22 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:11.636386Z node 22 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 2025-06-03T10:33:11.636403Z node 22 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 26, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636414Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: FLAT_BS_CONTROLLER, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636419Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: FLAT_SCHEMESHARD, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636423Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: FLAT_TX_COORDINATOR, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636428Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: TX_MEDIATOR, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636432Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: TX_ALLOCATOR, on 
node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636436Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: CONSOLE, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636441Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: CMS, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636445Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: NODE_BROKER, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636449Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: TENANT_SLOT_BROKER, on node: 26, with state: Up, locked nodes: 0, down nodes: 4 2025-06-03T10:33:11.636455Z node 22 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:11.636518Z node 22 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Action { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } Deadline: 180448000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 26 InterconnectPort: 12005 } } } } 2025-06-03T10:33:11.636713Z node 22 :CMS INFO: cms.cpp:104: OnTabletDead: 72057594037936128 2025-06-03T10:33:11.636720Z node 22 :CMS DEBUG: cms.cpp:1209: TCms::Cleanup 2025-06-03T10:33:11.638422Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:11.639216Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:11.639232Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:11.639692Z node 22 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:11.639784Z node 22 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:11.639852Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:11.639936Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:69: Loaded config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:11.639961Z node 22 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:11.640098Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:11.640144Z node 22 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 } 2025-06-03T10:33:11.662249Z node 22 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:11.683893Z node 22 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:11.684013Z node 22 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:11.684086Z node 22 :CMS 
INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false 2025-06-03T10:33:11.684100Z node 22 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 2025-06-03T10:33:11.684119Z node 22 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 27, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 5 2025-06-03T10:33:11.684130Z node 22 :CMS DEBUG: node_checkers.cpp:164: [Nodes Counter] Checking limits for sys tablet: FLAT_BS_CONTROLLER, on node: 27, with state: Up, locked nodes: 0, down nodes: 5 2025-06-03T10:33:11.684142Z node 22 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '27': tablet 'FLAT_BS_CONTROLLER' has too many unavailable nodes. Locked: 0, down: 5, limit: 5) 2025-06-03T10:33:11.684183Z node 22 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "27" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: false Schedule: false DryRun: true AvailabilityMode: MODE_KEEP_AVAILABLE EvictVDisks: false }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'27\': tablet \'FLAT_BS_CONTROLLER\' has too many unavailable nodes. Locked: 0, down: 5, limit: 5" } Deadline: 420554000 } >> KqpScan::EarlyFinish |70.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |70.2%| [LD] {RESULT} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/memory_controller/ut/ydb-core-memory_controller-ut >> KqpScan::Offset [GOOD] >> KqpScan::Order >> PersQueueSdkReadSessionTest::ReadSessionWithClose [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query >> KqpScan::StreamLookupByPkPrefix [GOOD] >> KqpScan::StreamLookupTryGetDataBeforeSchemeInitialization >> KqpSplit::AfterResult+Descending [GOOD] >> KqpSplit::AfterResult+Unspecified |70.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut |70.2%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/build_index/ut/ydb-core-tx-datashard-build_index-ut >> KqpScan::GrepRange [GOOD] >> KqpScan::Join2 [GOOD] >> KqpScan::Join3 |70.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut |70.2%| [LD] {RESULT} $(B)/ydb/library/query_actor/ut/ydb-library-query_actor-ut >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] >> KqpScan::TwoAggregatesOneFullFrameWindow [GOOD] >> KqpScan::JoinLeftOnly [GOOD] >> KqpScan::TwoAggregatesTwoWindows >> KqpSplit::AfterResolve+Descending [GOOD] |70.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers >> KqpScan::DqSourceLiteralRange [GOOD] |70.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers |70.2%| 
[LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_followers/ydb-core-tx-datashard-ut_followers ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::GrepRange [GOOD] Test command err: Trying to start YDB, gRPC: 4766, MsgBus: 15608 2025-06-03T10:33:10.053122Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669268671263532:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.053149Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f7e/r3tmp/tmpJeanqx/pdisk_1.dat 2025-06-03T10:33:10.109970Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669268671263508:2079] 1748946790052934 != 1748946790052937 2025-06-03T10:33:10.112902Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4766, node 1 2025-06-03T10:33:10.128148Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.128163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.128165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.128216Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15608 2025-06-03T10:33:10.154961Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.155001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:10.156080Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15608 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.184455Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:10.198806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.265992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.291699Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.305763Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.470621Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669268671265145:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.470674Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.533403Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.541431Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.550967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.606803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.621933Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.634637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.649178Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.664879Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669268671265800:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.664912Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.664930Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669268671265805:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.665663Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:10.668253Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669268671265807:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:10.733040Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669268671265858:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:10.869501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.945942Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946790990, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 27022, MsgBus: 16942 2025-06-03T10:33:11.192175Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669273750395371:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.192200Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f7e/r3tmp/tmpNbqg4t/pdisk_1.dat 2025-06-03T10:33:11.210864Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.214556Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669273750395352:2079] 1748946791192038 != 1748946791192041 TServer::EnableGrpc on GrpcPort 27022, node 2 2025-06-03T10:33:11.223987Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.224000Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.224003Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.224052Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16942 TClient is connected to server localhost:16942 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { ... 
2025-06-03T10:33:11.770239Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.792569Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669273750397667:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.792618Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.792644Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669273750397672:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.793629Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:11.796499Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669273750397674:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:11.858942Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669273750397725:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.019875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.137497Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792180, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 2111, MsgBus: 65436 2025-06-03T10:33:12.447880Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669275940174974:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.447901Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f7e/r3tmp/tmpu1C5M2/pdisk_1.dat 2025-06-03T10:33:12.471435Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:12.471779Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669275940174955:2079] 1748946792447755 != 1748946792447758 TServer::EnableGrpc on GrpcPort 2111, node 3 2025-06-03T10:33:12.477695Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.477713Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.477716Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.477799Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:65436 TClient is connected to server localhost:65436 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:33:12.555784Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.555820Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.556402Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.558109Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:12.558207Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:12.560227Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.620309Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.650669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.666928Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.887703Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669275940176588:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.887752Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.897133Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.905913Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.917411Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.931153Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.987509Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.001061Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.015868Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.032634Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669280235144537:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.032661Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.032702Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669280235144542:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.033571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.042794Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669280235144544:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.139344Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669280235144595:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.370888Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793412, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::EnableCMSRequestPrioritiesFeatureFlag [GOOD] Test command err: 2025-06-03T10:33:05.956048Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:05.956783Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:05.959124Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:05.959185Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:05.959619Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:05.959654Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:05.959959Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:05.959994Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:05.960041Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:33:05.960144Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:05.961462Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:05.961487Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:05.961515Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:05.961558Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:05.993726Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:06.026152Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:06.026259Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.027971Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.028097Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:06.028106Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:06.028116Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:06.028120Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:06.028136Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:06.028188Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:06.028216Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:06.030721Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:06.072825Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:06.072882Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:06.131307Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:06.131346Z node 1 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:06.131431Z node 1 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:06.131658Z node 1 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvClusterStateRequest { }, response# NKikimr::NCms::TEvCms::TEvClusterStateResponse { Status { Code: OK } State { Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-0-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-1-1" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 1 InterconnectPort: 12001 Location { DataCenter: "1" Module: "1" Rack: "1" Unit: "1" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-1-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-2-2" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 2 InterconnectPort: 12002 Location { DataCenter: "1" Module: "2" Rack: "2" Unit: "2" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-2-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-3-3" State: UP Timestamp: 120028000 } Timestamp: 120028000 NodeId: 3 InterconnectPort: 12003 Location { DataCenter: "1" Module: "3" Rack: "3" Unit: "3" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: 120028000 } Devices { Name: "vdisk-0-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-1-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-2-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "vdisk-3-1-0-3-0" State: UP Timestamp: 120028000 } Devices { Name: "pdisk-4-4" State: UP Timestamp: 
120028000 } Timestamp: 120028000 NodeId: 4 InterconnectPort: 12004 Location { DataCenter: "1" Module: "4" Rack: "4" Unit: "4" } StartTimeSeconds: 0 } Hosts { Name: "::1" State: UP Services { Name: "storage" State: UP Version: "-1" Timestamp: ... -06-03T10:33:10.501287Z node 17 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 18, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:10.501433Z node 17 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:10.501469Z node 17 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user1-p-5, requestId# user1-r-4, owner# user1 2025-06-03T10:33:10.501480Z node 17 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (18) (permission user1-p-5 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:10.501498Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:10.501563Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user1-p-5, validity# 1970-01-01T00:03:00.850656Z, action# Type: SHUTDOWN_HOST Host: "18" Duration: 60000000 2025-06-03T10:33:10.501578Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user1-r-4, owner# user1 2025-06-03T10:33:10.512714Z node 17 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:10.512839Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user1" RequestId: "user1-r-4" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user1-p-5" Action { Type: SHUTDOWN_HOST Host: "18" Duration: 60000000 } Deadline: 180850656 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12002 } } } } 2025-06-03T10:33:10.513029Z node 17 :CMS INFO: cms.cpp:1366: Get all requests for user1 2025-06-03T10:33:10.513040Z node 17 :CMS DEBUG: cms.cpp:1392: Resulting status: OK 2025-06-03T10:33:10.513059Z node 17 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManageRequestRequest { User: "user1" Command: LIST DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManageRequestResponse { Status { Code: OK } } 2025-06-03T10:33:12.159974Z node 25 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:33:12.160674Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:12.162677Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:12.162739Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:33:12.163045Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:12.163070Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:33:12.163297Z node 25 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:33:12.163327Z node 25 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:33:12.163358Z node 25 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:33:12.163425Z node 25 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:33:12.164477Z node 25 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:33:12.164500Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:33:12.164532Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:33:12.164552Z node 25 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:12.186145Z node 25 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: false EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:33:12.230830Z node 25 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:33:12.230979Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:12.231026Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:12.233270Z node 25 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:33:12.233332Z node 25 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:33:12.233359Z node 25 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:33:12.233365Z node 25 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:33:12.233390Z node 25 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:33:12.233410Z node 25 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:33:12.233491Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:33:12.233993Z node 25 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 25 PDiskId: 25 Path: "/25/pdisk-25.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 26 PDiskId: 26 Path: "/26/pdisk-26.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 27 PDiskId: 27 Path: "/27/pdisk-27.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 28 PDiskId: 28 Path: "/28/pdisk-28.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 29 PDiskId: 29 Path: "/29/pdisk-29.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 30 PDiskId: 30 Path: "/30/pdisk-30.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 31 PDiskId: 31 Path: "/31/pdisk-31.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 32 PDiskId: 32 Path: "/32/pdisk-32.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 
1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1000 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1000 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1000 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1000 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1000 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1000 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1000 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1001 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1001 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1001 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1001 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1001 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1001 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1001 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1002 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1002 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1002 } 
VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1002 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1002 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1002 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1002 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 25 PDiskId: 25 VSlotId: 1003 } VSlotId { NodeId: 26 PDiskId: 26 VSlotId: 1003 } VSlotId { NodeId: 27 PDiskId: 27 VSlotId: 1003 } VSlotId { NodeId: 28 PDiskId: 28 VSlotId: 1003 } VSlotId { NodeId: 29 PDiskId: 29 VSlotId: 1003 } VSlotId { NodeId: 30 PDiskId: 30 VSlotId: 1003 } VSlotId { NodeId: 31 PDiskId: 31 VSlotId: 1003 } VSlotId { NodeId: 32 PDiskId: 32 VSlotId: 1003 } } } } Success: true 2025-06-03T10:33:12.277763Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:33:12.277858Z node 25 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:33:12.278137Z node 25 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "25" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "26" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: WRONG_REQUEST Reason: "Unsupported: feature flag EnableCMSRequestPriorities is off" } } >> KqpScan::CustomWindow [GOOD] >> KqpScan::CrossJoinOneColumn >> KqpSplit::IntersectionLosesRange+Ascending |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |70.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut |70.3%| [LD] {RESULT} $(B)/ydb/core/tx/time_cast/ut/ydb-core-tx-time_cast-ut >> KqpScan::TooManyComputeActors [GOOD] >> KqpScan::Order [GOOD] >> TxUsage::WriteToTopic_Demo_11_Query [GOOD] >> KqpScan::AggregateByColumn >> DataShardSnapshots::MvccSnapshotTailCleanup >> KqpSplit::AfterResultMultiRange+Descending [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResolve+Descending [GOOD] Test command err: Trying to start YDB, gRPC: 15641, MsgBus: 26167 2025-06-03T10:33:11.672502Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669272093044440:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.672542Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f4d/r3tmp/tmpFbv4d4/pdisk_1.dat 2025-06-03T10:33:11.735075Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669272093044420:2079] 1748946791672312 != 1748946791672315 2025-06-03T10:33:11.737267Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 15641, node 1 2025-06-03T10:33:11.749529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
(empty maybe) 2025-06-03T10:33:11.749545Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.749548Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.749599Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26167 2025-06-03T10:33:11.774856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.774890Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.775955Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26167 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.825327Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.828551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:33:11.833735Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.905970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.931366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.950318Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:12.131603Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276388013348:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.131628Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.187698Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.198532Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.211875Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.225692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.238870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.252597Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.266052Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.283655Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276388014001:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.283709Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.283712Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276388014006:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.284736Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.293007Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669276388014008:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:12.372427Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669276388014059:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- 2025-06-03T10:33:12.565655Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669276388014350:2506] TxId: 281474976710673. Ctx: { TraceId: 01jwtnmc2v3w7727ee9mb9nbr0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWVhYjgyMzItNDI3ZjBmOTYtZWMzMzNhYjktNDI2ZmM0ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:33:12.565797Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jwtnmc2v3w7727ee9mb9nbr0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=OWVhYjgyMzItNDI3ZjBmOTYtZWMzMzNhYjktNDI2ZmM0ZGM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root scheme op Status: 53 TxId: 281474976710674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:12.592414Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792607, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 4858, MsgBus: 19380 2025-06-03T10:33:12.816383Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669275180738784:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.816411Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f4d/r3tmp/tmpjJkLgc/pdisk_1.dat 2025-06-03T10:33:12.842107Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4858, node 2 2025-06-03T10:33:12.852341Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.852354Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.852357Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.852413Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19380 TClient is connected to server localhost:19380 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:12.917209Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.917256Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.918154Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:12.920581Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.921869Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:12.925707Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.938751Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.961790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.974382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.194780Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669279475707685:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.194809Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.205695Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.215875Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.226624Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.241382Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.253201Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.268349Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.283243Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.344845Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669279475708344:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.344879Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.344998Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669279475708349:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.346257Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.351227Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669279475708351:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.433488Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669279475708402:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.593777Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmd35d1tdra1yyxcgswrv, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NmM1YjU5ZGYtMWY1ZTYyYWMtYTYwYjI1NGUtMWJlNmY1NGU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:13.608393Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793636, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::JoinLeftOnly [GOOD] Test command err: Trying to start YDB, gRPC: 14821, MsgBus: 27581 2025-06-03T10:33:10.336846Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669267602953343:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.336880Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f6c/r3tmp/tmpornybz/pdisk_1.dat 2025-06-03T10:33:10.409998Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669267602953323:2079] 1748946790336667 != 1748946790336670 2025-06-03T10:33:10.413614Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14821, node 1 2025-06-03T10:33:10.423826Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.423841Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.423843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.423891Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:33:10.439847Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.439881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:10.441759Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27581 TClient 
is connected to server localhost:27581 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.490133Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.492814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:33:10.517548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.583116Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.609733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.621574Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.719128Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267602954975:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.719172Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.767189Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.775947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.789010Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.803209Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.817138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.830999Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.845197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.862013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267602955626:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.862049Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.862054Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267602955631:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.862963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:10.874284Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669267602955633:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:10.955496Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669267602955684:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:11.113146Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.238971Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946791277, txId: 281474976710674] shutting down Trying to start YDB, gRPC: 13542, MsgBus: 6738 2025-06-03T10:33:11.470538Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669272795784725:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.470568Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f6c/r3tmp/tmpYcC19k/pdisk_1.dat 2025-06-03T10:33:11.485137Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.485384Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669272795784702:2079] 1748946791470355 != 1748946791470358 TServer::EnableGrpc on GrpcPort 13542, node 2 2025-06-03T10:33:11.495526Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.495541Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.495545Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.495623Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6738 TClient is connected to server localhost:6738 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true ... seId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.038639Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.038687Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669277090754313:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.039810Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.042914Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669277090754315:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.096650Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669277090754366:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.285021Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792327, txId: 281474976715672] shutting down 2025-06-03T10:33:12.324874Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792369, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 16281, MsgBus: 17086 2025-06-03T10:33:12.744373Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669277267697562:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.744420Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f6c/r3tmp/tmpHMAO4b/pdisk_1.dat 2025-06-03T10:33:12.777984Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669277267697542:2079] 1748946792744213 != 1748946792744216 2025-06-03T10:33:12.782384Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16281, node 3 2025-06-03T10:33:12.789643Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.789662Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.789665Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.789730Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17086 2025-06-03T10:33:12.857668Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.857719Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.859144Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17086 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:12.871381Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.873512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:12.877358Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.891418Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:33:12.920932Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.935295Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.142898Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669281562666464:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.142933Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.152728Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.162130Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.176924Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.192474Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.204301Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.217914Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.233537Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.250021Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669281562667115:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.250063Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.250169Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669281562667120:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.250969Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.258463Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669281562667122:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.352954Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669281562667173:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } waiting... 2025-06-03T10:33:13.467905Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.587187Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793622, txId: 281474976715674] shutting down 2025-06-03T10:33:13.649572Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793685, txId: 281474976715676] shutting down >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::DqSourceLiteralRange [GOOD] Test command err: Trying to start YDB, gRPC: 25268, MsgBus: 64072 2025-06-03T10:33:10.159392Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669267560341661:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.159415Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f74/r3tmp/tmpGzYwON/pdisk_1.dat 2025-06-03T10:33:10.214514Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:10.214630Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669267560341641:2079] 1748946790159241 != 1748946790159244 TServer::EnableGrpc on GrpcPort 25268, node 1 2025-06-03T10:33:10.228058Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.228072Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.228075Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.228134Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64072 2025-06-03T10:33:10.262064Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.262096Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:10.263226Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64072 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.295905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.300486Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.366944Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.433084Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.449071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.581403Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267560343296:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.581456Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.626862Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.634996Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.691225Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.747970Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.803557Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.817197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.831080Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:10.847961Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267560343955:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.847991Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669267560343960:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.848006Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:10.848937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:10.858049Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669267560343962:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:10.926923Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669267560344013:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:11.102656Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.136973Z node 1 :RPC_REQUEST WARN: rpc_stream_execute_scan_query.cpp:410: Client lost Trying to start YDB, gRPC: 3234, MsgBus: 17354 2025-06-03T10:33:11.542318Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669270420759779:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.542348Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f74/r3tmp/tmpvDNCi8/pdisk_1.dat 2025-06-03T10:33:11.557371Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.557558Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669270420759751:2079] 1748946791542184 != 1748946791542187 TServer::EnableGrpc on GrpcPort 3234, node 2 2025-06-03T10:33:11.568211Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.568224Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.568227Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.568296Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17354 TClient is connected to server localhost:17354 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.6465 ... 
QP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669274715729330:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.130953Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.131161Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669274715729335:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.132127Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.138601Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669274715729337:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.225333Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669274715729388:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.376772Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.490447Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792530, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 1521, MsgBus: 3949 2025-06-03T10:33:12.789366Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669274303711589:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.789389Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f74/r3tmp/tmpnnOECq/pdisk_1.dat 2025-06-03T10:33:12.809821Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:12.810209Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669274303711567:2079] 1748946792789209 != 1748946792789212 TServer::EnableGrpc on GrpcPort 1521, node 3 2025-06-03T10:33:12.821903Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.821921Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.821923Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.821974Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3949 TClient is connected to server localhost:3949 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:12.894805Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.894846Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.895222Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.895843Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:33:12.898563Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.911819Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.935167Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.947231Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.242083Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669278598680511:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.242116Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.253210Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.265643Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.299183Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.316090Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.372948Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.387204Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.400514Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.416686Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669278598681168:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.416744Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.416772Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669278598681173:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.417808Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.427363Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669278598681175:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.515161Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669278598681226:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.649313Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.712424Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793755, txId: 281474976715674] shutting down 2025-06-03T10:33:13.744408Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793790, txId: 281474976715676] shutting down |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink |70.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats |70.3%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_column_stats/ydb-core-tx-datashard-ut_column_stats ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::TooManyComputeActors [GOOD] Test command err: Trying to start YDB, gRPC: 4798, MsgBus: 17382 2025-06-03T10:33:11.781980Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669273389649015:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.782159Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f33/r3tmp/tmpcZ9aVu/pdisk_1.dat 2025-06-03T10:33:11.864952Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669273389648855:2079] 1748946791778993 != 1748946791778996 2025-06-03T10:33:11.866270Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4798, node 1 2025-06-03T10:33:11.901584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.901606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.901609Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.901672Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17382 2025-06-03T10:33:11.937374Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.937410Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 
Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.938233Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:17382 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.986540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.997767Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:12.009739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.081635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.111469Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.135298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.256791Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277684617784:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.256854Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.314126Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.324401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.336578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.349637Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.405402Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.414169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.430244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.493543Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277684618445:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.493575Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.493774Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277684618450:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.494945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.498810Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669277684618452:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.579009Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669277684618503:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.795698Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792791, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 10440, MsgBus: 8139 2025-06-03T10:33:13.101164Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669282124760885:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.101197Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f33/r3tmp/tmpC2KefT/pdisk_1.dat 2025-06-03T10:33:13.118523Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.118726Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669282124760862:2079] 1748946793100945 != 1748946793100948 TServer::EnableGrpc on GrpcPort 10440, node 2 2025-06-03T10:33:13.129890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.129906Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.129908Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.129969Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8139 TClient is connected to server localhost:8139 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:13.207242Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.207270Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.207686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.208285Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:13.214377Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.227133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.254048Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.266323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.572303Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282124762492:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.572345Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.582174Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.591017Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.603304Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.617407Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.631088Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.644868Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.659453Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.674988Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282124763145:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.675008Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.675024Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282124763150:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.675817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.678807Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669282124763152:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.757209Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669282124763203:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.904808Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.076168Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=ZjRjYzllNzYtNTI2MTI1ZTAtNWVhZDFmMS03ZjZlOWVlNA==, ActorId: [2:7511669282124763699:2531], ActorState: ExecuteState, TraceId: 01jwtnmdet58hsb55eepfdjshe, Create QueryResponse for error on request, msg: 2025-06-03T10:33:14.076322Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794119, txId: 281474976715674] shutting down
: Warning: Type annotation, code: 1030
:7:13: Warning: At function: RemovePrefixMembers, At function: Sort, At function: PersistableRepr, At function: SqlProject
:8:18: Warning: At function: AssumeColumnOrderPartial, At function: Aggregate, At function: Filter, At lambda, At function: Coalesce
:9:67: Warning: At function: And
:9:39: Warning: At function: <
:9:46: Warning: At function: -
:9:46: Warning: Integral type implicit bitcast: Optional and Int32, code: 1107
: Warning: Execution, code: 1060
:4:44: Warning: Cost Based Optimizer could not be applied to this query: couldn't load statistics, code: 8001
: Error: Requested too many execution units: 21, code: 2029 >> KqpScan::SecondaryIndex [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::Order [GOOD] Test command err: Trying to start YDB, gRPC: 61576, MsgBus: 28606 2025-06-03T10:33:12.146622Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669276764746096:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.146817Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ed7/r3tmp/tmpKbtGoT/pdisk_1.dat 2025-06-03T10:33:12.208931Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669276764746071:2079] 1748946792146069 != 1748946792146072 2025-06-03T10:33:12.217120Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 61576, node 1 2025-06-03T10:33:12.225020Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.225054Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.225057Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.225111Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28606 2025-06-03T10:33:12.249168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.249197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.250303Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28606 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:33:12.291661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:12.302761Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.324548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.347441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.359432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.607657Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276764747713:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.607713Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.666937Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.687344Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.699877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.713981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.722762Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.736123Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.750998Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.766724Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276764748367:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.766740Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276764748372:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.766760Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.767976Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.776172Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669276764748374:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.831513Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669276764748425:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.065034Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669281059716025:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtnmchzc0wbxnbzdde6575t, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZWRlZmY5ZGYtMjhhYmQ2NTYtOWQzNWVjMDQtZmYwMTgxMWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:13.068213Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793111, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 26427, MsgBus: 3232 2025-06-03T10:33:13.326942Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669281569962039:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.326979Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ed7/r3tmp/tmptogCwF/pdisk_1.dat 2025-06-03T10:33:13.343285Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669281569962019:2079] 1748946793326806 != 1748946793326809 2025-06-03T10:33:13.344759Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26427, node 2 2025-06-03T10:33:13.353129Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.353142Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.353144Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.353211Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3232 TClient is connected to server localhost:3232 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:13.429902Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.429951Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.430303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.430859Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:13.446452Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.459828Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.481686Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.496178Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.749469Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281569963647:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.749502Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.758143Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.766934Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.777737Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.792535Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.806283Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.820303Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.834196Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.849709Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281569964298:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.849732Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.849742Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281569964303:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.850547Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.853677Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669281569964305:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.920663Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669281569964356:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.055759Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794054, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResultMultiRange+Descending [GOOD] Test command err: Trying to start YDB, gRPC: 18468, MsgBus: 20196 2025-06-03T10:33:10.805651Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669267446374753:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.805684Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f69/r3tmp/tmp9SUHn8/pdisk_1.dat 2025-06-03T10:33:10.865345Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:10.865699Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669267446374732:2079] 1748946790805460 != 1748946790805463 TServer::EnableGrpc on GrpcPort 18468, node 1 2025-06-03T10:33:10.886435Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.886456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.886459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.886533Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20196 TClient is connected to server localhost:20196 WaitRootIsUp 'Root'... 
TClient::Ls request: Root 2025-06-03T10:33:10.942166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.942220Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient::Ls response: 2025-06-03T10:33:10.943182Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.952354Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.962374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.039410Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.102311Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.115733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.285510Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669271741343677:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.285544Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.322783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.331802Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.341727Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.355450Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.410917Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.420586Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.433907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.450382Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669271741344330:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.450417Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.450424Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669271741344335:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.451285Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:11.459933Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669271741344337:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:11.525086Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669271741344388:3397] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:11.785760Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669271741344682:2506] TxId: 281474976710673. Ctx: { TraceId: 01jwtnmb8t260s08z7y5j6mg56, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njc4MTY4OWYtZTgwZjE2OWQtOGUyYjZhZjItNjQ2YzM0OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:33:11.785873Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976710673. Ctx: { TraceId: 01jwtnmb8t260s08z7y5j6mg56, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Njc4MTY4OWYtZTgwZjE2OWQtOGUyYjZhZjItNjQ2YzM0OTc=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976710674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:12.152515Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946791830, txId: 281474976710672] shutting down Trying to start YDB, gRPC: 17957, MsgBus: 13181 2025-06-03T10:33:12.448818Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669277679042112:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.448841Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f69/r3tmp/tmpebojMN/pdisk_1.dat 2025-06-03T10:33:12.487115Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:12.487298Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669277679042086:2079] 1748946792448693 != 1748946792448696 TServer::EnableGrpc on GrpcPort 17957, node 2 2025-06-03T10:33:12.491871Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.491886Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.491890Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.491950Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13181 2025-06-03T10:33:12.555005Z node 2 :HIVE WARN: node_info.cpp:25: 
HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.555047Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.556093Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13181 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:12.565403Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.582241Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.602323Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.634249Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.662402Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.955863Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669277679043733:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.955963Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.959483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.966973Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.980005Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.994646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.008672Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.022058Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.036083Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.053925Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281974011682:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.053957Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.053972Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281974011687:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.054790Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.063407Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669281974011689:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.124434Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669281974011740:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.400356Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmctdcd628e16nbsk5feq, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=Y2JiNTI2MC0yYWMwYTY2OC02YTc4MDY5NS0zYWIyNDRlNA==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:13.866235Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793447, txId: 281474976715672] shutting down >> KqpSplit::AfterResult+Unspecified [GOOD] >> KqpScan::StreamLookupTryGetDataBeforeSchemeInitialization [GOOD] >> TCmsTest::EmergencyDuringRollingRestart [GOOD] >> KqpScan::TwoAggregatesTwoWindows [GOOD] |70.3%| [TA] $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::SecondaryIndex [GOOD] Test command err: Trying to start YDB, gRPC: 32747, MsgBus: 61979 2025-06-03T10:33:11.835828Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669271769411924:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.836039Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f46/r3tmp/tmp6RJo3M/pdisk_1.dat 2025-06-03T10:33:11.919512Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.921282Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669271769411728:2079] 1748946791831964 != 1748946791831967 TServer::EnableGrpc on GrpcPort 32747, node 1 2025-06-03T10:33:11.935001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.935030Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.936055Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:11.937516Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.937529Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.937532Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.937586Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61979 TClient is connected to server localhost:61979 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:12.102713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:12.106491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:12.109253Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.176160Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.203100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.216519Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.304544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276064380660:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.304576Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.373027Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.382087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.392541Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.405422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.421341Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.434049Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.448104Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.469516Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276064381316:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.469540Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.473390Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276064381321:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.477510Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.481559Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669276064381323:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.543419Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669276064381374:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.714149Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.858807Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792894, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 2062, MsgBus: 27961 2025-06-03T10:33:13.160573Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669281504627705:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.160595Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f46/r3tmp/tmpl5YFV1/pdisk_1.dat 2025-06-03T10:33:13.176689Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.179018Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669281504627685:2079] 1748946793160473 != 1748946793160476 TServer::EnableGrpc on GrpcPort 2062, node 2 2025-06-03T10:33:13.187777Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.187792Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.187795Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.187856Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27961 TClient is connected to server localhost:27961 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:13.264172Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.264206Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.264598Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.265178Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:13.267438Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.283191Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.306724Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.321646Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.606350Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281504629332:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.606374Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.617617Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.625908Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.637692Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.651894Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.665752Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.680086Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.694667Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.711237Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281504629984:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.711267Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.711296Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669281504629989:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.712276Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.721185Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669281504629991:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.796969Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669281504630042:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.963485Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.972768Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.982222Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.251470Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794294, txId: 281474976715677] shutting down 2025-06-03T10:33:14.297774Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794343, txId: 281474976715679] shutting down 2025-06-03T10:33:14.336617Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794378, txId: 281474976715681] shutting down 2025-06-03T10:33:14.386932Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794427, txId: 281474976715683] shutting down >> KqpScan::UdfFailure [GOOD] >> KqpSplit::IntersectionLosesRange+Ascending [GOOD] >> KqpSplit::IntersectionLosesRange+Descending >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::AfterResult+Unspecified [GOOD] Test command err: Trying to start YDB, gRPC: 30672, MsgBus: 64191 2025-06-03T10:33:11.801997Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669271117343274:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.802059Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f44/r3tmp/tmp6ROuFE/pdisk_1.dat 2025-06-03T10:33:11.863121Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 30672, node 1 2025-06-03T10:33:11.877536Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.877560Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.877564Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.877620Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:64191 2025-06-03T10:33:11.902461Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.902499Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.903793Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:64191 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.958564Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.969667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:11.973362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:33:12.004111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.098819Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.125236Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.297660Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669275412312027:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.297703Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.350364Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.357992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.414111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.426646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.483422Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.497794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.513701Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.530945Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669275412312682:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.530977Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.531088Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669275412312687:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.532207Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.538303Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669275412312689:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.622750Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669275412312740:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.869555Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmccd5mjvxwxffn2mbqbx, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzQxOTY5MGYtZGM3ZmM1YjAtYmY2NGYyNmYtZWQ1YjI0OTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:13.355302Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792915, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 10909, MsgBus: 19063 2025-06-03T10:33:13.546925Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669282196581889:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.547117Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f44/r3tmp/tmpAibhpd/pdisk_1.dat 2025-06-03T10:33:13.565886Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.566105Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669282196581867:2079] 1748946793546742 != 1748946793546745 TServer::EnableGrpc on GrpcPort 10909, node 2 2025-06-03T10:33:13.576892Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.576910Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.576913Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.576979Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19063 TClient is connected to server localhost:19063 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:13.653641Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.653681Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.654026Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.654676Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:13.664964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.674557Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.697392Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.710050Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.907263Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282196583494:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.907318Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.914582Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.923065Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.931692Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.945756Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.959748Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.974136Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.988141Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.004341Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669286491551443:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.004378Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.004387Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669286491551448:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.005154Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.007281Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669286491551450:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.106520Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669286491551501:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.249794Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmdqrfx6are9p8h4hfe5m, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=MzRmZWMxMC1hN2YwY2YwOC1lMWJkZDhhYS0yMzRlMjA2Mg==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:14.548885Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794294, txId: 281474976715672] shutting down |70.3%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign |70.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::StreamLookupTryGetDataBeforeSchemeInitialization [GOOD] Test command err: Trying to start YDB, gRPC: 4139, MsgBus: 15986 2025-06-03T10:33:12.052890Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669277965626712:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.053218Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f21/r3tmp/tmpq3Fwqt/pdisk_1.dat 2025-06-03T10:33:12.178322Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:12.178590Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669277965626686:2079] 1748946792052538 != 1748946792052541 TServer::EnableGrpc on GrpcPort 4139, node 1 2025-06-03T10:33:12.197572Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.197590Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.197593Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.197645Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15986 2025-06-03T10:33:12.246166Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, 
(0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:12.246197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:12.247377Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15986 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:12.290907Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.295815Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.359333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.382087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.397120Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.514376Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277965628325:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.514429Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.584517Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.595362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.613396Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.627034Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.639485Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.657430Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.674139Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.698312Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277965628978:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.698352Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.698443Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669277965628983:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.699441Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.703496Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669277965628985:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.761611Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669277965629036:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.946845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:12.987316Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.089240Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793132, txId: 281474976715676] shutting down 2025-06-03T10:33:13.873445Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [2:323:2365], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:13.873548Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:13.873560Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f21/r3tmp/tmpsvry4D/pdisk_1.dat 2025-06-03T10:33:13.969365Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.985025Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.986063Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:32:2079] 1748946793415461 != 1748946793415465 2025-06-03T10:33:14.028171Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.028214Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.038888Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:14.115791Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:642:2549], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.115839Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.117422Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.313594Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:790:2654], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.313638Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.313697Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:795:2659], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.314814Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.462705Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:797:2661], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:33:14.494285Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:868:2701] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Captured TEvTxProxySchemeCache::TEvResolveKeySetResult from NKikimr::NSchemeBoard::(anonymous namespace)::TAccessCheckerResolve to NKikimr::NTxProxy::TResolveTablesActor Captured TEvTxProxySchemeCache::TEvResolveKeySetResult from NKikimr::NSchemeBoard::(anonymous namespace)::TAccessCheckerResolve to KQP_TABLE_RESOLVER 2025-06-03T10:33:14.638887Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715661. Ctx: { TraceId: 01jwtnmdt91qcts2an1ahjfndn, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTcwNmViZmItYTM2MDYxZS1mNWMwNzYxYS1iYWFhMWNhYw==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root Captured TEvTxProxySchemeCache::TEvResolveKeySetResult from NKikimr::NSchemeBoard::(anonymous namespace)::TAccessCheckerResolve to KQP_STREAM_LOOKUP_ACTOR 2025-06-03T10:33:14.640810Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 2000, txId: 281474976715660] shutting down Captured TEvTxProxySchemeCache::TEvResolveKeySetResult from NKikimr::NSchemeBoard::(anonymous namespace)::TAccessCheckerResolve to NKikimr::NTxProxy::TResolveTablesActor ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/cms/ut/unittest >> TCmsTest::EmergencyDuringRollingRestart [GOOD] Test command err: 2025-06-03T10:32:55.043880Z node 1 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:55.044484Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:55.046302Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:55.046358Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:55.046621Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:55.046648Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:55.046934Z node 1 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:55.046959Z node 1 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:55.047007Z node 1 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:55.047077Z node 1 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:55.048130Z node 1 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:55.048147Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:55.048169Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:55.048202Z node 1 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:55.076463Z node 1 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:55.108740Z node 1 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:55.108843Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:55.110222Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:55.110339Z node 1 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:55.110345Z node 1 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:55.110352Z node 1 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:55.110355Z node 1 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:55.110368Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:55.110417Z node 1 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:55.110441Z node 1 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:55.111988Z node 1 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Handle TEvBlobStorage::TEvControllerConfigResponse: response# Status { Success: true BaseConfig { PDisk { NodeId: 1 PDiskId: 1 Path: "/1/pdisk-1.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 2 PDiskId: 2 Path: "/2/pdisk-2.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 3 PDiskId: 3 Path: "/3/pdisk-3.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 4 PDiskId: 4 Path: "/4/pdisk-4.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 5 PDiskId: 5 Path: "/5/pdisk-5.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 6 PDiskId: 6 Path: "/6/pdisk-6.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 7 PDiskId: 7 Path: "/7/pdisk-7.data" Guid: 1 DriveStatus: ACTIVE } PDisk { NodeId: 8 PDiskId: 8 Path: "/8/pdisk-8.data" Guid: 1 DriveStatus: ACTIVE } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 1 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { 
NodeId: 3 PDiskId: 3 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 2 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 3 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 4 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 5 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 6 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } GroupId: 1 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } GroupId: 2 GroupGeneration: 1 FailDomainIdx: 7 } VSlot { VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } GroupId: 3 GroupGeneration: 1 FailDomainIdx: 7 } Group { GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1000 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1000 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1000 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1000 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1000 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1000 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1000 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1000 } } Group { GroupId: 1 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1001 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1001 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1001 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1001 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1001 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1001 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1001 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1001 } } Group { GroupId: 2 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1002 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1002 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1002 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1002 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1002 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1002 } VSlotId { 
NodeId: 7 PDiskId: 7 VSlotId: 1002 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1002 } } Group { GroupId: 3 GroupGeneration: 1 ErasureSpecies: "block-4-2" VSlotId { NodeId: 1 PDiskId: 1 VSlotId: 1003 } VSlotId { NodeId: 2 PDiskId: 2 VSlotId: 1003 } VSlotId { NodeId: 3 PDiskId: 3 VSlotId: 1003 } VSlotId { NodeId: 4 PDiskId: 4 VSlotId: 1003 } VSlotId { NodeId: 5 PDiskId: 5 VSlotId: 1003 } VSlotId { NodeId: 6 PDiskId: 6 VSlotId: 1003 } VSlotId { NodeId: 7 PDiskId: 7 VSlotId: 1003 } VSlotId { NodeId: 8 PDiskId: 8 VSlotId: 1003 } } } } Success: true 2025-06-03T10:32:55.154141Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:55.154201Z node 1 :CMS DEBUG: cms_tx_update_config.cpp:44: Updated config: TenantLimits { DisabledNodesRatioLimit: 0 } ClusterLimits { DisabledNodesRatioLimit: 0 } SentinelConfig { Enable: false } 2025-06-03T10:32:56.643060Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060000 event: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:32:56.645061Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 10060001 event: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:32:56.645145Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:16: TTxInitScheme Execute 2025-06-03T10:32:56.645480Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 1006000c event: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:32:56.645533Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104d0001 event: NKikimr::NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionResponse 2025-06-03T10:32:56.646083Z node 9 :CMS DEBUG: console__init_scheme.cpp:14: TConsole::TTxInitScheme Execute 2025-06-03T10:32:56.646806Z node 9 :CMS DEBUG: cms_tx_init_scheme.cpp:24: TTxInitScheme Complete 2025-06-03T10:32:56.646961Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:33: TTxLoadState Execute 2025-06-03T10:32:56.646993Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:76: Using default config 2025-06-03T10:32:56.647008Z node 9 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:32:56.648541Z node 9 :CMS DEBUG: console__init_scheme.cpp:23: TConsole::TTxInitScheme Complete 2025-06-03T10:32:56.648714Z node 9 :CMS DEBUG: console__load_state.cpp:28: TConsole::TTxLoadState Execute 2025-06-03T10:32:56.648736Z node 9 :CMS DEBUG: console__load_state.cpp:50: Using default config. 
2025-06-03T10:32:56.648782Z node 9 :CMS DEBUG: console__load_state.cpp:66: TConsole::TTxLoadState Complete 2025-06-03T10:32:56.670097Z node 9 :CMS DEBUG: cms_impl.h:185: StateInit event type: 104a0012 event: NKikimr::NConsole::TEvConsole::TEvConfigNotificationRequest { Config { FeatureFlags { EnableCMSRequestPriorities: true EnableSingleCompositeActionGroup: true } } ItemKinds: 25 ItemKinds: 26 Local: true } 2025-06-03T10:32:56.680940Z node 9 :CMS DEBUG: cms_tx_load_state.cpp:256: TTxLoadState Complete 2025-06-03T10:32:56.681041Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:23: TTxUpdateConfig Execute 2025-06-03T10:32:56.681092Z node 9 :CMS DEBUG: cms_tx_update_config.cpp:37: TTxUpdateConfig Complete 2025-06-03T10:32:56.681173Z node 9 :CMS DEBUG: sentinel.cpp:939: [Sentinel] [Main] UpdateConfig 2025-06-03T10:32:56.681180Z node 9 :CMS DEBUG: sentinel.cpp:884: [Sentinel] [Main] Start ConfigUpdater 2025-06-03T10:32:56.681188Z node 9 :CMS DEBUG: sentinel.cpp:955: [Sentinel] [Main] UpdateState 2025-06-03T10:32:56.681192Z node 9 :CMS INFO: sentinel.cpp:879: [Sentinel] [Main] StateUpdater was delayed 2025-06-03T10:32:56.681199Z node 9 :CMS DEBUG: sentinel.cpp:464: [Sentinel] [ConfigUpdater] Request blobstorage config: attempt# 0 2025-06-03T10:32:56.681219Z node 9 :CMS DEBUG: sentinel.cpp:477: [Sentinel] [ConfigUpdater] Request CMS cluster state: attempt# 0 2025-06-03T10:32:56.681416Z node 9 :CMS DEBUG: sentinel.cpp:530: [Sentinel] [ConfigUpdater] Ha ... de 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:13.226052Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-1, validity# 1970-01-01T00:03:00.029000Z, action# Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 2025-06-03T10:33:13.226085Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# -80, body# User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (18) has temporary lock, VDisk [0:1:0:1:0] (::1:/19/pdisk-19.data) is locked by this request. 
Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:13.256837Z node 18 :CMS DEBUG: cms.cpp:1147: Running CleanupWalleTasks 2025-06-03T10:33:13.298777Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:13.298930Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW_PARTIAL } RequestId: "user-r-1" Permissions { Id: "user-p-1" Action { Type: RESTART_SERVICES Host: "18" Services: "storage" Duration: 60000000 } Deadline: 180029000 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 18 InterconnectPort: 12001 } } } } 2025-06-03T10:33:13.298947Z node 18 :CMS DEBUG: cms.cpp:1064: Schedule cleanup at 1970-01-01T00:05:00.029000Z 2025-06-03T10:33:13.300805Z node 18 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-1 2025-06-03T10:33:13.300836Z node 18 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:13.300849Z node 18 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:13.300868Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-1, reason# explicit remove 2025-06-03T10:33:13.329835Z node 18 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:13.329944Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-1" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:13.342222Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:13.342270Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:13.342286Z node 18 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:13.342403Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -100 2025-06-03T10:33:13.342410Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 2025-06-03T10:33:13.342419Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 19, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:13.342449Z node 18 :CMS DEBUG: cms.cpp:387: Result: ALLOW 2025-06-03T10:33:13.342468Z node 18 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-2, requestId# user-r-2, owner# user 2025-06-03T10:33:13.342477Z node 18 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (19) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:13.342489Z node 18 :CMS DEBUG: 
cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:13.342537Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-2, validity# 1970-01-01T00:03:00.133512Z, action# Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 2025-06-03T10:33:13.353815Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:13.353935Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvPermissionRequest { User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } PartialPermissionAllowed: true Schedule: true DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -100 }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } RequestId: "user-r-2" Permissions { Id: "user-p-2" Action { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } Deadline: 180133512 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 19 InterconnectPort: 12002 } } } } 2025-06-03T10:33:13.386970Z node 18 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (19) (permission user-p-2 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:13.387097Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:13.387127Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:13.387146Z node 18 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:13.387345Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (18) has temporary lock, VDisk [0:1:0:1:0] (::1:/19/pdisk-19.data) is locked by this request. Down: " } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:13.387360Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: TOO_MANY_UNAVAILABLE_VDISKS Message: "Issue in affected group with id \'0\': too many unavailable vdisks. Locked: Host ::1:12001 (18) has temporary lock, VDisk [0:1:0:1:0] (::1:/19/pdisk-19.data) is locked by this request. 
Down: " } 2025-06-03T10:33:13.387374Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 19, with state: Locked, with limit: 0, with ratio limit: 0, locked nodes: 1, down nodes: 0 2025-06-03T10:33:13.387384Z node 18 :CMS DEBUG: cms.cpp:398: Result: DISALLOW_TEMP (reason: Cannot lock node '19': node state: 'Locked') 2025-06-03T10:33:13.387415Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:13.387484Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store request: id# user-r-1, owner# user, order# 1, priority# -80, body# User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: GENERIC Message: "Cannot lock node \'19\': node state: \'Locked\'" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:13.398677Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:13.398775Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: DISALLOW_TEMP Reason: "Cannot lock node \'19\': node state: \'Locked\'" } RequestId: "user-r-1" Deadline: 420235024 } 2025-06-03T10:33:13.398947Z node 18 :CMS INFO: cms.cpp:1326: User user is done with permissions user-p-2 2025-06-03T10:33:13.398959Z node 18 :CMS DEBUG: cms.cpp:1349: Resulting status: OK 2025-06-03T10:33:13.398988Z node 18 :CMS DEBUG: cms_tx_remove_permissions.cpp:28: TTxRemovePermissions Execute 2025-06-03T10:33:13.399023Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove permission: id# user-p-2, reason# explicit remove 2025-06-03T10:33:13.410106Z node 18 :CMS DEBUG: cms_tx_remove_permissions.cpp:79: TTxRemovePermissions Complete 2025-06-03T10:33:13.410215Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvManagePermissionRequest { User: "user" Command: DONE Permissions: "user-p-2" DryRun: false }, response# NKikimr::NCms::TEvCms::TEvManagePermissionResponse { Status { Code: OK } } 2025-06-03T10:33:13.484206Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:17: TTxUpdateDowntimes Execute 2025-06-03T10:33:13.484271Z node 18 :CMS DEBUG: cms_tx_update_downtimes.cpp:26: TTxUpdateDowntimes Complete 2025-06-03T10:33:13.484295Z node 18 :CMS DEBUG: cluster_info.cpp:968: Timestamp: 1970-01-01T00:02:00Z 2025-06-03T10:33:13.484499Z node 18 :CMS INFO: cms.cpp:347: Check request: User: "user" Actions { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: GENERIC Message: "Cannot lock node \'19\': node state: \'Locked\'" } } PartialPermissionAllowed: true Schedule: true Reason: "" TenantPolicy: DEFAULT AvailabilityMode: MODE_MAX_AVAILABILITY EvictVDisks: false Priority: -80 2025-06-03T10:33:13.484516Z node 18 :CMS DEBUG: cms.cpp:379: Checking action: Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 Issue { Type: GENERIC Message: "Cannot lock node \'19\': node state: \'Locked\'" } 2025-06-03T10:33:13.484532Z node 18 :CMS DEBUG: node_checkers.cpp:101: [Nodes Counter] Checking Node: 19, with state: Up, with limit: 0, with ratio limit: 0, locked nodes: 0, down nodes: 0 2025-06-03T10:33:13.484580Z node 18 :CMS DEBUG: 
cms.cpp:387: Result: ALLOW 2025-06-03T10:33:13.484608Z node 18 :CMS DEBUG: cms.cpp:1036: Accepting permission: id# user-p-3, requestId# user-r-1, owner# user 2025-06-03T10:33:13.484618Z node 18 :CMS INFO: cluster_info.cpp:777: Adding lock for Host ::1:12002 (19) (permission user-p-3 until 1970-01-01T00:03:00Z) 2025-06-03T10:33:13.484632Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:26: TTxStorePermissions Execute 2025-06-03T10:33:13.484684Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Store permission: id# user-p-3, validity# 1970-01-01T00:03:00.338048Z, action# Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 2025-06-03T10:33:13.484696Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Remove request: id# user-r-1, owner# user 2025-06-03T10:33:13.500426Z node 18 :CMS DEBUG: cms_tx_store_permissions.cpp:137: TTxStorePermissions complete 2025-06-03T10:33:13.500566Z node 18 :CMS NOTICE: audit_log.cpp:12: [AuditLog] [CMS tablet] Reply: request# NKikimr::NCms::TEvCms::TEvCheckRequest { User: "user" RequestId: "user-r-1" DryRun: false AvailabilityMode: MODE_MAX_AVAILABILITY }, response# NKikimr::NCms::TEvCms::TEvPermissionResponse { Status { Code: ALLOW } Permissions { Id: "user-p-3" Action { Type: RESTART_SERVICES Host: "19" Services: "storage" Duration: 60000000 } Deadline: 180338048 Extentions { Type: HostInfo Hosts { Name: "::1" State: UP NodeId: 19 InterconnectPort: 12002 } } } } >> DataShardSnapshots::MvccSnapshotAndSplit >> DataShardSnapshots::VolatileSnapshotSplit >> TxUsage::WriteToTopic_Demo_24_Query [GOOD] >> KqpScan::AggregateByColumn [GOOD] >> KqpScan::AggregateCountStar >> KqpScan::CrossJoinOneColumn [GOOD] >> TxUsage::WriteToTopic_Demo_25_Table >> TTicketParserTest::LoginCheckRemovedUser [GOOD] >> TTicketParserTest::LoginEmptyTicketBad >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Table [GOOD] >> KqpSplit::IntersectionLosesRange+Descending [GOOD] >> KqpScan::Join3 [GOOD] >> TTicketParserTest::LoginEmptyTicketBad [GOOD] >> DataShardSnapshots::UncommittedChangesRenameTable+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query >> DataShardSnapshots::ShardRestartWholeShardLockBasic >> LocalPartition::DescribeHang [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::TwoAggregatesTwoWindows [GOOD] Test command err: Trying to start YDB, gRPC: 12793, MsgBus: 30010 2025-06-03T10:33:10.711832Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669266404323782:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.711852Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f6a/r3tmp/tmp8ycmrM/pdisk_1.dat 2025-06-03T10:33:10.762998Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:10.763251Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669266404323762:2079] 1748946790711674 != 1748946790711677 TServer::EnableGrpc on GrpcPort 12793, node 1 2025-06-03T10:33:10.779493Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:10.779510Z node 1 
:NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:10.779513Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:10.779567Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30010 2025-06-03T10:33:10.815131Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:10.815162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:10.815997Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:30010 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:10.847911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.861103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.934391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:10.995995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.023291Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.233275Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669270699292719:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.233342Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.270528Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.326631Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.334833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.348308Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.362666Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.377056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.390863Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.407644Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669270699293372:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.407672Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669270699293377:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.407701Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.408507Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:11.418245Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669270699293379:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:11.512715Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669270699293430:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:11.821520Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669270699293745:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtnmb6tatdftv9ytsnnjbp3, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=Y2M5YjEwYTktNmQyZWVjYy02YmJkM2U5Yi1lN2EzNjYxMQ==, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:13.530467Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946791865, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 3913, MsgBus: 6910 2025-06-03T10:33:13.941481Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669280129517770:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.941505Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f6a/r3tmp/tmpdywfWO/pdisk_1.dat 2025-06-03T10:33:13.960797Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.960949Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669280129517751:2079] 1748946793941341 != 1748946793941344 TServer::EnableGrpc on GrpcPort 3913, node 2 2025-06-03T10:33:13.971447Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.971468Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.971470Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.971526Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6910 TClient is connected to server localhost:6910 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:14.048319Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.048352Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.048763Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.049254Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:14.060483Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.071009Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.096998Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.112198Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.294577Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669284424486686:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.294620Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.303741Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.312214Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.324247Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.337964Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.352511Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.366390Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.380457Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.396574Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669284424487338:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.396626Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.396772Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669284424487343:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.397922Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.407248Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669284424487345:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.492408Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669284424487396:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.799407Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794840, txId: 281474976715672] shutting down |70.4%| [TA] {RESULT} $(B)/ydb/services/ydb/table_split_ut/test-results/unittest/{meta.json ... results_accumulator.log} |70.4%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_reassign/ydb-core-tx-datashard-ut_reassign ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::UdfFailure [GOOD] Test command err: Trying to start YDB, gRPC: 4559, MsgBus: 1921 2025-06-03T10:33:11.528470Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669271983586552:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.528492Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f58/r3tmp/tmpbEZjvD/pdisk_1.dat 2025-06-03T10:33:11.603196Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:11.603488Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669271983586528:2079] 1748946791528197 != 1748946791528200 TServer::EnableGrpc on GrpcPort 4559, node 1 2025-06-03T10:33:11.612867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.612899Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.612901Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.612954Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:33:11.630843Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.630885Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.631971Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1921 TClient is connected to server localhost:1921 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.690018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.700435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.766646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.792569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.808213Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.982119Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669271983588167:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.982174Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.049814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.063112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.090502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.112655Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.124424Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.143803Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.154646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.171789Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276278556116:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.171827Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.171881Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276278556121:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.173161Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.181602Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669276278556123:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.272599Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669276278556174:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.538993Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669276278556501:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtnmbztf3xqzqs7kdvthan9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM0NWIwZTItMzhjOWMyZDktZTAwMjgzYTktOGUxNDA0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:12.763425Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669276278556570:2507] TxId: 281474976715674. Ctx: { TraceId: 01jwtnmbztf3xqzqs7kdvthan9, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzM0NWIwZTItMzhjOWMyZDktZTAwMjgzYTktOGUxNDA0NGQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:12.764986Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792579, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 6566, MsgBus: 7159 2025-06-03T10:33:13.055792Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669282584564916:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.055816Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f58/r3tmp/tmpWzFJfF/pdisk_1.dat 2025-06-03T10:33:13.074836Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.075046Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669282584564895:2079] 1748946793055609 != 1748946793055612 TServer::EnableGrpc on GrpcPort 6566, node 2 2025-06-03T10:33:13.087173Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.087187Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.087190Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.087251Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7159 TClient is connected to server localhost:7159 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSu ... source_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.902601Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7511669282584567535:2514], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jwtnmdcj9g4j54jybyqjc66x. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=2&id=NTI1YjZiMy0yN2UxZDZkZS1iZDg5YTZiMC00YTQwZDg1Mw==. CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate was called, reason(17): Bad filter value. }. 2025-06-03T10:33:13.902861Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=2&id=NTI1YjZiMy0yN2UxZDZkZS1iZDg5YTZiMC00YTQwZDg1Mw==, ActorId: [2:7511669282584567506:2507], ActorState: ExecuteState, TraceId: 01jwtnmdcj9g4j54jybyqjc66x, Create QueryResponse for error on request, msg: 2025-06-03T10:33:13.902883Z node 2 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [2:7511669282584567536:2515], TxId: 281474976715673, task: 2. Ctx: { SessionId : ydb://session/3?node_id=2&id=NTI1YjZiMy0yN2UxZDZkZS1iZDg5YTZiMC00YTQwZDg1Mw==. TraceId : 01jwtnmdcj9g4j54jybyqjc66x. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-03T10:33:13.903046Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793944, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 3838, MsgBus: 11935 2025-06-03T10:33:14.220240Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669283442124553:2060];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:14.220265Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f58/r3tmp/tmpvqaRpb/pdisk_1.dat 2025-06-03T10:33:14.234541Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:14.234947Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669283442124535:2079] 1748946794220115 != 1748946794220118 TServer::EnableGrpc on GrpcPort 3838, node 3 2025-06-03T10:33:14.245499Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:14.245516Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:14.245519Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:14.245577Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:11935 TClient is connected to server localhost:11935 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:14.326042Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.326079Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.326390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:14.327018Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:14.335583Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.347252Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.372127Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.386611Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.584907Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283442126163:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.584953Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.597623Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.607226Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.619780Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.677189Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.688259Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.702812Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.716330Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.733349Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283442126816:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.733385Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.733389Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283442126821:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.734354Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.743291Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669283442126823:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.799163Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669283442126874:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.959033Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7511669283442127167:2514], TxId: 281474976715673, task: 1. Ctx: { TraceId : 01jwtnmedn59xd0zd4akggxj42. SessionId : ydb://session/3?node_id=3&id=YjkzYWQxZTItNGVkNzhiM2EtODE1ODdjY2ItNWUzMGZkZWQ=. CustomerSuppliedId : . CurrentExecutionId : . DatabaseId : /Root. Database : /Root. PoolId : default. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate was called, reason(17): Bad filter value. }. 2025-06-03T10:33:14.959205Z node 3 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=3&id=YjkzYWQxZTItNGVkNzhiM2EtODE1ODdjY2ItNWUzMGZkZWQ=, ActorId: [3:7511669283442127138:2507], ActorState: ExecuteState, TraceId: 01jwtnmedn59xd0zd4akggxj42, Create QueryResponse for error on request, msg: 2025-06-03T10:33:14.959213Z node 3 :KQP_COMPUTE ERROR: dq_compute_actor_impl.h:678: SelfId: [3:7511669283442127168:2515], TxId: 281474976715673, task: 2. Ctx: { TraceId : 01jwtnmedn59xd0zd4akggxj42. CustomerSuppliedId : . SessionId : ydb://session/3?node_id=3&id=YjkzYWQxZTItNGVkNzhiM2EtODE1ODdjY2ItNWUzMGZkZWQ=. CurrentExecutionId : . DatabaseId : /Root. PoolId : default. Database : /Root. }. InternalError: INTERNAL_ERROR DEFAULT_ERROR: {
: Error: Terminate execution }. 2025-06-03T10:33:14.959395Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795001, txId: 281474976715672] shutting down >> KqpScan::AggregateCountStar [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpSplit::IntersectionLosesRange+Descending [GOOD] Test command err: Trying to start YDB, gRPC: 29435, MsgBus: 5388 2025-06-03T10:33:14.133875Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669286417001996:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:14.134088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecc/r3tmp/tmpsiplRE/pdisk_1.dat 2025-06-03T10:33:14.194061Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669286417001974:2079] 1748946794133597 != 1748946794133600 2025-06-03T10:33:14.196230Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29435, node 1 2025-06-03T10:33:14.207602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:14.207629Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:14.207632Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:14.207695Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5388 2025-06-03T10:33:14.236962Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.237001Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.238046Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:5388 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:14.277079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.279689Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:14.291076Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.311777Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.334718Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.347877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.527132Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669286417003607:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.527193Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.574128Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.582981Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.597851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.611971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.626437Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.639628Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.653392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.669937Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669286417004261:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.669965Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.669974Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669286417004266:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.670955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.680188Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669286417004268:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.755244Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669286417004319:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.971055Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmed1b1qs9qypx3t3fq9w, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MTkxNTZiOTQtNWU4MGUwNDctNTM1MTRhNDgtOGNmZjIyYzY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:14.987610Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795015, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 9797, MsgBus: 29392 2025-06-03T10:33:15.243155Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669289142007270:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:15.243219Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecc/r3tmp/tmpJejhwi/pdisk_1.dat 2025-06-03T10:33:15.259905Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.263569Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669289142007250:2079] 1748946795243002 != 1748946795243005 TServer::EnableGrpc on GrpcPort 9797, node 2 2025-06-03T10:33:15.271248Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:15.271269Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:15.271271Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:15.271332Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29392 TClient is connected to server localhost:29392 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:15.348028Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:15.348063Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:15.348430Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.348984Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:15.359300Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.369802Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.392146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.405139Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.627458Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669289142008902:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.627497Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.637179Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.645251Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.653327Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.667792Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.682212Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.695892Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.709919Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.726453Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669289142009552:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.726485Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669289142009557:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.726488Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.727342Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:15.736786Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669289142009559:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:15.802399Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669289142009610:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:15.978593Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnmfcz3dg06xbzvrp0915y, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=YTdkMDZhMDItMzY5ODQ0NjEtN2Q4ZWY2M2QtMjMyZmZjZTU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 resume evread ----------------------------------------------------------- 2025-06-03T10:33:15.994141Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946796023, txId: 281474976715672] shutting down ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::CrossJoinOneColumn [GOOD] Test command err: Trying to start YDB, gRPC: 16689, MsgBus: 14354 2025-06-03T10:33:11.464104Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669272484532557:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:11.464124Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f52/r3tmp/tmpkz5svv/pdisk_1.dat 2025-06-03T10:33:11.555652Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 16689, node 1 2025-06-03T10:33:11.565488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.565518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.566620Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:11.570438Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.570456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.570459Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.570509Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:14354 TClient is connected to server localhost:14354 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:11.644169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.657786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.734201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.765994Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.781620Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.897625Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669272484534165:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.897668Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.947196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.959223Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.972270Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.986240Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.000017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.062813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.081075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.103133Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276779502117:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.103170Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.103306Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669276779502122:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.104413Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.108919Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:33:12.109017Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669276779502124:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:12.203575Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669276779502175:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:12.365697Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:33:12.512939Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669276779502621:2525] TxId: 281474976715675. Ctx: { TraceId: 01jwtnmc0e07fjqadarr4t6g3j, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=MzVkY2Y4YTctMTNjM2JiOTgtYWJlODY3MjgtZjk2OGNjNDA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:12.622571Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792558, txId: 281474976715674] shutting down 2025-06-03T10:33:12.783780Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792747, txId: 281474976715676] shutting down Trying to start YDB, gRPC: 26813, MsgBus: 5990 2025-06-03T10:33:12.997847Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669278045725310:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:12.997868Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f52/r3tmp/tmpLuLDdx/pdisk_1.dat 2025-06-03T10:33:13.018956Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.019517Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669278045725291:2079] 1748946792997734 != 1748946792997737 TServer::EnableGrpc on GrpcPort 26813, node 2 2025-06-03T10:33:13.031092Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.031108Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.031111Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.031164Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5990 TClient is connected to server localhost:5990 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" Pat ... e 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.542540Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282340694871:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.542571Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669282340694876:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.542573Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.543609Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.553529Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669282340694878:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.626177Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669282340694929:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.900346Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793944, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 26087, MsgBus: 24005 2025-06-03T10:33:14.107794Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669283819652107:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:14.108009Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f52/r3tmp/tmpXdbnDK/pdisk_1.dat 2025-06-03T10:33:14.123623Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:14.124180Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669283819652083:2079] 1748946794107533 != 1748946794107536 TServer::EnableGrpc on GrpcPort 26087, node 3 2025-06-03T10:33:14.135463Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:14.135480Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:14.135483Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:14.135562Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24005 TClient is connected to server localhost:24005 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:14.213102Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.213139Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.213518Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.214054Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:14.225286Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.238356Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.262571Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.276158Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.513271Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283819653713:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.513321Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.520048Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.527789Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.541718Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.557484Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.569240Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.585312Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.641890Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.658005Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283819654369:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.658033Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.658036Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669283819654374:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.658972Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.666390Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669283819654376:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.737015Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669283819654427:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.871023Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.160077Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795043, txId: 281474976715674] shutting down 2025-06-03T10:33:15.535048Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795260, txId: 281474976715677] shutting down >> KqpScan::AggregateEmptyCountStar ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/security/ut/unittest >> TTicketParserTest::LoginEmptyTicketBad [GOOD] Test command err: 2025-06-03T10:32:44.359184Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669155898814478:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:44.359389Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f88/r3tmp/tmpGvh8IB/pdisk_1.dat 2025-06-03T10:32:44.410719Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:44.411933Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669155898814459:2079] 1748946764359029 != 1748946764359032 TServer::EnableGrpc on GrpcPort 20621, node 1 2025-06-03T10:32:44.422972Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:44.422990Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:44.422992Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:44.423036Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29768 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:44.489590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:44.489627Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:44.490636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:44.491370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:44.558659Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:44.564235Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:44.564249Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:44.564625Z node 1 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****vflQ (88DBA131) () has now valid token of user1 2025-06-03T10:32:44.564633Z node 1 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-03T10:32:44.812599Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669156292179892:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:44.812622Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f88/r3tmp/tmpIpZzk7/pdisk_1.dat 2025-06-03T10:32:44.823174Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:44.824724Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669156292179871:2079] 1748946764812491 != 1748946764812494 TServer::EnableGrpc on GrpcPort 63665, node 2 2025-06-03T10:32:44.833391Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:44.833400Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:44.833402Z 
node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:44.833439Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:19635 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:44.916517Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:44.916546Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:44.916933Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:32:44.917555Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:45.044611Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:45.047978Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:45.047990Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:45.048187Z node 2 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****xUkw (D4C15902) () has now valid token of user1 2025-06-03T10:32:45.048196Z node 2 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-03T10:32:45.268007Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669158445697959:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:45.268257Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f88/r3tmp/tmpJCDcPU/pdisk_1.dat 2025-06-03T10:32:45.283327Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:45.283561Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669158445697934:2079] 1748946765267822 != 1748946765267825 TServer::EnableGrpc on GrpcPort 21533, node 3 2025-06-03T10:32:45.293763Z 
node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:45.293779Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:45.293781Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:45.293830Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1142 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:45.371651Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:45.371681Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:45.372398Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:32:45.372727Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:45.448622Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:45.452161Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:32:45.452176Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:32:45.452363Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****j2HQ (93C982CD) () has now valid token of user1 2025-06-03T10:32:45.452373Z node 3 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-03T10:32:45.452752Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:50.268286Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[3:7511669158445697959:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:50.268342Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:32:50.270339Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****j2HQ (93C982CD) 2025-06-03T10:32:50.270430Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****j2HQ (93C982CD) () has now valid token of user1 2025-06-03T10:32:54.272118Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****j2HQ (93C982CD) 2025-06-03T10:32:54.272290Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****j2HQ (93C982CD) () has now valid token of user1 2025-06-03T10:32:55.453062Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:32:59.274358Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****j2HQ (93C982CD) 2025-06-03T10:32:59.274453Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****j2HQ (93C982CD) () has now valid token of user1 2025-06-03T10:33:00.279378Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:33:00.279395Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:04.276579Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****j2HQ (93C982CD) 2025-06-03T10:33:04.276672Z node 3 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****j2HQ (93C982CD) () has now valid token of user1 2025-06-03T10:33:05.602154Z node 4 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[4:7511669244970975482:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:05.602206Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f88/r3tmp/tmp0QOWPl/pdisk_1.dat 2025-06-03T10:33:05.614223Z node 4 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:05.614451Z node 4 :CONFIGS_DISPATCHER ERROR: 
configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [4:7511669244970975458:2079] 1748946785601982 != 1748946785601985 TServer::EnableGrpc on GrpcPort 3722, node 4 2025-06-03T10:33:05.625418Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:05.625435Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:05.625438Z node 4 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:05.625491Z node 4 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17287 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:05.705264Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:05.705335Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:05.705731Z node 4 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:05.706259Z node 4 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(4, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:05.765083Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:33:05.768037Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:752: CanInitLoginToken, domain db /Root, request db , token db /Root, DomainLoginOnly 1 2025-06-03T10:33:05.768051Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:757: CanInitLoginToken, target database candidates(1): /Root 2025-06-03T10:33:05.768235Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1778: Ticket eyJh****ZqVw (08BD3202) () has now valid token of user1 2025-06-03T10:33:05.768245Z node 4 :TICKET_PARSER TRACE: ticket_parser_impl.h:800: CanInitLoginToken, database /Root, A4 success 2025-06-03T10:33:05.768404Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:33:10.602421Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[4:7511669244970975482:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.602480Z node 4 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; 2025-06-03T10:33:10.604457Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****ZqVw (08BD3202) 2025-06-03T10:33:10.604554Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1809: Ticket eyJh****ZqVw (08BD3202) () has now permanent error message 'User not found' 2025-06-03T10:33:14.609390Z node 4 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1500: Refreshing ticket eyJh****ZqVw (08BD3202) 2025-06-03T10:33:15.972510Z node 5 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[5:7511669289771481769:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:15.972541Z node 5 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000f88/r3tmp/tmpyr9ozU/pdisk_1.dat 2025-06-03T10:33:15.991939Z node 5 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.995726Z node 5 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [5:7511669289771481746:2079] 1748946795972293 != 1748946795972296 TServer::EnableGrpc on GrpcPort 9087, node 5 2025-06-03T10:33:16.005199Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:16.005220Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:16.005222Z node 5 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:16.005286Z node 5 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13768 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:16.078460Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:16.078490Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:16.079025Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:16.079420Z node 5 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(5, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:33:16.158628Z node 5 :TICKET_PARSER DEBUG: ticket_parser_impl.h:1480: Updated state for /Root keys 1 2025-06-03T10:33:16.165018Z node 5 :TICKET_PARSER ERROR: ticket_parser_impl.h:916: Ticket **** (00000000): Ticket is empty ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::Join3 [GOOD] Test command err: Trying to start YDB, gRPC: 13721, MsgBus: 10781 2025-06-03T10:33:10.991509Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669269321439408:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:10.991536Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f60/r3tmp/tmpFSbnFM/pdisk_1.dat 2025-06-03T10:33:11.059594Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669269321439386:2079] 1748946790991309 != 1748946790991312 2025-06-03T10:33:11.062591Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13721, node 1 2025-06-03T10:33:11.073097Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:11.073114Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:11.073117Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:11.073198Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10781 
2025-06-03T10:33:11.126250Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:11.126276Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:11.127337Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10781 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:33:11.155151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.170627Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.202041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.227298Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.241169Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.369106Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669273616408315:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.369158Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.432101Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.442017Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.457463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.476074Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.489864Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.504733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.518376Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:11.533686Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669273616408970:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.533726Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669273616408975:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.533724Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:11.534672Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:11.544319Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669273616408977:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:11.627617Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669273616409028:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:11.795759Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:11.919054Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669273616409573:2531] TxId: 281474976715675. Ctx: { TraceId: 01jwtnmbde896t7429zsd7mqw6, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YjM2ZGJiNzctNTdmNzVmYzAtZjQ2ZjhiZDAtZmU0ZGY3NjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:11.924994Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946791963, txId: 281474976715674] shutting down 2025-06-03T10:33:12.000795Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669273616409740:2559] TxId: 281474976715677. Ctx: { TraceId: 01jwtnmbfy3g18y4qezqfeswkr, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NzIwOTM5MTMtMTFiNTNjZjktZjU2OTNlLWIwMjBjMjJj, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database /Root 2025-06-03T10:33:12.005911Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946792047, txId: 281474976715676] shutting down Trying to start YDB, gRPC: 3436, MsgBus: 8257 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f60/r3tmp/tmpozOZUs/pdisk_1.dat 2025-06-03T10:33:12.393809Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:12.400252Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:12.400436Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669277852270101:2079] 1748946792384544 != 1748946792384547 TServer::EnableGrpc on GrpcPort 3436, node 2 2025-06-03T10:33:12.410727Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:12.410740Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:12.410743Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:12.410820Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:8257 TClient is connected to server localhost:8257 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 Sche ... _WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669277852272395:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.994668Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.994691Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669277852272400:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:12.995585Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:12.999848Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669277852272402:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:13.096283Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669282147239749:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:13.240470Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.422179Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946793461, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 65397, MsgBus: 6654 2025-06-03T10:33:13.737362Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669280455106633:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.737407Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001f60/r3tmp/tmpc0kW1j/pdisk_1.dat 2025-06-03T10:33:13.752548Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.752729Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669280455106613:2079] 1748946793737196 != 1748946793737199 TServer::EnableGrpc on GrpcPort 65397, node 3 2025-06-03T10:33:13.761552Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.761569Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.761572Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.761634Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6654 TClient is connected to server localhost:6654 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:13.843370Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.843402Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.843683Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.844447Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:13.871630Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.883771Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.911462Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.925139Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.187208Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669284750075536:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.187239Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.193809Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.203584Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.212320Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.226839Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.240163Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.254294Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.268390Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.283973Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669284750076189:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.283996Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.284044Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669284750076194:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.284993Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.288022Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669284750076196:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.353025Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669284750076247:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:14.486560Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:15.299645Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946794714, txId: 281474976715674] shutting down 2025-06-03T10:33:16.072709Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795484, txId: 281474976715676] shutting down >> DataShardSnapshots::LockedWriteReuseAfterCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink >> KqpScan::EarlyFinish [GOOD] >> KqpScan::Effects |70.4%| [TA] $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> DataShardSnapshots::VolatileSnapshotSplit [GOOD] >> DataShardSnapshots::VolatileSnapshotMerge >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config >> DataShardSnapshots::MvccSnapshotAndSplit [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink >> DataShardSnapshots::ShardRestartWholeShardLockBasic [GOOD] >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> LocalPartition::DescribeHang [GOOD] Test command err: 2025-06-03T10:31:31.664437Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668841845337267:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:31.664694Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:31:31.711524Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000dc7/r3tmp/tmp8HRJ5L/pdisk_1.dat 2025-06-03T10:31:31.750483Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:31.751398Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668841845337243:2079] 1748946691664133 != 1748946691664136 TServer::EnableGrpc on GrpcPort 18166, node 1 2025-06-03T10:31:31.768259Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:31.768303Z node 1 :HIVE 
WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:31.769906Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:31.785318Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000dc7/r3tmp/yandexZFDZoi.tmp 2025-06-03T10:31:31.785331Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000dc7/r3tmp/yandexZFDZoi.tmp 2025-06-03T10:31:31.785437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000dc7/r3tmp/yandexZFDZoi.tmp 2025-06-03T10:31:31.785503Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:31.793344Z INFO: TTestServer started on Port 27241 GrpcPort 18166 TClient is connected to server localhost:27241 PQClient connected to localhost:18166 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:31.838845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:31.851191Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:31:32.170765Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668846140305323:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.170849Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.171095Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668846140305359:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.172307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:32.180188Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-03T10:31:32.180404Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668846140305361:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:31:32.231163Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:32.243728Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:32.269719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:32.281733Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668846140305630:2553] txid# 281474976710666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:32.305007Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668846140305638:2370], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:32.305934Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZTZlNjhiOS1lNGFlNzU3ZC04N2E2OGRiMy00ZmQxZWIwNg==, ActorId: [1:7511668846140305319:2332], ActorState: ExecuteState, TraceId: 01jwtnha269vxwb3c9g8kp6ram, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:32.306539Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668846140305718:2606] 2025-06-03T10:31:36.664671Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668841845337267:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:36.664710Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:37.571590Z :ReadWithRestarts INFO: TTopicSdkTestSetup started 2025-06-03T10:31:37.586193Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:37.593527Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:37.593652Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668867615142404:2687] connected; active server actors: 1 2025-06-03T10:31:37.593807Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-03T10:31:37.593891Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:37.593954Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:37.593991Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:37.593995Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:37.593996Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:37.593999Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31:37.594004Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:37.594018Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-03T10:31:37.594044Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:37.594070Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:37.594371Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:37.594374Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-03T10:31:37.594387Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668867615142440:2449], now ... 
QUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:33:16.398370Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72075186224037892] Try execute txs with state EXECUTED 2025-06-03T10:33:16.398373Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037892] TxId 281474976715674, State EXECUTED 2025-06-03T10:33:16.398376Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72075186224037892] TxId 281474976715674 State EXECUTED FrontTxId 281474976715674 2025-06-03T10:33:16.398379Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:3987: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-03T10:33:16.398382Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037892] TxId 281474976715674, NewState WAIT_RS_ACKS 2025-06-03T10:33:16.398384Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72075186224037892] TxId 281474976715674 moved from EXECUTED to WAIT_RS_ACKS 2025-06-03T10:33:16.398387Z node 12 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715674] PredicateAcks: 0/0 2025-06-03T10:33:16.398388Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4537: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-03T10:33:16.398390Z node 12 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715674] PredicateAcks: 0/0 2025-06-03T10:33:16.398392Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4598: [PQ: 72075186224037892] add an TxId 281474976715674 to the list for deletion 2025-06-03T10:33:16.398395Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037892] TxId 281474976715674, NewState DELETING 2025-06-03T10:33:16.398399Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:3832: [PQ: 72075186224037892] delete key for TxId 281474976715674 2025-06-03T10:33:16.398407Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:33:16.398572Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:33:16.398580Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-03T10:33:16.398581Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037892] TxId 281474976715674, State DELETING 2025-06-03T10:33:16.398585Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:4548: [PQ: 72075186224037892] delete TxId 281474976715674 2025-06-03T10:33:16.399007Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-03T10:33:16.399270Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-03T10:33:16.399278Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:62308 2025-06-03T10:33:16.400643Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:33:16.400743Z node 12 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:33:16.400756Z node 12 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-03T10:33:16.400889Z node 12 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:33:16.400912Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:54284 2025-06-03T10:33:16.400923Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:54284 proto=v1 topic=test-topic durationSec=0 2025-06-03T10:33:16.400927Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:33:16.401766Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-03T10:33:16.401816Z node 12 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:33:16.401818Z node 12 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:33:16.401820Z node 12 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:33:16.401833Z node 12 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [12:7511669295083490061:2465] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:33:16.401838Z node 12 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:33:16.402017Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 12, Generation: 1 2025-06-03T10:33:16.402040Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [12:7511669295083490064:2465], now have 1 active actors on pipe 2025-06-03T10:33:16.402048Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-03T10:33:16.402055Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-03T10:33:16.402087Z node 12 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 generated for partition 0 topic 'test-topic' owner src 2025-06-03T10:33:16.402126Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-03T10:33:16.402150Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:33:16.402188Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-03T10:33:16.402198Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-03T10:33:16.402213Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:33:16.402236Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 2025-06-03T10:33:16.402516Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946796402 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:33:16.402563Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0" topic: "test-topic" 2025-06-03T10:33:16.402676Z :INFO: [/Root] MessageGroupId [src] SessionId [src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0] Write session: close. 
Timeout = 0 ms 2025-06-03T10:33:16.402682Z :INFO: [/Root] MessageGroupId [src] SessionId [src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0] Write session will now close 2025-06-03T10:33:16.402686Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0] Write session: aborting 2025-06-03T10:33:16.402804Z :INFO: [/Root] MessageGroupId [src] SessionId [src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:33:16.402810Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0] Write session: destroy 2025-06-03T10:33:16.402948Z node 12 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 grpc read done: success: 0 data: 2025-06-03T10:33:16.402960Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 grpc read failed 2025-06-03T10:33:16.402967Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 grpc closed 2025-06-03T10:33:16.402970Z node 12 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|9d5135b2-555d77d6-15bb1b88-d12eaebd_0 is DEAD 2025-06-03T10:33:16.403209Z node 12 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:33:16.403314Z node 12 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [12:7511669295083490064:2465] destroyed 2025-06-03T10:33:16.403332Z node 12 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:33:16.403434Z :DescribeHang INFO: Topic created ==== TMockDiscovery server started on port 6621 ==== TMockDiscovery add endpoints, firstNodeId 9999, nodeCount 2, port 0 ====TYdbPqTestRetryPolicy() ====ExpectBreakDown === Create write session ==== ListEndpoints request: database: "/Root" ==== ListEndpoints response: operation { ready: true status: SUCCESS result { [type.googleapis.com/Ydb.Discovery.ListEndpointsResult] { endpoints { address: "localhost" node_id: 9999 } endpoints { address: "ip6-localhost" node_id: 10000 } } } } 2025-06-03T10:33:16.407529Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session: try to update token 2025-06-03T10:33:16.407792Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Get partition location async, partition 0, delay 0.000000s 2025-06-03T10:33:16.407805Z :TRACE: [/Root] TRACE_EVENT DescribePartitionRequest path=/Root/test-topic partition_id=0 === Close write session 2025-06-03T10:33:16.407841Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session: close. 
Timeout 18446744073709.551615s 2025-06-03T10:33:16.407844Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session will now close 2025-06-03T10:33:16.407850Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session: aborting 2025-06-03T10:33:16.407854Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session: gracefully shut down, all writes complete 2025-06-03T10:33:16.407856Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Getting partition location, partition 0 2025-06-03T10:33:16.407863Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [0] Generation [0] Write session: destroy >> KqpScan::AggregateEmptyCountStar [GOOD] |70.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test |70.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> DataShardSnapshots::LockedWriteReuseAfterCommit-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink >> TxUsage::WriteToTopic_Demo_35_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::AggregateEmptyCountStar [GOOD] Test command err: Trying to start YDB, gRPC: 9620, MsgBus: 30759 2025-06-03T10:33:14.290432Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669285703428164:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:14.290469Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecb/r3tmp/tmpPSKRlO/pdisk_1.dat 2025-06-03T10:33:14.359088Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:14.359146Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669285703428145:2079] 1748946794290271 != 1748946794290274 TServer::EnableGrpc on GrpcPort 9620, node 1 2025-06-03T10:33:14.374198Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:14.374212Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:14.374214Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:14.374261Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30759 TClient is connected to server localhost:30759 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:33:14.432091Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.432126Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.433342Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:14.445247Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.458765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.477878Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.497035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.509693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:14.715257Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669285703429777:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.715294Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.763129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.772198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.786181Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.800467Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.814346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.828307Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.842293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.860691Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669285703430431:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.860722Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.860778Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669285703430436:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:14.861780Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:14.868729Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669285703430438:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:14.954658Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669285703430489:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:15.317596Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946795197, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 21503, MsgBus: 30060 2025-06-03T10:33:15.582481Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669290470228835:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:15.582584Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecb/r3tmp/tmp0MjBHs/pdisk_1.dat 2025-06-03T10:33:15.599969Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.600189Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669290470228810:2079] 1748946795582212 != 1748946795582215 TServer::EnableGrpc on GrpcPort 21503, node 2 2025-06-03T10:33:15.609331Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:15.609355Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:15.609358Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:15.609420Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:30060 TClient is connected to server localhost:30060 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:15.687564Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:15.687596Z node 2 :HIVE WARN ... 
rd__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.969754Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.983133Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.999175Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669290470231088:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.999197Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.999200Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669290470231093:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.999903Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:16.002414Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669290470231095:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:16.068071Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669294765198442:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:16.416551Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946796317, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 10714, MsgBus: 21642 2025-06-03T10:33:16.754702Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511669294982096541:2064];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:16.754999Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecb/r3tmp/tmpfoJspk/pdisk_1.dat 2025-06-03T10:33:16.772165Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:16.772407Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669294982096518:2079] 1748946796754419 != 1748946796754422 TServer::EnableGrpc on GrpcPort 10714, node 3 2025-06-03T10:33:16.786502Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:16.786519Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:16.786522Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:16.786589Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21642 TClient is connected to server localhost:21642 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:16.861649Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:16.861682Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:16.862192Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:16.862674Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:16.870252Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:16.886486Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:33:16.948133Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:33:16.969317Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:17.280151Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669299277065470:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:17.280218Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:17.292152Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.306552Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.318259Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.329054Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.343724Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.356435Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.370038Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:17.390090Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669299277066122:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:17.390115Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:17.390297Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669299277066127:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:17.391284Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:17.399411Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669299277066129:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:17.487884Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669299277066180:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:17.878758Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946797808, txId: 281474976715672] shutting down |70.4%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator >> KqpScan::Effects [GOOD] >> KqpScan::DropRedundantSortByPk |70.4%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |70.4%| [TA] {RESULT} $(B)/ydb/core/security/ut/test-results/unittest/{meta.json ... results_accumulator.log} |70.4%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_ru_calculator/ydb-core-tx-schemeshard-ut_ru_calculator |70.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> DataShardSnapshots::MvccSnapshotTailCleanup [GOOD] >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue |70.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut |70.5%| [LD] {RESULT} $(B)/ydb/core/ymq/ut/ydb-core-ymq-ut >> DataShardSnapshots::ShardRestartLockUnrelatedUpsert [GOOD] >> TxUsage::WriteToTopic_Demo_36_Table >> DataShardSnapshots::ShardRestartLockBrokenByConflict >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] >> TxUsage::WriteToTopic_Demo_7_Query [GOOD] >> DataShardSnapshots::VolatileSnapshotMerge [GOOD] >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate >> DataShardSnapshots::MvccSnapshotLockedWrites+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink >> LocalPartition::WithoutPartitionWithSplit [GOOD] >> TxUsage::SessionAbort_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::CreateTableInTemp [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142] Leader for TabletID 72057594046678944 is [1:235:2150] sender: [1:237:2060] recipient: [1:221:2142] 2025-06-03T10:32:25.694094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:25.694129Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:25.694136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:25.694143Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:25.694159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:25.694164Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:25.694173Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:25.694192Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:25.694322Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:25.694426Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:25.712650Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:25.712684Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:25.716231Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:25.716549Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:25.716614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:25.718306Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:25.718365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:25.718517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:25.718638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:25.719506Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:25.719573Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:25.719906Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:25.719919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:25.719959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:25.719968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, 
schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:25.719975Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:25.720032Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.721657Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:235:2150] sender: [1:351:2060] recipient: [1:17:2064] 2025-06-03T10:32:25.743687Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:25.743806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.743883Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:32:25.743952Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:25.743967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.744937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:25.744968Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:25.745034Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.745054Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:25.745060Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:25.745065Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:25.745542Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.745554Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:25.745561Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:25.745898Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.745908Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.745916Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:25.745924Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:25.746634Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:25.747083Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:25.747133Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:25.747346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:25.747376Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 246 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:25.747389Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:25.747454Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:25.747462Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:25.747499Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:25.747511Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:25.747919Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:25.747945Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: 
TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:25.747997Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... RD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 2/3, is published: true 2025-06-03T10:33:18.660546Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:33:18.660556Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:33:18.660562Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:275: Activate send for 104:0 2025-06-03T10:33:18.660576Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:985:2747] msg type: 269552132 msg: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 at schemeshard: 72057594046678944 2025-06-03T10:33:18.660598Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269552132, Sender [7:236:2151], Recipient [7:985:2747]: NKikimrTxDataShard.TEvSchemaChangedResult TxId: 104 2025-06-03T10:33:18.660603Z node 7 :TX_DATASHARD TRACE: datashard_impl.h:3133: StateWork, processing event TEvDataShard::TEvSchemaChangedResult 2025-06-03T10:33:18.660609Z node 7 :TX_DATASHARD DEBUG: datashard.cpp:2953: Handle TEvSchemaChangedResult 104 datashard 72075186233409551 state Ready 2025-06-03T10:33:18.660617Z node 7 :TX_DATASHARD DEBUG: datashard__schema_changed.cpp:22: 72075186233409551 Got TEvSchemaChangedResult from SS at 72075186233409551 2025-06-03T10:33:18.660653Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 2146435072, Sender [7:236:2151], Recipient [7:236:2151]: NKikimr::NSchemeShard::TEvPrivate::TEvProgressOperation 2025-06-03T10:33:18.660659Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4899: StateWork, processing event TEvPrivate::TEvProgressOperation 2025-06-03T10:33:18.660665Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 104:0, at schemeshard: 72057594046678944 2025-06-03T10:33:18.660672Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 104:0 ProgressState 2025-06-03T10:33:18.660682Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:33:18.660687Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-06-03T10:33:18.660691Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:33:18.660696Z node 7 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#104:0 progress is 3/3 2025-06-03T10:33:18.660700Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:33:18.660705Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 104, ready parts: 3/3, is published: true 2025-06-03T10:33:18.660715Z node 7 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__operation.cpp:1630: TOperation DoNotify send TEvNotifyTxCompletionResult to actorId: [7:583:2403] message: TxId: 104 2025-06-03T10:33:18.660722Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 104 ready parts: 3/3 2025-06-03T10:33:18.660730Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:0 2025-06-03T10:33:18.660735Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:0 2025-06-03T10:33:18.660769Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 10] was 4 2025-06-03T10:33:18.660775Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:1 2025-06-03T10:33:18.660779Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:1 2025-06-03T10:33:18.660786Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 11] was 3 2025-06-03T10:33:18.660791Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 104:2 2025-06-03T10:33:18.660794Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 104:2 2025-06-03T10:33:18.660802Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 12] was 3 2025-06-03T10:33:18.662549Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:33:18.662588Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 2025-06-03T10:33:18.662609Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:630: Send to actor: [7:583:2403] msg type: 271124998 msg: NKikimrScheme.TEvNotifyTxCompletionResult TxId: 104 at schemeshard: 72057594046678944 2025-06-03T10:33:18.662645Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 104: got EvNotifyTxCompletionResult 2025-06-03T10:33:18.662652Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 104: satisfy waiter [7:1037:2784] 2025-06-03T10:33:18.662701Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:1039:2786], Recipient [7:236:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:18.662708Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:18.662713Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 104 TestModificationResults wait txId: 105 2025-06-03T10:33:18.662929Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [8:557:2102], Recipient [7:236:2151] 2025-06-03T10:33:18.662936Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event 
TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:33:18.663808Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/tmp" OperationType: ESchemeOpCreateIndexedTable CreateIndexedTable { TableDescription { Name: "NotTempTable" Columns { Name: "key" Type: "Uint64" } Columns { Name: "value" Type: "Utf8" } KeyColumnNames: "key" } IndexDescription { Name: "ValueIndex" KeyColumnNames: "value" } } AllowCreateInTempDir: false } TxId: 105 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:33:18.663920Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_just_reject.cpp:47: TReject Propose, opId: 105:0, explain: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:66, at schemeshard: 72057594046678944 2025-06-03T10:33:18.663930Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 105:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:66, at schemeshard: 72057594046678944 2025-06-03T10:33:18.672107Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:33:18.673118Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 105, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/tmp\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:66" TxId: 105 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:33:18.673185Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 105, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/tmp', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 3], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_indexed_table.cpp:66, operation: CREATE TABLE WITH INDEXES, path: /MyRoot/tmp/NotTempTable 2025-06-03T10:33:18.673195Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 105, wait until txId: 105 TestWaitNotification wait txId: 105 2025-06-03T10:33:18.673360Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 105: send EvNotifyTxCompletion 2025-06-03T10:33:18.673370Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 105 2025-06-03T10:33:18.673464Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [7:1109:2856], Recipient [7:236:2151]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:18.673473Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: 
StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:18.673479Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:33:18.673504Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [7:583:2403], Recipient [7:236:2151]: NKikimrScheme.TEvNotifyTxCompletion TxId: 105 2025-06-03T10:33:18.673510Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:33:18.673529Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 105, at schemeshard: 72057594046678944 2025-06-03T10:33:18.673556Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 105: got EvNotifyTxCompletionResult 2025-06-03T10:33:18.673562Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 105: satisfy waiter [7:1107:2854] 2025-06-03T10:33:18.673588Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:1109:2856], Recipient [7:236:2151]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:18.673594Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:18.673599Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 105 >> TxUsage::WriteToTopic_Demo_8_Table >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/runtime/unittest >> KqpScanLogs::GraceJoin-EnabledLogs [GOOD] Test command err: cwd: /home/runner/.ya/build/build_root/u93c/00290d/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 20574, MsgBus: 4788 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00290d/r3tmp/tmpXjp032/pdisk_1.dat TServer::EnableGrpc on GrpcPort 20574, node 1 TClient is connected to server localhost:4788 TClient is connected to server localhost:4788 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
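The two S-expression dumps below are serialized KQP physical plans from KqpScanLogs::GraceJoin: a KqpReadRangesSource stage reads /Root/KeyValue, rows are hash-shuffled on Value (DqCnHashShuffle), self-joined by GraceSelfJoinCore, sorted with WideSort and merged via DqCnMerge. The log does not include the test's query text; the sketch below is a hypothetical YQL query of the same shape (full self-join on Value, ordered by it), with the gRPC port taken from the log line above purely for illustration.

import ydb

# Hypothetical query matching the plan shape dumped below. Columns Key
# (Uint64) and Value (String) are assumed from the StructType in the plan;
# the real test query is not part of this log.
GRACE_SELF_JOIN = """
SELECT t1.Key, t1.Value, t2.Key, t2.Value
FROM `/Root/KeyValue` AS t1
FULL JOIN `/Root/KeyValue` AS t2 ON t1.Value = t2.Value
ORDER BY t1.Value;
"""

driver = ydb.Driver(endpoint="grpc://localhost:20574", database="/Root")  # port from the log above
try:
    driver.wait(timeout=5)
    # Scan-query execution path, which is what this ut suite exercises.
    for part in driver.table_client.scan_query(ydb.ScanQuery(GRACE_SELF_JOIN, {})):
        print(part.result_set.rows)
finally:
    driver.stop()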
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '778) '('"_id" '"402bd86b-d51707c8-83b680d2-9f3ee293") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1)) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '676) '('"_id" '"c87cf8a7-62ee5e28-342f3226-1bfded5a") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '688) '('"_id" '"d3f410c4-6063f31e-dc2f9ac5-b26c608e")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) cwd: /home/runner/.ya/build/build_root/u93c/00290d/ydb/core/kqp/ut/runtime/test-results/unittest/testing_out_stuff/chunk1 Trying to start YDB, gRPC: 63723, MsgBus: 26710 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00290d/r3tmp/tmpSx83aH/pdisk_1.dat TServer::EnableGrpc on GrpcPort 63723, node 2 TClient is connected to server localhost:26710 TClient is connected to server localhost:26710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... waiting... waiting... waiting... waiting... 
( (let $1 (KqpTable '"/Root/KeyValue" '"72057594046644480:6" '"" '1)) (let $2 (KqpRowsSourceSettings $1 '('"Key" '"Value") '() (Void) '())) (let $3 (OptionalType (DataType 'Uint64))) (let $4 (OptionalType (DataType 'String))) (let $5 '('('"_logical_id" '778) '('"_id" '"d7827cc8-704577e1-a8028b4-aa665eed") '('"_wide_channels" (StructType '('"Key" $3) '('"Value" $4))))) (let $6 (DqPhyStage '((DqSource (DataSource '"KqpReadRangesSource") $2)) (lambda '($17) (block '( (let $18 (lambda '($19) (Member $19 '"Key") (Member $19 '"Value"))) (return (FromFlow (ExpandMap (ToFlow $17) $18))) ))) $5)) (let $7 '('1)) (let $8 (DqCnHashShuffle (TDqOutput $6 '0) $7 '1)) (let $9 (StructType '('"t1.Key" $3) '('"t1.Value" $4) '('"t2.Key" $3) '('"t2.Value" $4))) (let $10 '('('"_logical_id" '676) '('"_id" '"ac7a5434-63caea9-d83ddfd7-ebd10d05") '('"_wide_channels" $9))) (let $11 (DqPhyStage '($8) (lambda '($20) (block '( (let $21 '('0 '0 '1 '1)) (let $22 '('0 '2 '1 '3)) (let $23 (GraceSelfJoinCore (ToFlow $20) 'Full $7 $7 $21 $22 '('"t1.Value") '('"t2.Value") '())) (return (FromFlow (WideSort $23 '('('1 (Bool 'true)))))) ))) $10)) (let $12 (DqCnMerge (TDqOutput $11 '0) '('('1 '"Asc")))) (let $13 (DqPhyStage '($12) (lambda '($24) (FromFlow (NarrowMap (ToFlow $24) (lambda '($25 $26 $27 $28) (AsStruct '('"t1.Key" $25) '('"t1.Value" $26) '('"t2.Key" $27) '('"t2.Value" $28)))))) '('('"_logical_id" '688) '('"_id" '"2c8f6d74-2f865e82-747d790f-c0f32980")))) (let $14 '($6 $11 $13)) (let $15 '('"t1.Key" '"t1.Value" '"t2.Key" '"t2.Value")) (let $16 (DqCnResult (TDqOutput $13 '0) $15)) (return (KqpPhysicalQuery '((KqpPhysicalTx $14 '($16) '() '('('"type" '"generic")))) '((KqpTxResultBinding (ListType $9) '0 '0)) '('('"type" '"query")))) ) |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> TxUsage::WriteToTopic_Demo_18_RestartNo_Query [GOOD] >> KqpScan::DropRedundantSortByPk [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByConflict [GOOD] >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::DropRedundantSortByPk [GOOD] Test command err: Trying to start YDB, gRPC: 14955, MsgBus: 10146 2025-06-03T10:33:13.280210Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669280942544771:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:13.280263Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecf/r3tmp/tmp3qjsu2/pdisk_1.dat 2025-06-03T10:33:13.362516Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:13.362786Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669280942544751:2079] 1748946793280030 != 1748946793280033 TServer::EnableGrpc on GrpcPort 14955, node 1 2025-06-03T10:33:13.382224Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:13.382240Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:13.382242Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:13.382297Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10146 2025-06-03T10:33:13.426821Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:13.426856Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:13.427849Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:10146 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:13.444457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.450954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.473111Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:13.495129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 
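For context on the KqpScan::DropRedundantSortByPk block above: judging by the test name, it checks that KQP drops an explicit sort stage when the requested order already matches primary-key order, since row-table reads return rows sorted by primary key. The query below is a hypothetical illustration of that shape, not the test's actual text.

# Hypothetical: an ORDER BY over the primary key (Key is assumed to be the
# PK of /Root/EightShard, a table this suite creates per the log above) can
# be served from the ordered table read itself, so the sort stage is
# redundant and removable.
REDUNDANT_SORT = """
SELECT *
FROM `/Root/EightShard`
ORDER BY Key;
"""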
2025-06-03T10:33:13.509693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.727379Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669280942546381:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.727404Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.777496Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.786042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.798622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.813124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.869020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.883096Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.897203Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:13.913383Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669280942547036:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.913425Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.913430Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669280942547041:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:13.914509Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:13.924543Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669280942547043:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:33:13.985336Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669280942547094:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:16.891769Z node 1 :TX_DATASHARD ERROR: datashard__kqp_scan.cpp:163: Undelivered event: 65542, at: [1:7511669293827452667:2054], tablet: [1:7511669280942545543:2324], scanId: 4, table: /Root/EightShard 2025-06-03T10:33:16.894046Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946796933, txId: 281474976710772] shutting down [[[3];[300u];["Value4"]];[[3];[301u];["Value4"]]] Trying to start YDB, gRPC: 61085, MsgBus: 26869 2025-06-03T10:33:17.350488Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669296138730376:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:17.350529Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecf/r3tmp/tmpewppat/pdisk_1.dat 2025-06-03T10:33:17.378830Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:17.379046Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669296138730357:2079] 1748946797350351 != 1748946797350354 TServer::EnableGrpc on GrpcPort 61085, node 2 2025-06-03T10:33:17.396719Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:17.396733Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:17.396734Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:17.396781Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26869 TClient is connected to server localhost:26869 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 Processing ... 
part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:18.009907Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669300433699940:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:18.009948Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:18.010662Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669300433699945:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:18.011967Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:18.016064Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669300433699947:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:18.081926Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669300433699998:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:18.330295Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669300433700269:2511], status: PRECONDITION_FAILED, issues:
: Error: Execution, code: 1060
:5:13: Error: Scan query cannot have data modifications., code: 2029
: Error: Execution, code: 1060
:5:13: Error: Scan query cannot have data modifications., code: 2029 2025-06-03T10:33:18.331612Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=MzExMzFiODgtNjQ1YjhhMTctMjRkMzY1NWYtNGZjMmZiODE=, ActorId: [2:7511669300433700262:2507], ActorState: ExecuteState, TraceId: 01jwtnmhpke5qj98e6xf7bx9t6, ReplyQueryCompileError, status PRECONDITION_FAILED remove tx with tx_id:
: Error: Execution, code: 1060
:5:13: Error: Scan query cannot have data modifications., code: 2029
: Error: Execution, code: 1060
:5:13: Error: Scan query cannot have data modifications., code: 2029 Trying to start YDB, gRPC: 2268, MsgBus: 27917 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ecf/r3tmp/tmpgai9ax/pdisk_1.dat 2025-06-03T10:33:18.775659Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:18.785924Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:18.789416Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669303214483215:2079] 1748946798752715 != 1748946798752718 TServer::EnableGrpc on GrpcPort 2268, node 3 2025-06-03T10:33:18.795107Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:18.795124Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:18.795127Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:18.795185Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:27917 2025-06-03T10:33:18.863223Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:18.863264Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:18.864161Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:27917 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:18.888554Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:18.893225Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:18.901446Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 
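The PRECONDITION_FAILED compile errors a few lines above ("Scan query cannot have data modifications., code: 2029") record a general rule: scan queries are read-only, and any DML in them is rejected at compile time. A minimal sketch with the YDB Python SDK (endpoint taken from the node-3 log line above; treat the details as illustrative):

import ydb

driver = ydb.Driver(endpoint="grpc://localhost:2268", database="/Root")
driver.wait(timeout=5)

# The failing case from the log: DML inside a scan query is rejected at
# compile time with "Scan query cannot have data modifications." (2029).
it = driver.table_client.scan_query(
    ydb.ScanQuery("UPSERT INTO `/Root/KeyValue` (Key, Value) VALUES (1u, 'x');", {})
)
# Consuming `it` would raise an error carrying the PRECONDITION_FAILED issue.

# Writes go through a regular table-service transaction instead:
pool = ydb.SessionPool(driver)
pool.retry_operation_sync(
    lambda session: session.transaction(ydb.SerializableReadWrite()).execute(
        "UPSERT INTO `/Root/KeyValue` (Key, Value) VALUES (1u, 'x');",
        commit_tx=True,
    )
)
driver.stop()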
waiting... 2025-06-03T10:33:18.920195Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:33:18.954679Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:18.972155Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:19.187854Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669307509452163:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:19.187881Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:19.195405Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.205431Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.218669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.240292Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.258799Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.276669Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.299395Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:19.362506Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669307509452822:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:19.362529Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:19.362686Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669307509452827:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:19.363512Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:19.373011Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669307509452829:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:19.438978Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669307509452880:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> DataShardSnapshots::MvccSnapshotLockedWrites-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink >> DataShardSnapshots::VolatileSnapshotAndLocalMKQLUpdate [GOOD] >> DataShardSnapshots::VolatileSnapshotReadTable >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_1_Query [GOOD] >> PersQueueSdkReadSessionTest::ReadSessionWithCloseNotCommitted [GOOD] >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table >> YdbSdkSessionsPool::WaitQueue/1 >> YdbSdkSessionsPool::StressTestAsync/0 >> YdbSdkSessionsPool1Session::RunSmallPlan/0 >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> BasicUsage::TWriteSession_WriteEncoded [GOOD] >> CompressExecutor::TestReorderedExecutor >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> THiveTest::TestCheckSubHiveMigrationWithReboots [GOOD] >> THiveTest::PipeAlivenessOfDeadTablet >> YdbSdkSessionsPool1Session::CustomPlan/0 >> DataShardSnapshots::ShardRestartWholeShardLockBrokenByUpsert [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_directory_from_leaf_success [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] >> THiveTest::PipeAlivenessOfDeadTablet [GOOD] >> THiveTest::TestBootProgress >> YdbSdkSessionsPool::WaitQueue/1 [GOOD] >> YdbSdkSessionsPool::PeriodicTask/0 >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD] |70.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |70.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut |70.5%| [LD] {RESULT} $(B)/ydb/core/tx/columnshard/splitter/ut/ydb-core-tx-columnshard-splitter-ut >> DataShardSnapshots::MvccSnapshotLockedWritesRestart+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink >> DataShardSnapshots::LockedWriteDistributedCommitSuccess-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink >> THiveTest::TestBootProgress [GOOD] |70.5%| [TA] $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... 
results_accumulator.log} |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead+UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink >> YdbSdkSessionsPool::StressTestSync/0 >> DataShardSnapshots::VolatileSnapshotReadTable [GOOD] >> DataShardSnapshots::VolatileSnapshotRefreshDiscard >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] |70.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::RunSmallPlan/0 [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopic_4_Table [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleDropIndex [GOOD] >> TSchemeshardBackgroundCleaningTest::TempInTemp >> TxUsage::Sinks_Oltp_WriteToTopic_4_Query >> TDSProxyGetTest::TestBlock42GetIntervalsWipedAllOk [GOOD] >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureNone >> DataShardSnapshots::MvccSnapshotLockedWritesRestart-UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] >> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedBeforeRead-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink >> DataShardSnapshots::MvccSnapshotReadWithLongPlanQueue [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink >> TxUsage::WriteToTopic_Demo_36_Table [GOOD] >> TDSProxyPatchTest::SecuredErrorOnPut_ErasureNone [GOOD] >> TDSProxyPatchTest::NaiveErrorOnGetItem_Erasure4Plus2Block >> TDSProxyPatchTest::NaiveErrorOnGetItem_Erasure4Plus2Block [GOOD] >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_1_0_VdiskErrors >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_1_0_VdiskErrors [GOOD] |70.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load |70.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load >> DataShardSnapshots::LockedWriteDistributedCommitFreeze+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_25_Table [GOOD] >> TxUsage::SessionAbort_Table [GOOD] >> DataShardSnapshots::VolatileSnapshotRefreshDiscard [GOOD] >> TxUsage::WriteToTopic_Demo_20_RestartAfterCommit_Query [GOOD] >> YdbSdkSessionsPool::StressTestSync/1 >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead+UseSink [GOOD] >> YdbSdkSessionsPool::WaitQueue/0 >> YdbSdkSessions::TestMultipleSessions >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts+UseSink [GOOD] >> DataShardSnapshots::MvccSnapshotLockedWritesWithoutConflicts-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink >> YdbSdkSessions::MultiThreadSync >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> TxUsage::WriteToTopic_Demo_25_Query >> TxUsage::SessionAbort_Query >> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink >> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink >> DataShardSnapshots::VolatileSnapshotTimeout 
>> DataShardSnapshots::LockedWriteDistributedCommitFreeze-UseSink [GOOD] >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] >> YdbSdkSessions::TestSessionPool >> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly >> TxUsage::WriteToTopic_Demo_8_Table [GOOD] >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/columnshard/ut_schema/unittest >> TColumnShardTestSchema::HotTiersAfterTtl [GOOD] Test command err: FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=9934960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=TRACE;component=2100;fline=native.cpp:71;event=parsing;size=6442960;columns=10; FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=2100;fline=native.cpp:110;event=serialize;size=6442960;columns=10; WaitEmptyAfter=0;Tiers=;TTL={Column=timestamp;EvictAfter=148947346.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=148947346.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=148947346.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=148947346.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=128947346.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=148947346.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=148947346.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=128946146.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=128947346.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=128947346.000000s;Name=;Codec=}; WaitEmptyAfter=0;Tiers={{Column=timestamp;EvictAfter=128946146.000000s;Name=tier0;Codec=};}{{Column=timestamp;EvictAfter=128946146.000000s;Name=tier1;Codec=zstd};};TTL={Column=timestamp;EvictAfter=128946146.000000s;Name=;Codec=}; 2025-06-03T10:32:26.295264Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:102;event=initialize_shard;step=OnActivateExecutor; 2025-06-03T10:32:26.299309Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: 
tablet_id=9437184;self_id=[1:139:2170];fline=columnshard.cpp:120;event=initialize_shard;step=initialize_tiring_finished; 2025-06-03T10:32:26.299420Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Execute at tablet 9437184 2025-06-03T10:32:26.300483Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:26.300578Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:26.300637Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:26.300669Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:26.300692Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:26.300716Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:26.300743Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:26.300767Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:26.300791Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:26.300832Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:26.300874Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:26.300897Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;self_id=[1:139:2170];tablet_id=9437184;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:26.309509Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TxInitSchema.Complete at tablet 9437184 2025-06-03T10:32:26.309604Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=columnshard__init.cpp:137;step=TTxUpdateSchema.Execute_Start;details=normalizers_count=11;current_normalizer=CLASS_NAME=Granules; 
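To relate the Tiers=.../TTL=... header of this TColumnShardTestSchema::HotTiersAfterTtl output to user-facing syntax: each tier entry is an eviction threshold on the timestamp column (tier1 additionally recompressed with zstd), and the trailing TTL eventually drops the rows. The tier wiring in this unittest goes through internal test helpers rather than public YQL, so only the plain TTL clause is sketched below, with a hypothetical table name.

# Standard-YQL counterpart of the TTL part of the settings above; "P30D" is
# an arbitrary interval chosen for the example, and the table name is made up.
SET_TTL = """
ALTER TABLE `/Root/olapTable`
SET (TTL = Interval("P30D") ON timestamp);
"""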
2025-06-03T10:32:26.309620Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=1;type=Granules; 2025-06-03T10:32:26.309671Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:127;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:26.309728Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:26.309748Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:26.309756Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=2;type=Chunks; 2025-06-03T10:32:26.309770Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=chunks.cpp:138;normalizer=TChunksNormalizer;message=0 chunks found; 2025-06-03T10:32:26.309784Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:26.309796Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:26.309803Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=4;type=TablesCleaner; 2025-06-03T10:32:26.309830Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=normalizer.cpp:123;normalizer=TGranulesNormalizer;message=0 chunks found; 2025-06-03T10:32:26.309842Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:26.309854Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:26.309861Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=6;type=CleanGranuleId; 2025-06-03T10:32:26.309877Z node 1 :TX_COLUMNSHARD INFO: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=clean_granule.cpp:133;normalizer=TCleanGranuleIdNormalizer;message=0 chunks found; 2025-06-03T10:32:26.309888Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:26.309899Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:26.309904Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=8;type=CleanInsertionDedup; 
2025-06-03T10:32:26.309921Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:26.309932Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:32:26.309938Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=9;type=GCCountersNormalizer; 2025-06-03T10:32:26.309950Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:26.309962Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:26.309969Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=11;type=SyncPortionFromChunks; 2025-06-03T10:32:26.310001Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:26.310011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:26.310017Z node 1 :TX_COLUMNSHARD NOTICE: log.cpp:784: tablet_id=9437184;process=TTxUpdateSchema::Execute;fline=abstract.cpp:156;event=normalizer_init;last=0;seq_id=15;type=RestoreV1Chunks_V2; 2025-06-03T10:32:26.310043Z node 1 :TX_COLU ... 
6747631; 2025-06-03T10:33:25.307802Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=column_engine_logs.cpp:321;event=StartCleanup;portions_count=3;portions_prepared=0;drop=0;skip=0;portions_counter=0;chunks=0;limit=0;max_portions=1000;max_chunks=500000; 2025-06-03T10:33:25.307819Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1060;background=cleanup;skip_reason=no_changes; 2025-06-03T10:33:25.307826Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1092;background=cleanup;skip_reason=no_changes; 2025-06-03T10:33:25.307864Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::NColumnShard::TEvPrivate::TEvPeriodicWakeup;tablet_id=9437184;fline=columnshard_impl.cpp:1001;background=ttl;skip_reason=no_changes; 2025-06-03T10:33:25.307931Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: EvScan txId: 18446744073709551615 scanId: 0 version: {1748946794645:max} readable: {1748946794645:max} at tablet 9437184 2025-06-03T10:33:25.307991Z node 1 :TX_COLUMNSHARD DEBUG: ctor_logger.h:56: TTxScan prepare txId: 18446744073709551615 scanId: 0 at tablet 9437184 2025-06-03T10:33:25.308056Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1748946794645:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:33;event=parse_program;program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-03T10:33:25.308067Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1748946794645:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:102;parse_proto_program=Command { Projection { Columns { Id: 1 } } } ; 2025-06-03T10:33:25.308190Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1748946794645:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=program.cpp:51;event=program_parsed;result={"edges":[{"owner_id":0,"inputs":[{"from":2}]},{"owner_id":1,"inputs":[{"from":3}]},{"owner_id":2,"inputs":[{"from":1}]},{"owner_id":3,"inputs":[]}],"nodes":{"1":{"p":{"i":"0","p":{"data":[{"name":"timestamp","id":1}]},"o":"1","t":"FetchOriginalData"},"w":2,"id":1},"3":{"p":{"p":{"data":[{"name":"timestamp","id":1}]},"o":"0","t":"ReserveMemory"},"w":0,"id":3},"2":{"p":{"i":"1","p":{"address":{"name":"timestamp","id":1}},"o":"1","t":"AssembleOriginalData"},"w":7,"id":2},"0":{"p":{"i":"1","t":"Projection"},"w":7,"id":0}}}; 2025-06-03T10:33:25.308211Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1748946794645:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=read_metadata.h:141;filter_limit_not_detected= range{ from {+Inf} to {-Inf}}; 
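The parse_program/program_parsed records above show the column-shard side of a single-column scan: the program projects column Id 1 (timestamp), and the parsed graph is a small pipeline. A rough reading, with a hypothetical query that would compile to such a one-column projection:

# Pipeline from the "program_parsed" JSON above, roughly:
#   ReserveMemory -> FetchOriginalData -> AssembleOriginalData -> Projection
# i.e. reserve a memory budget, fetch the raw column chunks, materialize the
# column, then project it into the output. A query touching one column, e.g.:
ONE_COLUMN_SCAN = "SELECT timestamp FROM `/Root/olapTable`;"  # table name hypothetical
# matches the ff=(column_ids=1;column_names=timestamp;) markers in this log.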
2025-06-03T10:33:25.308354Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;tablet_id=9437184;self_id=[1:139:2170];ev=NKikimr::TEvDataShard::TEvKqpScan;tx_id=18446744073709551615;scan_id=0;gen=0;table=;snapshot={1748946794645:max};tablet=9437184;timeout=0.000000s;cpu_limits=Disabled;;fline=tx_scan.cpp:169;event=TTxScan started;actor_id=[1:1466:3411];trace_detailed=; 2025-06-03T10:33:25.308489Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:84;ff_first=(column_ids=1;column_names=timestamp;);; 2025-06-03T10:33:25.308528Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;fline=context.cpp:99;columns_context_info=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;; 2025-06-03T10:33:25.308560Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:33:25.308571Z node 1 :TX_COLUMNSHARD DEBUG: log.cpp:784: TEST_STEP=4;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:33:25.308657Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:105;event=TEvScanDataAck;info=limits:(bytes=8388608;chunks=1);; 2025-06-03T10:33:25.308670Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:188;stage=start;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:33:25.308680Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;method=produce result;fline=actor.cpp:193;stage=scan iterator is finished;iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:33:25.308687Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: actor.cpp:410: Scan [1:1466:3411] finished for tablet 9437184 2025-06-03T10:33:25.308746Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:416;event=scan_finish;compute_actor_id=[1:1465:3410];stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0}],"full":{"a":1748946805308334,"name":"_full_task","f":1748946805308334,"d_finished":0,"c":0,"l":1748946805308697,"d":363},"events":[{"name":"bootstrap","f":1748946805308421,"d_finished":155,"c":1,"l":1748946805308576,"d":155},{"a":1748946805308651,"name":"ack","f":1748946805308651,"d_finished":0,"c":0,"l":1748946805308697,"d":46},{"a":1748946805308647,"name":"processing","f":1748946805308647,"d_finished":0,"c":0,"l":1748946805308697,"d":50},{"name":"ProduceResults","f":1748946805308549,"d_finished":48,"c":2,"l":1748946805308684,"d":48},{"a":1748946805308684,"name":"Finish","f":1748946805308684,"d_finished":0,"c":0,"l":1748946805308697,"d":13}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); 2025-06-03T10:33:25.308760Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:366;event=send_data;compute_actor_id=[1:1465:3410];bytes=0;rows=0;faults=0;finished=1;fault=0;schema=; 2025-06-03T10:33:25.308797Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=actor.cpp:371;event=scan_finished;compute_actor_id=[1:1465:3410];packs_sum=0;bytes=0;bytes_sum=0;rows=0;rows_sum=0;faults=0;finished=1;fault=0;stats={"p":[{"events":["f_bootstrap","l_bootstrap","f_ack","l_ack","f_processing","l_processing","f_ProduceResults","l_ProduceResults","f_Finish","l_Finish"],"t":0}],"full":{"a":1748946805308334,"name":"_full_task","f":1748946805308334,"d_finished":0,"c":0,"l":1748946805308767,"d":433},"events":[{"name":"bootstrap","f":1748946805308421,"d_finished":155,"c":1,"l":1748946805308576,"d":155},{"a":1748946805308651,"name":"ack","f":1748946805308651,"d_finished":0,"c":0,"l":1748946805308767,"d":116},{"a":1748946805308647,"name":"processing","f":1748946805308647,"d_finished":0,"c":0,"l":1748946805308767,"d":120},{"name":"ProduceResults","f":1748946805308549,"d_finished":48,"c":2,"l":1748946805308684,"d":48},{"a":1748946805308684,"name":"Finish","f":1748946805308684,"d_finished":0,"c":0,"l":1748946805308767,"d":83}],"id":"9437184::10"};iterator=ready_results:(count:0;records_count:0;);indexed_data:(ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;); Got TEvKqpCompute::TEvScanData [1:1466:3411]->[1:1465:3410] 2025-06-03T10:33:25.308815Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: 
TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=stats.cpp:8;event=statistic;begin=2025-06-03T10:33:25.308204Z;index_granules=0;index_portions=0;index_batches=0;committed_batches=0;schema_columns=1;filter_columns=0;additional_columns=0;compacted_portions_bytes=0;inserted_portions_bytes=0;committed_portions_bytes=0;data_filter_bytes=0;data_additional_bytes=0;delta_bytes=0;selected_rows=0; 2025-06-03T10:33:25.308822Z node 1 :TX_COLUMNSHARD_SCAN DEBUG: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=read_context.h:190;event=scan_aborted;reason=unexpected on destructor; 2025-06-03T10:33:25.308835Z node 1 :TX_COLUMNSHARD_SCAN INFO: log.cpp:784: TEST_STEP=4;SelfId=[1:1466:3411];TabletId=9437184;ScanId=0;TxId=18446744073709551615;ScanGen=0;task_identifier=;fline=context.h:81;fetching=ef=(column_ids=;column_names=;);;sharding=(column_ids=;column_names=;);;pk=(column_ids=1,2,3,4;column_names=resource_id,resource_type,timestamp,uid;);;ff=(column_ids=1;column_names=timestamp;);;program_input=(column_ids=1;column_names=timestamp;);;;
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier0' stopped at tablet 9437184
FALLBACK_ACTOR_LOGGING;priority=DEBUG;component=1600;manager.cpp:150 :Tier '/tier1' stopped at tablet 9437184
160000/9750768 160000/9750768 160000/9750768 80000/4885288 0/0
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::GetSession/0 [GOOD]
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/1 [GOOD]
>> YdbSdkSessionsPool::WaitQueue/0 [GOOD]
>> YdbSdkSessions::MultiThreadSync [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD]
>> YdbSdkSessions::TestMultipleSessions [GOOD]
>> DataShardSnapshots::MvccSnapshotReadLockedWrites+UseSink [GOOD]
>> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts+UseSink [GOOD]
>> TxUsage::WriteToTopic_Demo_21_RestartNo_Table
>> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink
>> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink
>> DataShardSnapshots::ShardRestartLockBrokenByUncommittedBeforeRead-UseSink [GOOD]
>> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink
>> TxUsage::WriteToTopic_Demo_8_Query
>> CompressExecutor::TestReorderedExecutor [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Table [GOOD]
>> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_2_1_0_VdiskErrors [GOOD]
Test command err:
2025-06-03T10:33:25.868087Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [3:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:33:25.868183Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record
partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:33:25.868192Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:33:25.868200Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:25.868205Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:25.868211Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:33:25.868216Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:33:25.874192Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:33:25.874287Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:25.874298Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:25.874418Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:33:25.874430Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:33:25.874435Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 5 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:33:25.874464Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:33:25.874539Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:33:25.874549Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:25.874554Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 7 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:25.874578Z node 3 :BS_PROXY_PUT DEBUG: 
dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:2:0] Marker# BPP01 2025-06-03T10:33:25.874613Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:0:0] Marker# BPP01 2025-06-03T10:33:25.874635Z node 3 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:33:25.874647Z node 3 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:33:25.874711Z node 3 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.449 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.449 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 3 } TEvVPut{ TimestampMs# 0.449 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.454 VDiskId# [0:1:0:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 6.528 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.62 VDiskId# [0:1:1:1:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 6.64 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:2:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.666 VDiskId# [0:1:2:1:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.741 VDiskId# [0:1:0:2:0] NodeId# 3 Status# ERROR } TEvVPut{ TimestampMs# 6.758 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:0:0] NodeId# 3 } TEvVPutResult{ TimestampMs# 6.781 VDiskId# [0:1:1:2:0] NodeId# 3 Status# OK } TEvVPutResult{ TimestampMs# 6.815 VDiskId# [0:1:0:0:0] NodeId# 3 Status# OK } ] }
>> YdbSdkSessionsPool::StressTestSync/0 [GOOD]
>> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead+UseSink [GOOD]
>> YdbSdkSessions::TestActiveSessionCountAfterTransportError
>> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink
>> DataShardSnapshots::MvccSnapshotReadLockedWrites-UseSink [GOOD]
>> YdbSdkSessions::SessionsServerLimit [SKIPPED]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std]
>> TxUsage::SessionAbort_Query [GOOD]
>> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict-UseSink [GOOD]
>> YdbSdkSessions::TestSessionPool [GOOD]
>> YdbSdkSessions::CloseSessionWithSessionPoolExplicitDriverStopOnly [GOOD]
>> CompressExecutor::TestExecutorMemUsage
>> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::FailTest/0 [GOOD]
>> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink
>> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD]
>> DataShardSnapshots::MvccSnapshotLockedWritesWithConflicts-UseSink [GOOD]
>> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart
>> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink
>> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD]
>> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts
>> DataShardSnapshots::ShardRestartLockNotBrokenByUncommittedAfterRead-UseSink [GOOD]
>> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query
>> DataShardSnapshots::ReadIteratorLocalSnapshotThenRestart [GOOD]
>> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart-UseSink [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo]
>> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink
>> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite
>> DataShardSnapshots::MvccSnapshotLockedWritesWithReadConflicts [GOOD]
>> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink
>> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink
>> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead+UseSink [GOOD]
>> DataShardSnapshots::ReadIteratorLocalSnapshotThenWrite [GOOD]
>> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink
>> DataShardSnapshots::RepeatableReadAfterSplitRace
>> DataShardSnapshots::LockedWritesLimitedPerKey+UseSink [GOOD]
>> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink
>> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD]
>> TDSProxyFaultTolerancePatchTest::mirror3dc [GOOD]
>> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart-UseSink [GOOD]
>> TxUsage::WriteToTopic_Demo_21_RestartNo_Table [GOOD]
>> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD]
>> DataShardSnapshots::VolatileSnapshotTimeout [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopic_4_Query [GOOD]
>> LabeledDbCounters::OneTabletRestart [GOOD]
>> TxUsage::WriteToTopic_Demo_36_Query
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD]
>> DataShardSnapshots::RepeatableReadAfterSplitRace [GOOD]
>> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_8_Query [GOOD]
>> TxUsage::WriteToTopic_Demo_25_Query [GOOD]
>> YdbSdkSessionsPool::StressTestSync/1 [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_2_Query [GOOD]
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestReboot [GOOD]
>> YdbSdkSessionsPool::PeriodicTask/0 [GOOD]
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncTableClient [GOOD]
>> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Table [GOOD]
>> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink
>> TxUsage::WriteToTopic_Demo_21_RestartNo_Query
>> DataShardSnapshots::VolatileSnapshotTimeoutRefresh
>> TxUsage::Sinks_Oltp_WriteToTopic_5_Table
>> LabeledDbCounters::TwoTablets
>> TxUsage::WriteToTopic_Demo_36_Query [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std]
>> DataShardSnapshots::PostMergeNotCompactedTooEarly
>> TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query
>> TxUsage::WriteToTopic_Demo_9_Table
>> YdbSdkSessionsPool::PeriodicTask/1
>> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient
>> TxUsage::WriteToTopic_Demo_26_Table
>> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query
>> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table
>> DataShardSnapshots::LockedWritesLimitedPerKey-UseSink [GOOD]
>> TDSProxyPatchTest::SecuredErrorOnGet_ErasureNone
>> DataShardSnapshots::LockedWriteWithAsyncIndex-WithRestart+UseSink [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopic_5_Table [GOOD]
>> DataShardSnapshots::PostMergeNotCompactedTooEarly [GOOD]
>> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink
>> TxUsage::WriteToTopic_Demo_37_Table
>> TDSProxyPatchTest::SecuredErrorOnGet_ErasureNone [GOOD]
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::WaitQueue/0 [GOOD]
|70.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|70.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSessionPool [GOOD]
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestActiveSessionCountAfterTransportError [GOOD]
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool1Session::CustomPlan/0 [GOOD]
|70.6%| [LD] {RESULT} $(B)/ydb/tests/olap/high_load/ydb-tests-olap-high_load
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimit [SKIPPED]
Test command err:
ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:543: Enable after accepting a pull request with merging configs
|70.6%| [TA] {RESULT} $(B)/ydb/core/kqp/ut/runtime/test-results/unittest/{meta.json ... results_accumulator.log}
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolFromDtors [GOOD]
|70.6%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceshard/ut/ydb-core-tx-sequenceshard-ut
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/0 [GOOD]
|70.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestSync/1 [GOOD]
>> TxUsage::WriteToTopic_Demo_21_RestartNo_Query [GOOD]
>> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink
>> DataShardSnapshots::PipelineAndMediatorRestoreRace
>> DataShardSnapshots::LockedWriteWithPendingVolatileCommit+UseSink [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopic_5_Query
>> TDSProxyPatchTest::MovedOk_Erasure4Plus2Block
>> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table
>> DataShardSnapshots::PipelineAndMediatorRestoreRace [GOOD]
>> DataShardSnapshots::LockedWriteWithAsyncIndex+WithRestart+UseSink [GOOD]
>> TDSProxyPatchTest::MovedOk_Erasure4Plus2Block [GOOD]
>> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink
>> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink
>> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_0_0_VdiskErrors
>> DataShardSnapshots::ShardRestartLockBasic
>> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD]
>> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_0_0_VdiskErrors [GOOD]
>> DataShardSnapshots::ShardRestartLockBasic [GOOD]
>> DataShardSnapshots::ShardRestartAfterDropTable
>> DataShardSnapshots::ShardRestartAfterDropTable [GOOD]
>> DataShardSnapshots::ShardRestartAfterDropTableAndAbort
>> YdbSdkSessions::TestActiveSessionCountAfterBadSession
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED]
>> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/dsproxy/ut/unittest >> TDSProxyPutTest::TestMirror3dcPutStatusOkWith_1_0_0_VdiskErrors [GOOD]
Test command err:
2025-06-03T10:33:39.255880Z node 11 :BS_PROXY_PUT INFO: dsproxy_put.cpp:645: [7e4afa7ea38a37be] bootstrap ActorId# [11:82:2128] Group# 0 BlobCount# 1 BlobIDs# [[72075186224047637:1:863:1:24576:786:0]] HandleClass# TabletLog Tactic# Default RestartCounter# 0 Marker# BPP13 2025-06-03T10:33:39.255974Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG33 2025-06-03T10:33:39.255980Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 0 to# 0 blob Id# [72075186224047637:1:863:1:24576:786:1] Marker# BPG32 2025-06-03T10:33:39.255984Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:39.255987Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 1 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:39.255990Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 2 blob Id#
[72075186224047637:1:863:1:24576:786:3] Marker# BPG33 2025-06-03T10:33:39.255993Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 2 to# 2 blob Id# [72075186224047637:1:863:1:24576:786:3] Marker# BPG32 2025-06-03T10:33:39.258895Z node 11 :BS_PROXY_PUT INFO: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# ERROR ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:1:0] Marker# BPP01 2025-06-03T10:33:39.258936Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:324: [7e4afa7ea38a37be] partPlacement record partSituation# ESituation::Unknown to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG33 2025-06-03T10:33:39.258941Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_strategy_base.cpp:345: [7e4afa7ea38a37be] Sending missing VPut part# 1 to# 4 blob Id# [72075186224047637:1:863:1:24576:786:2] Marker# BPG32 2025-06-03T10:33:39.258989Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:3] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:1:1:0] Marker# BPP01 2025-06-03T10:33:39.259005Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:1] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:2:1:0] Marker# BPP01 2025-06-03T10:33:39.259032Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_put.cpp:260: [7e4afa7ea38a37be] received {EvVPutResult Status# OK ID# [72075186224047637:1:863:1:24576:786:2] {MsgQoS MsgId# { SequenceId: 1 MsgId: 0 }}} from# [0:1:0:2:0] Marker# BPP01 2025-06-03T10:33:39.259058Z node 11 :BS_PROXY_PUT DEBUG: dsproxy_put_impl.cpp:72: [7e4afa7ea38a37be] Result# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} GroupId# 0 Marker# BPP12 2025-06-03T10:33:39.259067Z node 11 :BS_PROXY_PUT INFO: dsproxy_put.cpp:486: [7e4afa7ea38a37be] SendReply putResult# TEvPutResult {Id# [72075186224047637:1:863:1:24576:786:0] Status# OK StatusFlags# { } ApproximateFreeSpaceShare# 0} ResponsesSent# 0 PutImpl.Blobs.size# 1 Last# true Marker# BPP21 2025-06-03T10:33:39.259099Z node 11 :BS_PROXY_PUT NOTICE: {BPP72@dsproxy_put.cpp:470} Query history GroupId# 0 HandleClass# TabletLog Tactic# Default History# THistory { Entries# [ TEvVPut{ TimestampMs# 0.343 sample PartId# [72075186224047637:1:863:1:24576:786:3] QueryCount# 1 VDiskId# [0:1:1:1:0] NodeId# 11 } TEvVPut{ TimestampMs# 0.343 sample PartId# [72075186224047637:1:863:1:24576:786:1] QueryCount# 1 VDiskId# [0:1:2:1:0] NodeId# 11 } TEvVPut{ TimestampMs# 0.343 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:1:0] NodeId# 11 } TEvVPutResult{ TimestampMs# 3.249 VDiskId# [0:1:0:1:0] NodeId# 11 Status# ERROR } TEvVPut{ TimestampMs# 3.281 sample PartId# [72075186224047637:1:863:1:24576:786:2] QueryCount# 1 VDiskId# [0:1:0:2:0] NodeId# 11 } TEvVPutResult{ TimestampMs# 3.319 VDiskId# [0:1:1:1:0] NodeId# 11 Status# OK } TEvVPutResult{ TimestampMs# 3.334 VDiskId# [0:1:2:1:0] NodeId# 11 Status# OK } TEvVPutResult{ TimestampMs# 3.362 VDiskId# [0:1:0:2:0] NodeId# 11 Status# OK } ] }
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::TempInTemp [GOOD]
Test command err:
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142] Leader for TabletID 72057594046678944 is [1:238:2153] sender: [1:239:2060] recipient: [1:221:2142] 2025-06-03T10:32:25.944484Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:25.944517Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:25.944524Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:25.944531Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:25.944548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:25.944554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:25.944565Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:25.944581Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:25.944688Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:25.944769Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:25.961252Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:25.961284Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:25.966282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:25.966347Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:25.966410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:25.969728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:25.969815Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:25.969969Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:25.970036Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:25.971042Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:25.971116Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:25.971488Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:25.971502Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:25.971551Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:25.971561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:25.971567Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:25.971600Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.973569Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:238:2153] sender: [1:353:2060] recipient: [1:17:2064] 2025-06-03T10:32:25.998646Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:25.998759Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:25.998844Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:32:25.998899Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:25.998913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:26.000008Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:26.000045Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:26.000120Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:26.000138Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: 
TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:26.000146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:26.000152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:26.000847Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:26.000865Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:26.000871Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:26.001388Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:26.001403Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:26.001410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:26.001420Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:26.002325Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:26.002918Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:26.002974Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:26.003213Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:26.003251Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 246 RawX2: 4294969454 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:26.003261Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:26.003337Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:26.003347Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:26.003387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:26.003403Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:26.004015Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:26.004031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:26.004090Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... d EvNotifyTxCompletion 2025-06-03T10:33:36.201251Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 106 2025-06-03T10:33:36.201313Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [7:686:2506], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.201319Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.201322Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:33:36.201336Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [7:581:2401], Recipient [7:238:2153]: NKikimrScheme.TEvNotifyTxCompletion TxId: 106 2025-06-03T10:33:36.201339Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:33:36.201347Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 106, at schemeshard: 72057594046678944 2025-06-03T10:33:36.201361Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 106: got EvNotifyTxCompletionResult 2025-06-03T10:33:36.201365Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 106: satisfy waiter [7:684:2504] 2025-06-03T10:33:36.201381Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:686:2506], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.201386Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.201388Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 106 TestModificationResults wait txId: 107 2025-06-03T10:33:36.201461Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender 
[8:557:2102], Recipient [7:238:2153] 2025-06-03T10:33:36.201466Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:33:36.201947Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 557 RawX2: 34359740470 } AllowCreateInTempDir: false } TxId: 107 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:33:36.201985Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 107:0, at schemeshard: 72057594046678944 2025-06-03T10:33:36.202003Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 107:1, propose status:StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134, at schemeshard: 72057594046678944 2025-06-03T10:33:36.202039Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:33:36.202383Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 107, response: Status: StatusPreconditionFailed Reason: "Check failed: path: \'/MyRoot/test/tmp/a/b\', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134" TxId: 107 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:33:36.202405Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 107, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Check failed: path: '/MyRoot/test/tmp/a/b', error: path is temporary (id: [OwnerId: 72057594046678944, LocalPathId: 5], type: EPathTypeDir, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_mkdir.cpp:134, operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-03T10:33:36.202410Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 107, wait until txId: 107 TestWaitNotification wait txId: 107 2025-06-03T10:33:36.202460Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 107: send EvNotifyTxCompletion 2025-06-03T10:33:36.202464Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 107 2025-06-03T10:33:36.202501Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [7:692:2512], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.202505Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.202508Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:33:36.202521Z node 7 
:FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [7:581:2401], Recipient [7:238:2153]: NKikimrScheme.TEvNotifyTxCompletion TxId: 107 2025-06-03T10:33:36.202525Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:33:36.202531Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 107, at schemeshard: 72057594046678944 2025-06-03T10:33:36.202542Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 107: got EvNotifyTxCompletionResult 2025-06-03T10:33:36.202545Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 107: satisfy waiter [7:690:2510] 2025-06-03T10:33:36.202560Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:692:2512], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.202563Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.202565Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 107 TestModificationResults wait txId: 108 2025-06-03T10:33:36.202607Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122432, Sender [8:557:2102], Recipient [7:238:2153] 2025-06-03T10:33:36.202610Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4893: StateWork, processing event TEvSchemeShard::TEvModifySchemeTransaction 2025-06-03T10:33:36.203022Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot/test/tmp/a/b" OperationType: ESchemeOpMkDir MkDir { Name: "tmp2" } TempDirOwnerActorId { RawX1: 557 RawX2: 34359740470 } AllowCreateInTempDir: true } TxId: 108 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:33:36.203057Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_mkdir.cpp:115: TMkDir Propose, path: /MyRoot/test/tmp/a/b/tmp2, operationId: 108:0, at schemeshard: 72057594046678944 2025-06-03T10:33:36.203062Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 108:1, propose status:StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can't be created in another temporary directory., at schemeshard: 72057594046678944 2025-06-03T10:33:36.203091Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:165: TSideEffects ApplyOnExecute at tablet# 72057594046678944 2025-06-03T10:33:36.203493Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 108, response: Status: StatusPreconditionFailed Reason: "Can\'t create temporary directory while flag AllowCreateInTempDir is set. Temporary directory can\'t be created in another temporary directory." TxId: 108 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:33:36.203514Z node 7 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 108, database: /MyRoot, subject: , status: StatusPreconditionFailed, reason: Can't create temporary directory while flag AllowCreateInTempDir is set. 
Temporary directory can't be created in another temporary directory., operation: CREATE DIRECTORY, path: /MyRoot/test/tmp/a/b/tmp2 2025-06-03T10:33:36.203519Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard__operation_side_effects.cpp:207: TSideEffects ApplyOnComplete at tablet# 72057594046678944 TestModificationResult got TxId: 108, wait until txId: 108 TestWaitNotification wait txId: 108 2025-06-03T10:33:36.203584Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 108: send EvNotifyTxCompletion 2025-06-03T10:33:36.203588Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 108 2025-06-03T10:33:36.203624Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877761, Sender [7:698:2518], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.203629Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4979: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:36.203631Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5758: Pipe server connected, at tablet: 72057594046678944 2025-06-03T10:33:36.203643Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124996, Sender [7:581:2401], Recipient [7:238:2153]: NKikimrScheme.TEvNotifyTxCompletion TxId: 108 2025-06-03T10:33:36.203646Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4895: StateWork, processing event TEvSchemeShard::TEvNotifyTxCompletion 2025-06-03T10:33:36.203651Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 108, at schemeshard: 72057594046678944 2025-06-03T10:33:36.203663Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 108: got EvNotifyTxCompletionResult 2025-06-03T10:33:36.203666Z node 7 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 108: satisfy waiter [7:696:2516] 2025-06-03T10:33:36.203679Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 269877764, Sender [7:698:2518], Recipient [7:238:2153]: NKikimr::TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.203682Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4980: StateWork, processing event TEvTabletPipe::TEvServerDisconnected 2025-06-03T10:33:36.203684Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:5806: Server pipe is reset, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 108
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/mind/hive/ut/unittest >> THiveTest::TestBootProgress [GOOD]
Test command err:
2025-06-03T10:30:30.233011Z node 1 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.233793Z node 1 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# {PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.233856Z node 1 :BS_NODE DEBUG: {NW04@node_warden_pdisk.cpp:196} StartLocalPDisk NodeId# 1 PDiskId# 1 Path#
"/tmp/pdisk.dat" PDiskCategory# {Type# DEVICE_TYPE_ROT Kind# 0} Temporary# false 2025-06-03T10:30:30.234012Z node 1 :BS_NODE DEBUG: {NW23@node_warden_vdisk.cpp:67} StartLocalVDiskActor SlayInFlight# false VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 DonorMode# false PDiskRestartInFlight# false PDisksWaitingToStart# false 2025-06-03T10:30:30.234379Z node 1 :BS_NODE DEBUG: {NW24@node_warden_vdisk.cpp:265} StartLocalVDiskActor done VDiskId# [0:1:0:0:0] VSlotId# 1:1:0 PDiskGuid# 1 2025-06-03T10:30:30.234396Z node 1 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.234605Z node 1 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [1:48:2075] ControllerId# 72057594037932033 2025-06-03T10:30:30.234611Z node 1 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.234665Z node 1 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.234688Z node 1 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.238241Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.238259Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.238723Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:56:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238763Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:57:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238800Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:58:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238859Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:59:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238897Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:60:2084] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238932Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:61:2085] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238971Z node 1 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [1:47:2074] Create Queue# [1:62:2086] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.238976Z node 1 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.238990Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [1:48:2075] 2025-06-03T10:30:30.238994Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [1:48:2075] 2025-06-03T10:30:30.239002Z node 1 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.239009Z node 1 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.239201Z node 1 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.239218Z node 2 :BS_NODE DEBUG: {NW26@node_warden_impl.cpp:321} Bootstrap 2025-06-03T10:30:30.239783Z node 2 :BS_NODE DEBUG: {NW18@node_warden_resource.cpp:51} ApplyServiceSet IsStatic# true Comprehensive# false Origin# initial ServiceSet# 
{PDisks { NodeID: 1 PDiskID: 1 Path: "/tmp/pdisk.dat" PDiskGuid: 1 } VDisks { VDiskID { GroupID: 0 GroupGeneration: 1 Ring: 0 Domain: 0 VDisk: 0 } VDiskLocation { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } Groups { GroupID: 0 GroupGeneration: 1 ErasureSpecies: 0 Rings { FailDomains { VDiskLocations { NodeID: 1 PDiskID: 1 VDiskSlotID: 0 PDiskGuid: 1 } } } } AvailabilityDomains: 0 } 2025-06-03T10:30:30.239816Z node 2 :BS_NODE DEBUG: {NW12@node_warden_proxy.cpp:23} StartLocalProxy GroupId# 0 2025-06-03T10:30:30.239946Z node 2 :BS_NODE DEBUG: {NW21@node_warden_pipe.cpp:23} EstablishPipe AvailDomainId# 0 PipeClientId# [2:71:2073] ControllerId# 72057594037932033 2025-06-03T10:30:30.239949Z node 2 :BS_NODE DEBUG: {NW20@node_warden_pipe.cpp:72} SendRegisterNode 2025-06-03T10:30:30.239963Z node 2 :BS_NODE DEBUG: {NW11@node_warden_impl.cpp:296} StartInvalidGroupProxy GroupId# 4294967295 2025-06-03T10:30:30.239994Z node 2 :BS_NODE DEBUG: {NW62@node_warden_impl.cpp:308} StartRequestReportingThrottler 2025-06-03T10:30:30.241029Z node 2 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.241069Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:146: Group# 0 TEvConfigureProxy received GroupGeneration# 1 IsLimitedKeyless# false Marker# DSP02 2025-06-03T10:30:30.241075Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:294: EnsureMonitoring Group# 0 IsLimitedKeyless# 0 fullIfPossible# 0 Marker# DSP58 2025-06-03T10:30:30.241593Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:77:2077] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241641Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:78:2078] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241676Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:79:2079] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241703Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:80:2080] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241748Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:81:2081] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241782Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:82:2082] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241814Z node 2 :BS_PROXY DEBUG: group_sessions.cpp:83: Group# 0 Actor# [2:70:2072] Create Queue# [2:83:2083] targetNodeId# 1 Marker# DSP01 2025-06-03T10:30:30.241821Z node 2 :BS_PROXY INFO: dsproxy_state.cpp:29: Group# 0 SetStateEstablishingSessions Marker# DSP03 2025-06-03T10:30:30.241839Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72057594037932033] ::Bootstrap [2:71:2073] 2025-06-03T10:30:30.241845Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72057594037932033] lookup [2:71:2073] 2025-06-03T10:30:30.241856Z node 2 :BS_PROXY NOTICE: dsproxy_state.cpp:234: Group# 4294967295 HasInvalidGroupId# 1 Bootstrap -> StateEjected Marker# DSP42 2025-06-03T10:30:30.241866Z node 2 :BS_NODE DEBUG: {NWDC00@distconf.cpp:20} Bootstrap 2025-06-03T10:30:30.241964Z node 2 :BS_NODE DEBUG: {NWDC40@distconf_persistent_storage.cpp:25} TReaderActor bootstrap Paths# [] 2025-06-03T10:30:30.245251Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:48:2075] 2025-06-03T10:30:30.245284Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# 
false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.245306Z node 1 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.245670Z node 1 :LOCAL DEBUG: local.cpp:1491: TLocal::Bootstrap 2025-06-03T10:30:30.245701Z node 2 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.245746Z node 2 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [2:71:2073] 2025-06-03T10:30:30.245753Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 131082 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.245756Z node 2 :BS_NODE DEBUG: {NWDC11@distconf_binding.cpp:6} TEvNodesInfo 2025-06-03T10:30:30.245760Z node 2 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:30.245776Z node 1 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72057594037932033 entry.State: StInit ev: {EvForward TabletID: 72057594037932033 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:30:30.245850Z node 1 :LOCAL DEBUG: local.cpp:1441: TDomainLocal(dc-1): Bootstrap 2025-06-03T10:30:30.245886Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.245893Z node 2 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:30.246665Z node 2 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:30.246770Z node 2 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:30.246779Z node 2 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:30.246788Z node 2 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 2025-06-03T10:30:30.246792Z node 2 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:30.246826Z node 2 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[2:95:2087] 2025-06-03T10:30:30.246869Z node 1 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:149: TClient[72057594037932033] queue send [1:48:2075] 2025-06-03T10:30:30.246875Z node 1 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435074 StorageConfigLoaded# false NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.246879Z node 1 :BS_NODE DEBUG: {NWDC32@distconf_persistent_storage.cpp:221} TEvStorageConfigLoaded Cookie# 0 NumItemsRead# 0 2025-06-03T10:30:30.246894Z node 1 :BS_NODE DEBUG: {NWDC35@distconf_persistent_storage.cpp:184} PersistConfig Record# {} Drives# [] 2025-06-03T10:30:30.246958Z node 2 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72057594037932033 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:30:30.247006Z node 1 :LOCAL DEBUG: local.cpp:1149: TDomainLocal(dc-1): Binding to hive 72057594037927937 at domain dc-1 (allocated resources: ) 2025-06-03T10:30:30.247016Z node 1 :BS_NODE DEBUG: {NWDC51@distconf_persistent_storage.cpp:103} TWriterActor bootstrap Drives# [] Record# {} 2025-06-03T10:30:30.247021Z node 1 :LOCAL DEBUG: local.cpp:975: TLocalNodeRegistrar::Bootstrap 
2025-06-03T10:30:30.247024Z node 1 :LOCAL DEBUG: local.cpp:181: TLocalNodeRegistrar::TryToRegister 2025-06-03T10:30:30.247031Z node 1 :LOCAL DEBUG: local.cpp:213: TLocalNodeRegistrar::TryToRegister pipe to hive, pipe:[1:99:2093] 2025-06-03T10:30:30.247041Z node 2 :BS_NODE DEBUG: {NWDC53@distconf.cpp:300} StateWaitForInit event Type# 2146435075 StorageConfigLoaded# true NodeListObtained# false PendingEvents.size# 0 2025-06-03T10:30:30.247051Z node 1 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup Tabl ... ikimr::(anonymous namespace)::TDummyFlatTablet::TTxInit} queued, type NKikimr::(anonymous namespace)::TDummyFlatTablet::TTxInit 2025-06-03T10:33:23.514217Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72075186224037893:1:3} Tx{2, NKikimr::(anonymous namespace)::TDummyFlatTablet::TTxInit} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:33:23.514264Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72075186224037893:1:3} Tx{2, NKikimr::(anonymous namespace)::TDummyFlatTablet::TTxInit} hope 1 -> done Change{2, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:33:23.514271Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72075186224037893:1:3} Tx{2, NKikimr::(anonymous namespace)::TDummyFlatTablet::TTxInit} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:33:23.514289Z node 69 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [52d29a5e326db0d3] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 0 VDisk# [80000000:1:0:0:0]} Marker# DSPC01 2025-06-03T10:33:23.514297Z node 69 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [52d29a5e326db0d3] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 0 Status# OK} Marker# DSPC02 2025-06-03T10:33:23.514307Z node 69 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:172: [312c30a25b7db152] bootstrap ActorId# [69:634:2562] Group# 2147483653 TabletId# 72075186224037893 Channel# 1 RecordGeneration# 1 PerGenerationCounter# 1 Deadline# 586524-01-19T08:01:49.551615Z CollectGeneration# 1 CollectStep# 0 Collect# true Hard# false RestartCounter# 0 Marker# DSPC03 2025-06-03T10:33:23.514312Z node 69 :BS_PROXY DEBUG: group_sessions.h:165: Send to queueActorId# [69:599:2532] NKikimr::TEvBlobStorage::TEvVCollectGarbage# {TEvVCollectGarbage for [tablet:gen:cnt:channel]=[72075186224037893:1:1:1] collect=[1:0] cookie# 0 2025-06-03T10:33:23.514331Z node 69 :TABLET_MAIN INFO: tablet_sys.cpp:1009: Tablet: 72075186224037893 Active! 
Generation: 1, Type: Dummy started in 9msec Marker# TSYS24 2025-06-03T10:33:23.514336Z node 69 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:338: [72075186224037893] Activate 2025-06-03T10:33:23.514456Z node 69 :LOCAL DEBUG: local.cpp:765: TLocalNodeRegistrar: Handle TEvTablet::TEvReady tablet 72075186224037893 generation 1 2025-06-03T10:33:23.514462Z node 69 :LOCAL DEBUG: local.cpp:740: TLocalNodeRegistrar: tablet (72075186224037893,0) marked as running at generation 1 2025-06-03T10:33:23.514495Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:155: TClient[72057594037927937] send [69:52:2092] 2025-06-03T10:33:23.514500Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:629: TClient[72057594037927937] push event to server [69:52:2092] 2025-06-03T10:33:23.514527Z node 69 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:141: [72057594037927937] HandleSend Sender# [69:51:2092] EventType# 268960257 2025-06-03T10:33:23.514547Z node 69 :BS_PROXY_COLLECT DEBUG: dsproxy_collect.cpp:44: [312c30a25b7db152] received TEvVCollectGarbageResult# {EvVCollectGarbageResult Status# OK TabletId# 72075186224037893 RecordGeneration# 1 Channel# 1 VDisk# [80000005:1:0:0:0]} Marker# DSPC01 2025-06-03T10:33:23.514556Z node 69 :BS_PROXY_COLLECT INFO: dsproxy_collect.cpp:112: [312c30a25b7db152] Result# TEvCollectGarbageResult {TabletId# 72075186224037893 RecordGeneration# 1 PerGenerationCounter# 1 Channel# 1 Status# OK} Marker# DSPC02 2025-06-03T10:33:23.514585Z node 69 :HIVE DEBUG: hive_impl.cpp:480: HIVE#72057594037927937 Handle TEvLocal::TEvTabletStatus, TabletId: 72075186224037893 2025-06-03T10:33:23.514603Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:11} Tx{37, NKikimr::NHive::TTxUpdateTabletStatus} queued, type NKikimr::NHive::TTxUpdateTabletStatus 2025-06-03T10:33:23.514609Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:11} Tx{37, NKikimr::NHive::TTxUpdateTabletStatus} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:33:23.514621Z node 69 :HIVE DEBUG: tx__update_tablet_status.cpp:77: HIVE#72057594037927937 THive::TTxUpdateTabletStatus::Execute for tablet Dummy.72075186224037893.Leader.1 status 0 generation 1 follower 0 from local [69:51:2092] 2025-06-03T10:33:23.514637Z node 69 :HIVE DEBUG: tablet_info.cpp:123: HIVE#72057594037927937 Tablet(Dummy.72075186224037893.Leader.1) VolatileState: Starting -> Running (Node 69) 2025-06-03T10:33:23.514649Z node 69 :HIVE TRACE: node_info.cpp:118: HIVE#72057594037927937 Node(69, (0,1048576,0,0)->(0,0,0,0)) 2025-06-03T10:33:23.514675Z node 69 :HIVE TRACE: hive_impl.cpp:2557: HIVE#72057594037927937 UpdateTotalResources: ObjectId (72057594037927937,0): {Memory: 1048576} -> {} 2025-06-03T10:33:23.514686Z node 69 :HIVE TRACE: hive_impl.cpp:2563: HIVE#72057594037927937 UpdateTotalResources: Type Dummy: {Memory: 1048576} -> {} 2025-06-03T10:33:23.514698Z node 69 :HIVE TRACE: node_info.cpp:118: HIVE#72057594037927937 Node(69, (0,0,0,0)->(0,1048576,0,0)) 2025-06-03T10:33:23.514705Z node 69 :HIVE TRACE: hive_impl.cpp:2557: HIVE#72057594037927937 UpdateTotalResources: ObjectId (72057594037927937,0): {} -> {Memory: 1048576} 2025-06-03T10:33:23.514711Z node 69 :HIVE TRACE: hive_impl.cpp:2563: HIVE#72057594037927937 UpdateTotalResources: Type Dummy: {} -> {Memory: 1048576} 2025-06-03T10:33:23.514760Z node 69 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-03T10:33:23.514765Z node 69 :HIVE TRACE: hive_impl.cpp:344: HIVE#72057594037927937 ProcessBootQueue - sending 2025-06-03T10:33:23.514782Z node 69 
:TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:11} Tx{37, NKikimr::NHive::TTxUpdateTabletStatus} hope 1 -> done Change{25, redo 162b alter 0b annex 0, ~{ 1 } -{ }, 0 gb} 2025-06-03T10:33:23.514790Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:11} Tx{37, NKikimr::NHive::TTxUpdateTabletStatus} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:33:23.514847Z node 69 :HIVE TRACE: hive_impl.cpp:328: HIVE#72057594037927937 ProcessBootQueue - executing 2025-06-03T10:33:23.514854Z node 69 :HIVE DEBUG: hive_impl.cpp:361: HIVE#72057594037927937 ProcessWaitQueue (5) 2025-06-03T10:33:23.514857Z node 69 :HIVE DEBUG: hive_impl.cpp:342: HIVE#72057594037927937 ProcessBootQueue (0) 2025-06-03T10:33:23.514867Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:12} Tx{38, NKikimr::NHive::TTxProcessBootQueue} queued, type NKikimr::NHive::TTxProcessBootQueue 2025-06-03T10:33:23.514872Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:12} Tx{38, NKikimr::NHive::TTxProcessBootQueue} took 4194304b of static mem, Memory{4194304 dyn 0} 2025-06-03T10:33:23.514878Z node 69 :HIVE DEBUG: tx__process_boot_queue.cpp:18: HIVE#72057594037927937 THive::TTxProcessBootQueue()::Execute 2025-06-03T10:33:23.514885Z node 69 :HIVE DEBUG: hive_impl.cpp:222: HIVE#72057594037927937 Handle ProcessBootQueue (size: 0) 2025-06-03T10:33:23.514890Z node 69 :HIVE DEBUG: hive_impl.cpp:225: HIVE#72057594037927937 Handle ProcessWaitQueue (size: 5) 2025-06-03T10:33:23.514900Z node 69 :HIVE TRACE: hive_impl.cpp:238: HIVE#72057594037927937 Tablet 72075186224037891.0 has priority 4.000002048 2025-06-03T10:33:23.514909Z node 69 :HIVE DEBUG: hive_impl.cpp:1211: HIVE#72057594037927937 [FBN] Finding best node for tablet Hive.72075186224037891.Leader.0 2025-06-03T10:33:23.514916Z node 69 :HIVE TRACE: hive_impl.cpp:1212: HIVE#72057594037927937 [FBN] Tablet Hive.72075186224037891.Leader.0 family {Hive.72075186224037891.Leader.0 Booting} 2025-06-03T10:33:23.514930Z node 69 :HIVE TRACE: hive_impl.cpp:1335: HIVE#72057594037927937 [FBN] Node 69 is not allowed to run the tablet Hive.72075186224037891.Leader.0 node domains [72057594046678944:1] tablet object domain 52:42 tablet allowed domains [52:42] tablet effective allowed domains [52:42] 2025-06-03T10:33:23.514936Z node 69 :HIVE TRACE: hive_impl.cpp:1343: HIVE#72057594037927937 [FBN] Tablet Hive.72075186224037891.Leader.0 selected nodes count 0 2025-06-03T10:33:23.514942Z node 69 :HIVE TRACE: hive_impl.cpp:1375: HIVE#72057594037927937 [FBN] Tablet Hive.72075186224037891.Leader.0 no node was selected 2025-06-03T10:33:23.514953Z node 69 :HIVE DEBUG: hive_impl.cpp:302: HIVE#72057594037927937 ProcessBootQueue - BootQueue empty (WaitQueue: 5) 2025-06-03T10:33:23.514961Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:12} Tx{38, NKikimr::NHive::TTxProcessBootQueue} hope 1 -> done Change{26, redo 0b alter 0b annex 0, ~{ } -{ }, 0 gb} 2025-06-03T10:33:23.514966Z node 69 :TABLET_EXECUTOR DEBUG: Leader{72057594037927937:2:12} Tx{38, NKikimr::NHive::TTxProcessBootQueue} release 4194304b of static, Memory{0 dyn 0} 2025-06-03T10:33:23.515067Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:50: TClient[72075186224037893] ::Bootstrap [69:636:2564] 2025-06-03T10:33:23.515074Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:533: TClient[72075186224037893] lookup [69:636:2564] 2025-06-03T10:33:23.515098Z node 69 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:484: Handle TEvForward tabletId: 72075186224037893 entry.State: StInit ev: {EvForward TabletID: 
72075186224037893 Ev: nullptr Flags: 1:2:0} 2025-06-03T10:33:23.515133Z node 69 :STATESTORAGE DEBUG: statestorage_proxy.cpp:246: ProxyRequest::HandleInit ringGroup:0 ev: {EvLookup TabletID: 72075186224037893 Cookie: 0 ProxyOptions: SigNone} 2025-06-03T10:33:23.515161Z node 69 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 0} 2025-06-03T10:33:23.515172Z node 69 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 1} 2025-06-03T10:33:23.515178Z node 69 :STATESTORAGE DEBUG: statestorage_replica.cpp:183: Replica::Handle ev: {EvReplicaLookup TabletID: 72075186224037893 Cookie: 2} 2025-06-03T10:33:23.515191Z node 69 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 CurrentLeader: [69:574:2515] CurrentLeaderTablet: [69:589:2527] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:33:23.515208Z node 69 :STATESTORAGE DEBUG: statestorage_proxy.cpp:355: ProxyRequest::HandleLookup ringGroup:0 ev: {EvReplicaInfo Status: 0 TabletID: 72075186224037893 CurrentLeader: [69:574:2515] CurrentLeaderTablet: [69:589:2527] CurrentGeneration: 1 CurrentStep: 0} 2025-06-03T10:33:23.515231Z node 69 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:610: Handle TEvInfo tabletId: 72075186224037893 entry.State: StInitResolve success: true ev: {EvInfo Status: 0 TabletID: 72075186224037893 Cookie: 0 CurrentLeader: [69:574:2515] CurrentLeaderTablet: [69:589:2527] CurrentGeneration: 1 CurrentStep: 0 Locked: false LockedFor: 0 Signature: { Size: 2 Signature: {{[69:24343667:0] : 2}, {[69:1099535971443:0] : 5}}}} 2025-06-03T10:33:23.515238Z node 69 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:354: ApplyEntry leader tabletId: 72075186224037893 followers: 0 2025-06-03T10:33:23.515248Z node 69 :TABLET_RESOLVER DEBUG: tablet_resolver.cpp:279: SelectForward node 69 selfDC leaderDC 1:2:0 local 1 localDc 1 other 0 disallowed 0 tabletId: 72075186224037893 followers: 0 countLeader 1 allowFollowers 0 winner: [69:574:2515] 2025-06-03T10:33:23.515269Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:190: TClient[72075186224037893] forward result local node, try to connect [69:636:2564] 2025-06-03T10:33:23.515275Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:671: TClient[72075186224037893]::SendEvent [69:636:2564] 2025-06-03T10:33:23.515296Z node 69 :PIPE_SERVER DEBUG: tablet_pipe_server.cpp:291: [72075186224037893] Accept Connect Originator# [69:636:2564] 2025-06-03T10:33:23.515323Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:310: TClient[72075186224037893] connected with status OK role: Leader [69:636:2564] 2025-06-03T10:33:23.515329Z node 69 :PIPE_CLIENT DEBUG: tablet_pipe_client.cpp:325: TClient[72075186224037893] send queued [69:636:2564] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartLockBrokenByUncommittedAfterRead-UseSink [GOOD] Test command err: 2025-06-03T10:33:15.078238Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:15.078362Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:15.078405Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fd3/r3tmp/tmpPQ43v5/pdisk_1.dat 2025-06-03T10:33:15.187463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.203764Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.204709Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946794544018 != 1748946794544022 2025-06-03T10:33:15.240825Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=NWYxZGE5NmUtYmU3MDE3ODctM2EzMDhmMS02OTRmZTViMw==, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id NWYxZGE5NmUtYmU3MDE3ODctM2EzMDhmMS02OTRmZTViMw== 2025-06-03T10:33:15.241023Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=NWYxZGE5NmUtYmU3MDE3ODctM2EzMDhmMS02OTRmZTViMw==, ActorId: [1:617:2539], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:33:15.241071Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=1&id=NWYxZGE5NmUtYmU3MDE3ODctM2EzMDhmMS02OTRmZTViMw==, ActorId: [1:617:2539], ActorState: ReadyState, TraceId: 01jwtnmeq9ca5j2ny1wrfxh051, received request, proxyRequestId: 3 prepared: 0 tx_control: 0 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DDL text: CREATE TABLE `/Root/table1` (key int, value int, PRIMARY KEY (key)); rpcActor: [0:0:0] database: databaseId: /Root pool id: default 2025-06-03T10:33:15.268507Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:621:2542], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.268553Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:15.292208Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:15.292249Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:15.292877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.305115Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:15.318571Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:684:2576], Recipient [1:689:2579]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:15.318838Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:684:2576], Recipient [1:689:2579]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:15.318946Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:689:2579] 2025-06-03T10:33:15.319025Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:15.326661Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:684:2576], Recipient [1:689:2579]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:15.326841Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:15.326870Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:15.327011Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:15.327017Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:15.327023Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:15.327081Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:15.327100Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:15.327111Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:703:2579] in generation 1 2025-06-03T10:33:15.327176Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:15.334851Z node 1 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:15.334945Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:15.334983Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:705:2588] 2025-06-03T10:33:15.334988Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:15.334992Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:15.334996Z node 1 
:TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:15.335064Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:689:2579], Recipient [1:689:2579]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:15.335071Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:15.335143Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:15.335162Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:15.335180Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:15.335187Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:15.335194Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:15.335200Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:15.335205Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:15.335210Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:15.335214Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:15.366243Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:708:2590], Recipient [1:689:2579]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:15.366270Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:15.366279Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:680:2574], serverId# [1:708:2590], sessionId# [0:0:0] 2025-06-03T10:33:15.366303Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:708:2590] 2025-06-03T10:33:15.366308Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:15.366347Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:33:15.366402Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:15.366414Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:15.366443Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:15.366450Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:15.366454Z node 1 :TX_DATASHARD TRACE: 
datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:15.366458Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:15.366462Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit StoreSchemeTx 2025-06-03T10:33:15.366534Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayCompleteNoMoreRestarts 2025-06-03T10:33:15.366537Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit StoreSchemeTx 2025-06-03T10:33:15.366540Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:33:15.366543Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit FinishPropose 2025-06-03T10:33:15.366554Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is DelayComplete 2025-06-03T10:33:15.366556Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:33:15.366560Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit WaitForPlan 2025-06-03T10:33:15.366565Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit WaitForPlan 2025-06-03T10:33:15.366573Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1832 ... 
d_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:32.173943Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435074, Sender [13:902:2730], Recipient [13:902:2730]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:33:32.173946Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:33:32.173952Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:33:32.173968Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-03T10:33:32.173976Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CheckDataTx 2025-06-03T10:33:32.173980Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-03T10:33:32.173985Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CheckDataTx 2025-06-03T10:33:32.173988Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:33:32.173990Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:33:32.173994Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v500/18446744073709551615 ImmediateWriteEdgeReplied# v500/18446744073709551615 2025-06-03T10:33:32.173999Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715666] at 72075186224037888 2025-06-03T10:33:32.174002Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-03T10:33:32.174004Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:33:32.174007Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-03T10:33:32.174009Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-03T10:33:32.174016Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715666] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-03T10:33:32.174022Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: false 2025-06-03T10:33:32.174031Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-03T10:33:32.174039Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-03T10:33:32.174041Z node 
13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-03T10:33:32.174044Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:33:32.174046Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-03T10:33:32.174050Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715666 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:33:32.174059Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is DelayComplete 2025-06-03T10:33:32.174061Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:33:32.174063Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715666] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:33:32.174066Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715666] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:33:32.174070Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715666] at 72075186224037888 is Executed 2025-06-03T10:33:32.174074Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715666] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:33:32.174076Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715666] at 72075186224037888 has finished 2025-06-03T10:33:32.174081Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:33:32.174084Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715666] at 72075186224037888 on unit FinishPropose 2025-06-03T10:33:32.174087Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:32.174240Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 275709965, Sender [13:61:2108], Recipient [13:902:2730]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 13 Status: STATUS_NOT_FOUND 2025-06-03T10:33:32.187198Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnmz8ee84a3204d3p3xq0t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YjQyNWFmMmMtYjlhNzBmZDMtNGMyM2UwNjgtNGQxNjJkODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:33:32.187620Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [13:968:2774], Recipient [13:902:2730]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-03T10:33:32.187672Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:33:32.187681Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v500/18446744073709551615 ImmediateWriteEdgeReplied# v500/18446744073709551615 2025-06-03T10:33:32.187689Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v500/18446744073709551615 2025-06-03T10:33:32.187699Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CheckRead 2025-06-03T10:33:32.187715Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:33:32.187719Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:33:32.187723Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:33:32.187726Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:33:32.187740Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037888 2025-06-03T10:33:32.187745Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:33:32.187747Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:33:32.187750Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:33:32.187753Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:33:32.187769Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-03T10:33:32.187814Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 Complete read# {[13:968:2774], 0} after executionsCount# 1 2025-06-03T10:33:32.187821Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:968:2774], 0} sends rowCount# 2, bytes# 64, quota rows left# 999, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:33:32.187835Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:968:2774], 0} 
finished in read 2025-06-03T10:33:32.187842Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:33:32.187845Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:33:32.187847Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:33:32.187850Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:33:32.187860Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037888 is Executed 2025-06-03T10:33:32.187862Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:33:32.187864Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037888 has finished 2025-06-03T10:33:32.187868Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:33:32.187884Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-03T10:33:32.188066Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [13:968:2774], Recipient [13:902:2730]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:33:32.188073Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 22 } } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> PersQueueSdkReadSessionTest::ClosesAfterFailedConnectionToCds [GOOD] Test command err: 2025-06-03T10:32:27.318528Z :WriteRAW INFO: Random seed for debugging is 1748946747318516 2025-06-03T10:32:27.445565Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669084066769424:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:27.445632Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:27.448193Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669083936652474:2222];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002517/r3tmp/tmpK5Me7Q/pdisk_1.dat 2025-06-03T10:32:27.493027Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:27.505738Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:27.505772Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:27.534871Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
2025-06-03T10:32:27.542871Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:27.542901Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:27.544109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 13966, node 1 2025-06-03T10:32:27.580538Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002517/r3tmp/yandex8JsYxm.tmp 2025-06-03T10:32:27.580554Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002517/r3tmp/yandex8JsYxm.tmp 2025-06-03T10:32:27.580622Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002517/r3tmp/yandex8JsYxm.tmp 2025-06-03T10:32:27.580675Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:27.590173Z INFO: TTestServer started on Port 4480 GrpcPort 13966 2025-06-03T10:32:27.591616Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:27.591642Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:27.592779Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:32:27.593107Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4480 PQClient connected to localhost:13966 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:27.636351Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-06-03T10:32:27.970402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669084066770189:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:27.970427Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:27.970602Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669084066770216:2338], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:27.971630Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715661:3, at schemeshard: 72057594046644480 2025-06-03T10:32:27.971971Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669084066770245:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:27.971987Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:27.979607Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669084066770218:2339], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
<main>: Error: Transaction 281474976715661 completed, doublechecking } 2025-06-03T10:32:28.020973Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:28.037769Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669088231619933:2311], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:28.037906Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=NjQ5OGE4NC05MGM5MGYyMy1jZWM0MDU2MS0zOWZmYjMw, ActorId: [2:7511669088231619893:2305], ActorState: ExecuteState, TraceId: 01jwtnk0kkf1n0ccdb32qr5b0h, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:28.038770Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:28.045530Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669088361737653:2703] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:28.049740Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669088361737673:2350], status: SCHEME_ERROR, issues:
<main>: Error: Type annotation, code: 1030
<main>:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:28.049850Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=OTUyNDg0NWItZmJlNDVhYzYtNzdhMzNjN2QtZmMxMjlhOWQ=, ActorId: [1:7511669084066770186:2333], ActorState: ExecuteState, TraceId: 01jwtnk0j1fmfnpmax7s8mwyqz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:28.049999Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:28.095992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:28.165168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:13966", true, true, 1000); 2025-06-03T10:32:28.215155Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715666. Ctx: { TraceId: 01jwtnk0rycb83v32qsxth19sy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=NDA2OWYyN2QtYjQwZWFmZTAtN2MyZDliOGQtOWI2NWE4ZjY=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511669088361737996:2953] 2025-06-03T10:32:32.444777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669084066769424:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:32.4 ... 
28.878183Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:33:28.878185Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-03T10:33:28.878192Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:33:28.878760Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:33:28.900437Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:67: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) RequestPQRB 2025-06-03T10:33:28.900508Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][rt3.dc1--test-topic] pipe [15:7511669346477111494:2487] connected; active server actors: 1 2025-06-03T10:33:28.900525Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__old_chooser_actor.h:80: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) Received partition 0 from PQRB for SourceId=src 2025-06-03T10:33:28.900539Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:174: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) Update the table 2025-06-03T10:33:28.900607Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037893][rt3.dc1--test-topic] pipe [15:7511669346477111494:2487] disconnected; active server actors: 1 2025-06-03T10:33:28.900615Z node 15 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037893][rt3.dc1--test-topic] pipe [15:7511669346477111494:2487] disconnected no session 2025-06-03T10:33:28.916858Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:183: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) HandleUpdate PartitionPersisted=0 Status=SUCCESS 2025-06-03T10:33:28.916879Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=(NULL) 2025-06-03T10:33:28.916883Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:268: TPartitionChooser [15:7511669346477111460:2487] (SourceId=src, PreferedPartition=(NULL)) Start idle 2025-06-03T10:33:28.916892Z node 15 :PQ_WRITE_PROXY DEBUG: 
write_session_actor.cpp:689: ProceedPartition. session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:33:28.917238Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [15:7511669346477111517:2487], now have 1 active actors on pipe 2025-06-03T10:33:28.917429Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 16, Generation: 1 2025-06-03T10:33:28.917564Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:33:28.917578Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:33:28.917625Z node 16 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 generated for partition 0 topic 'rt3.dc1--test-topic' owner src 2025-06-03T10:33:28.917678Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-03T10:33:28.917714Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:33:28.917953Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'rt3.dc1--test-topic' requestId: 2025-06-03T10:33:28.917963Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'rt3.dc1--test-topic' partition 0 2025-06-03T10:33:28.917987Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:33:28.918153Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 2025-06-03T10:33:28.918591Z :INFO: [] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946808918 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:33:28.918681Z :INFO: [] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0" topic: "test-topic" cluster: "dc1" supported_codecs: CODEC_RAW supported_codecs: CODEC_GZIP supported_codecs: CODEC_LZOP 2025-06-03T10:33:28.918919Z :INFO: [] MessageGroupId [src] SessionId [src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0] Write session: close. 
Timeout = 0 ms 2025-06-03T10:33:28.918929Z :INFO: [] MessageGroupId [src] SessionId [src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0] Write session will now close 2025-06-03T10:33:28.918934Z :DEBUG: [] MessageGroupId [src] SessionId [src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0] Write session: aborting 2025-06-03T10:33:28.919093Z :INFO: [] MessageGroupId [src] SessionId [src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:33:28.919098Z :DEBUG: [] MessageGroupId [src] SessionId [src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0] Write session: destroy 2025-06-03T10:33:28.919380Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 grpc read done: success: 0 data: 2025-06-03T10:33:28.919393Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 grpc read failed 2025-06-03T10:33:28.919402Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 grpc closed 2025-06-03T10:33:28.919410Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|a1be74b3-2bb5e9e5-1fb0f1ee-610947d0_0 is DEAD 2025-06-03T10:33:28.919565Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:33:28.919729Z node 16 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [15:7511669346477111517:2487] destroyed 2025-06-03T10:33:28.919746Z node 16 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 2025-06-03T10:33:28.926187Z :INFO: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Starting read session 2025-06-03T10:33:28.926206Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Starting cluster discovery 2025-06-03T10:33:28.926269Z :INFO: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21277: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21277
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:21277. " 2025-06-03T10:33:28.926274Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Restart cluster discovery in 0.009680s 2025-06-03T10:33:28.936469Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Starting cluster discovery 2025-06-03T10:33:28.936578Z :INFO: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21277: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21277
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:21277. " 2025-06-03T10:33:28.936584Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Restart cluster discovery in 0.016980s 2025-06-03T10:33:28.954408Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Starting cluster discovery 2025-06-03T10:33:28.954491Z :INFO: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Cluster discovery request failed. Status: TRANSPORT_UNAVAILABLE. Issues: "
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21277: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21277
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:21277. " 2025-06-03T10:33:28.954497Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Restart cluster discovery in 0.024707s 2025-06-03T10:33:28.979462Z :DEBUG: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Starting cluster discovery 2025-06-03T10:33:28.979561Z :NOTICE: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Aborting read session. Description: SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21277: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21277
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:21277. " } 2025-06-03T10:33:28.979629Z :NOTICE: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } SessionClosed { Status: TRANSPORT_UNAVAILABLE Issues: "
: Error: Failed to discover clusters
: Error: GRpc error: (14): failed to connect to all addresses; last error: UNKNOWN: ipv4:127.0.0.1:21277: Failed to connect to remote host: Connection refused
: Error: Grpc error response on endpoint localhost:21277
: Error: Endpoint list is empty for database /Root, cluster endpoint localhost:21277. " } 2025-06-03T10:33:28.979664Z :INFO: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Closing read session. Close timeout: 0.000000s 2025-06-03T10:33:28.979673Z :NOTICE: [/Root] [/Root] [33ff60c2-683ef19a-be6df24d-dfbf7381] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithPendingVolatileCommit-UseSink [GOOD] Test command err: 2025-06-03T10:33:15.834633Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:15.834740Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:15.834777Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ea7/r3tmp/tmpuHB6Xh/pdisk_1.dat 2025-06-03T10:33:15.958987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.977351Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.978715Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946795441384 != 1748946795441388 2025-06-03T10:33:16.020573Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:33:16.020721Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:33:16.020869Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:16.020888Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:16.031384Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:16.103992Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:33:16.104019Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:33:16.104045Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:33:16.118296Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:33:16.118338Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:33:16.118550Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:33:16.118563Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:33:16.118614Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:33:16.118648Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:33:16.118663Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:33:16.118730Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:33:16.119071Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:16.119280Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:33:16.119289Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:33:16.133349Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:16.133626Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:16.133736Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:33:16.133807Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:16.141357Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:16.141525Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:16.141554Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:16.141759Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:16.141771Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:16.141776Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:16.141834Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:16.141853Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:16.141864Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:33:16.141923Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:16.145597Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:16.145662Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:16.145698Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:33:16.145702Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:16.145705Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:16.145709Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:16.145764Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.145770Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.145853Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:16.145870Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:16.145963Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:16.145971Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:16.145978Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:16.145982Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:16.145986Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:16.145990Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:16.145994Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:16.146004Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.146009Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.146013Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:33:16.146033Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:33:16.146036Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:16.146053Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
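Editor's note: the TX_DATASHARD traces above ("Trying to execute [0:281474976715657] ... on unit CheckSchemeTx", "Advance execution plan ...", "Add ... to execution unit StoreSchemeTx") come from the datashard's per-operation execution-unit pipeline in datashard_pipeline.cpp. Below is a minimal C++ sketch of that control flow only; the type and function names (TOperation, ExecuteUnit, RunPipeline, EStatus) are illustrative assumptions, not the real YDB classes, and every unit is assumed to succeed in one pass.

#include <deque>
#include <iostream>
#include <string>

// Simplified execution statuses; the real pipeline has more (restarts, waits, etc.).
enum class EStatus { ExecutedNoMoreRestarts, Continue };

struct TOperation {
    unsigned long TxId;
    std::deque<std::string> Plan;  // remaining unit names, e.g. CheckSchemeTx -> StoreSchemeTx
};

// Each unit validates or applies one stage of the transaction (hypothetical stub).
EStatus ExecuteUnit(const std::string& unit, const TOperation& op) {
    std::cout << "Trying to execute [0:" << op.TxId << "] on unit " << unit << "\n";
    return EStatus::ExecutedNoMoreRestarts;  // simplified: every unit completes immediately
}

// Advance the operation through its plan until a unit asks to pause.
void RunPipeline(TOperation& op) {
    while (!op.Plan.empty()) {
        const std::string unit = op.Plan.front();
        if (ExecuteUnit(unit, op) == EStatus::Continue) {
            break;  // unit is waiting (e.g. for dependencies); resume on a later event
        }
        op.Plan.pop_front();  // "Advance execution plan ... executing on unit <unit>"
        if (!op.Plan.empty()) {
            std::cout << "Add [0:" << op.TxId << "] to execution unit "
                      << op.Plan.front() << "\n";
        }
    }
}

int main() {
    TOperation op{281474976715657UL, {"CheckSchemeTx", "StoreSchemeTx"}};
    RunPipeline(op);
}

Running this prints the same execute/advance/add sequence the trace lines show for the propose path of txId 281474976715657.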
2025-06-03T10:33:16.146097Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:16.146108Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:16.146122Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:16.146128Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:16.146131Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:16.146135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:16.146138Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... 37888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 0 Seqno# 0 Flags# 7} 2025-06-03T10:33:38.433504Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-03T10:33:38.433512Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [13:750:2629], Recipient [13:664:2568]: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:33:38.433514Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:33:38.433516Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037888 source 72075186224037889 dest 72075186224037888 producer 72075186224037889 txId 281474976715663 2025-06-03T10:33:38.433520Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037888 got read set: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletProducer# 72075186224037889 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:33:38.433547Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3015 : 281474976715663] from 72075186224037888 at tablet 72075186224037888 send result to client [13:967:2766], exec latency: 0 ms, propose latency: 0 ms 2025-06-03T10:33:38.433566Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [13:664:2568], Recipient [13:750:2629]: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:33:38.433569Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:33:38.433571Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-03T10:33:38.433574Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 
72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 2 Seqno# 1 Flags# 0} 2025-06-03T10:33:38.433588Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:705: Complete [3015 : 281474976715663] from 72075186224037889 at tablet 72075186224037889 send result to client [13:967:2766], exec latency: 0 ms, propose latency: 0 ms TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037888 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037888 CpuTimeUsec: 210 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-1" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3015 TxId: 281474976715663 } TEvProposeTransactionResult: TxKind: TX_KIND_DATA Origin: 72075186224037889 Status: COMPLETE TxId: 281474976715663 TxResult: "" ExecLatency: 0 ProposeLatency: 0 TxStats { PerShardStats { ShardId: 72075186224037889 CpuTimeUsec: 95 } } ComputeActorStats { Tasks { Tables { TablePath: "/Root/table-2" WriteRows: 1 WriteBytes: 8 } } } CommitVersion { Step: 3015 TxId: 281474976715663 } 2025-06-03T10:33:38.433723Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:38.433746Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037889 2025-06-03T10:33:38.433955Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:38.434492Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037888 2025-06-03T10:33:38.434514Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [13:664:2568], Recipient [13:750:2629]: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 72075186224037889 TabletDest# 72075186224037888 SetTabletConsumer# 72075186224037888 Flags# 0 Seqno# 1} 2025-06-03T10:33:38.434518Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:33:38.434523Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037889 source 72075186224037889 dest 72075186224037888 consumer 72075186224037888 txId 281474976715663 2025-06-03T10:33:38.436176Z node 13 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-03T10:33:38.436233Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [13:750:2629], Recipient [13:664:2568]: {TEvReadSet step# 3015 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-03T10:33:38.436238Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:33:38.436242Z node 13 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-03T10:33:38.446697Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:59:2106] Handle TEvExecuteKqpTransaction 2025-06-03T10:33:38.446720Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:59:2106] TxId# 281474976715667 ProcessProposeKqpTransaction 2025-06-03T10:33:38.446878Z node 13 :KQP_EXECUTER ERROR: 
kqp_planner.cpp:119: TxId: 281474976715667. Ctx: { TraceId: 01jwtnn5c201xn8n9bdcg4b2px, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGIxOWZhZGQtOTQ0MGNkMmMtZmFhZTNhN2ItNDkxNzliMjE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 2025-06-03T10:33:38.447344Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [13:1077:2872], Recipient [13:664:2568]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 RangesSize: 1 2025-06-03T10:33:38.447385Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037888, FollowerId 0 2025-06-03T10:33:38.447393Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v3015/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v4000/18446744073709551615 ImmediateWriteEdge# v4001/0 ImmediateWriteEdgeReplied# v4001/0 2025-06-03T10:33:38.447400Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037888 changed HEAD read to non-repeatable v4001/18446744073709551615 2025-06-03T10:33:38.447409Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CheckRead 2025-06-03T10:33:38.447423Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:33:38.447429Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CheckRead 2025-06-03T10:33:38.447433Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:33:38.447436Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:33:38.447445Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:7] at 72075186224037888 2025-06-03T10:33:38.447449Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:33:38.447451Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:33:38.447454Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit ExecuteRead 2025-06-03T10:33:38.447456Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit ExecuteRead 2025-06-03T10:33:38.447466Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037888 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 2 SchemaVersion: 1 } Columns: 1 Columns: 2 Columns: 3 ResultFormat: FORMAT_CELLVEC MaxRows: 1001 MaxBytes: 5242880 Reverse: false TotalRowsLimit: 1001 } 2025-06-03T10:33:38.447558Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037888 
Complete read# {[13:1077:2872], 0} after executionsCount# 1 2025-06-03T10:33:38.447563Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037888 read iterator# {[13:1077:2872], 0} sends rowCount# 2, bytes# 96, quota rows left# 999, quota bytes left# 5242784, hasUnreadQueries# 0, total queries# 1, firstUnprocessed# 0 2025-06-03T10:33:38.447577Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037888 read iterator# {[13:1077:2872], 0} finished in read 2025-06-03T10:33:38.447584Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:33:38.447586Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit ExecuteRead 2025-06-03T10:33:38.447589Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:7] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:33:38.447591Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:7] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:33:38.447600Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:7] at 72075186224037888 is Executed 2025-06-03T10:33:38.447602Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:7] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:33:38.447604Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:7] at 72075186224037888 has finished 2025-06-03T10:33:38.447607Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037888 2025-06-03T10:33:38.447623Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037888 2025-06-03T10:33:38.447796Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [13:1077:2872], Recipient [13:664:2568]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:33:38.447803Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037888 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 2 } items { uint32_value: 22 } } >> YdbSdkSessions::TestActiveSessionCountAfterBadSession [GOOD] |70.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |70.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |70.7%| [LD] {RESULT} $(B)/ydb/core/external_sources/s3/ut/ydb-core-external_sources-s3-ut |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 |70.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/federated_query/s3/ydb-core-kqp-ut-federated_query-s3 >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit+UseSink [GOOD] >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |70.7%| [LD] {RESULT} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/fq/pq_async_io/ut/ydb-tests-fq-pq_async_io-ut |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tests/tools/kqprun/kqprun |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tests/tools/kqprun/kqprun |70.7%| [LD] {RESULT} $(B)/ydb/tests/tools/kqprun/kqprun >> TxUsage::Offsets_Cannot_Be_Promoted_When_Reading_In_A_Transaction_Query [GOOD] |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |70.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_keys/ydb-core-tx-datashard-ut_keys |70.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_rs/ydb-core-tx-datashard-ut_rs |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view |70.7%| [LD] {RESULT} $(B)/ydb/core/kqp/ut/view/ydb-core-kqp-ut-view ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::ShardRestartAfterDropTableAndAbort [GOOD] Test command err: 2025-06-03T10:33:14.718973Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:14.719089Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:14.719128Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fda/r3tmp/tmpUJWHq8/pdisk_1.dat 2025-06-03T10:33:14.846949Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:14.866381Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:14.867935Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946794255059 != 1748946794255063 2025-06-03T10:33:14.910044Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:33:14.910258Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:33:14.910447Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:14.910466Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:14.921118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:14.994333Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:33:14.994379Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:33:14.994413Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:33:15.010663Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:33:15.010729Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:33:15.010978Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:33:15.010993Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:33:15.011046Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:33:15.011084Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:33:15.011101Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:33:15.011190Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:33:15.011593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.011869Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:33:15.011880Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:33:15.027717Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:15.028112Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:15.028242Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:33:15.028329Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:15.041021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:15.041311Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:15.041353Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:15.041991Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:15.042012Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:15.042022Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:15.042152Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:15.042197Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:15.042218Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:33:15.042335Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:15.048288Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:15.048401Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:15.048459Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:33:15.048467Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:15.048473Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:15.048481Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:15.048569Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:15.048580Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:15.048718Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:15.048744Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:15.048886Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:15.048898Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:15.048909Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:15.048917Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:15.048923Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:15.048931Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:15.048938Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:15.048954Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:15.048961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:15.048970Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:33:15.048996Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:33:15.049002Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:15.049027Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
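Editor's note: the "Switched to work state WaitScheme" / "Cannot activate change sender: ... state: WaitScheme" lines above reflect the shard gating its side activities on its work state: until the schema transaction lands and processing params arrive, registration and change-sender activation are deferred. A hedged sketch of that gating pattern follows; EShardState, TShard, and TryActivateChangeSender are illustrative names under stated assumptions, not YDB's actual API.

#include <iostream>

enum class EShardState { WaitScheme, Ready, PreOffline };

const char* Name(EShardState s) {
    switch (s) {
        case EShardState::WaitScheme: return "WaitScheme";
        case EShardState::Ready:      return "Ready";
        case EShardState::PreOffline: return "PreOffline";
    }
    return "?";
}

struct TShard {
    unsigned long TabletId;
    EShardState State = EShardState::WaitScheme;

    // Side activities run only once the shard has its processing params,
    // i.e. after it leaves WaitScheme (assumption: modeled as the Ready state).
    void TryActivateChangeSender() const {
        if (State != EShardState::Ready) {
            std::cout << "Cannot activate change sender: at tablet: " << TabletId
                      << ", state: " << Name(State) << "\n";
            return;
        }
        std::cout << "Change sender activated at tablet: " << TabletId << "\n";
    }
};

int main() {
    TShard shard{72075186224037888UL};
    shard.TryActivateChangeSender();   // deferred: shard is still in WaitScheme
    shard.State = EShardState::Ready;  // after the scheme tx completes
    shard.TryActivateChangeSender();
}

The first call reproduces the deferred-activation message seen in the trace; the second shows the activation that happens once the scheme transaction has been applied.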
2025-06-03T10:33:15.049091Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:15.049104Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:15.049125Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:15.049135Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:15.049141Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:15.049149Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:15.049154Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state), code: 2029 . sessionActorId=[14:841:2682]. isRollback=0 2025-06-03T10:33:43.913797Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=14&id=ZTY4ZDgzMzctODlmMDg4YTMtYzZjZDViYTMtZmM2NTc2ZWM=, ActorId: [14:841:2682], ActorState: ExecuteState, TraceId: 01jwtnnapz4zj110sm7xh1wy43, got TEvKqpBuffer::TEvError in ExecuteState, status: UNAVAILABLE send to: [14:995:2682] from: [14:862:2682] 2025-06-03T10:33:43.913824Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 278003712, Sender [14:862:2682], Recipient [14:689:2579]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback } 2025-06-03T10:33:43.913829Z node 14 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-03T10:33:43.913839Z node 14 :GLOBAL WARN: log.cpp:784: fline=events.h:103;event=ev_write_error;status=STATUS_WRONG_SHARD_STATE;details=Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state);tx_id=0; 2025-06-03T10:33:43.913845Z node 14 :TX_DATASHARD NOTICE: datashard.cpp:3137: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state) 2025-06-03T10:33:43.913865Z node 14 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [14:995:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnnapz4zj110sm7xh1wy43, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=14&id=ZTY4ZDgzMzctODlmMDg4YTMtYzZjZDViYTMtZmM2NTc2ZWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. UNAVAILABLE: {
: Error: Wrong shard state. Table `/Root/table`., code: 2005 subissue: {
: Error: Rejecting data TxId 0 because datashard 72075186224037888: is in a pre/offline state assuming this is due to a finished split (wrong shard state), code: 2029 } } 2025-06-03T10:33:43.913909Z node 14 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=14&id=ZTY4ZDgzMzctODlmMDg4YTMtYzZjZDViYTMtZmM2NTc2ZWM=, ActorId: [14:841:2682], ActorState: ExecuteState, TraceId: 01jwtnnapz4zj110sm7xh1wy43, Create QueryResponse for error on request, msg: ... blocking NKikimr::NLongTxService::TEvLongTxService::TEvLockStatus from LONG_TX_SERVICE to TX_DATASHARD_ACTOR cookie 0 2025-06-03T10:33:43.914235Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 65543, Sender [14:591:2517], Recipient [14:689:2579]: NActors::TEvents::TEvPoison 2025-06-03T10:33:43.914312Z node 14 :TX_DATASHARD INFO: datashard.cpp:190: OnDetach: 72075186224037888 2025-06-03T10:33:43.914320Z node 14 :TX_DATASHARD INFO: datashard.cpp:1301: Change sender killed: at tablet: 72075186224037888 2025-06-03T10:33:43.926325Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [14:999:2811], Recipient [14:1001:2812]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:43.927470Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [14:999:2811], Recipient [14:1001:2812]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:43.927521Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828684, Sender [14:999:2811], Recipient [14:1001:2812]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:43.928185Z node 14 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [14:1001:2812] 2025-06-03T10:33:43.928249Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:43.928713Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:43.929073Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:43.929282Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:43.929328Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:43.929339Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:43.929416Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:43.929493Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:43.929501Z node 14 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:43.929513Z node 14 :TX_DATASHARD INFO: datashard.cpp:417: Switched to work state PreOffline tabletId 72075186224037888 2025-06-03T10:33:43.929539Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 1 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: 2025-06-03T10:33:43.929547Z node 14 :TX_DATASHARD INFO: datashard.cpp:475: Send registration request to time cast PreOffline tabletId 72075186224037888 mediators count is 1 coordinators count is 1 buckets per mediator 2 2025-06-03T10:33:43.929568Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change 
sender created: at tablet: 72075186224037888, actorId: [14:1015:2819] 2025-06-03T10:33:43.929575Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:43.929581Z node 14 :TX_DATASHARD INFO: datashard.cpp:1283: Cannot activate change sender: at tablet: 72075186224037888, state: PreOffline, queue size: 0 2025-06-03T10:33:43.929587Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:43.929671Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 275709965, Sender [14:61:2108], Recipient [14:1001:2812]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 14 Status: STATUS_NOT_FOUND 2025-06-03T10:33:43.929744Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [14:1001:2812], Recipient [14:1001:2812]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:43.929751Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:43.929772Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435075, Sender [14:1001:2812], Recipient [14:1001:2812]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressResendReadSet 2025-06-03T10:33:43.929777Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3157: StateWork, processing event TEvPrivate::TEvProgressResendReadSet 2025-06-03T10:33:43.930000Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270976, Sender [14:24:2071], Recipient [14:1001:2812]: {TEvRegisterTabletResult TabletId# 72075186224037888 Entry# 600} 2025-06-03T10:33:43.930007Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3167: StateWork, processing event TEvMediatorTimecast::TEvRegisterTabletResult 2025-06-03T10:33:43.930014Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3742: Got TEvMediatorTimecast::TEvRegisterTabletResult at 72075186224037888 time 600 2025-06-03T10:33:43.930020Z node 14 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:43.930160Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:43.930170Z node 14 :TX_DATASHARD INFO: datashard__progress_tx.cpp:21: Progress tx at non-ready tablet 72075186224037888 state 5 2025-06-03T10:33:43.930198Z node 14 :TX_DATASHARD DEBUG: datashard__progress_resend_rs.cpp:14: Start TTxProgressResendRS at tablet 72075186224037888 2025-06-03T10:33:43.930206Z node 14 :TX_DATASHARD INFO: datashard.cpp:4101: Resend RS at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-03T10:33:43.930214Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3990: Send RS 1 at 72075186224037888 from 72075186224037888 to 72075186224037889 txId 281474976715663 2025-06-03T10:33:43.930266Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287425, Sender [14:1001:2812], Recipient [14:902:2726]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-03T10:33:43.930273Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3148: StateWork, processing event TEvTxProcessing::TEvReadSet 2025-06-03T10:33:43.930280Z node 14 
:TX_DATASHARD DEBUG: datashard.cpp:3359: Receive RS at 72075186224037889 source 72075186224037888 dest 72075186224037889 producer 72075186224037888 txId 281474976715663 2025-06-03T10:33:43.930292Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:15: TTxReadSet::Execute at 72075186224037889 got read set: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-03T10:33:43.930301Z node 14 :TX_DATASHARD NOTICE: datashard_pipeline.cpp:734: Outdated readset for 500:281474976715663 at 72075186224037889 2025-06-03T10:33:43.930310Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:91: TTxReadSet::Complete at 72075186224037889 2025-06-03T10:33:43.930317Z node 14 :TX_DATASHARD DEBUG: datashard__readset.cpp:99: Send RS Ack at 72075186224037889 {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletProducer# 72075186224037888 ReadSet.Size()# 138 Seqno# 1 Flags# 0} 2025-06-03T10:33:43.930336Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 270270978, Sender [14:24:2071], Recipient [14:1001:2812]: NKikimr::TEvMediatorTimecast::TEvSubscribeReadStepResult{ CoordinatorId# 72057594046316545 LastReadStep# 400 NextReadStep# 600 ReadStep# 600 } 2025-06-03T10:33:43.930342Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3168: StateWork, processing event TEvMediatorTimecast::TEvSubscribeReadStepResult 2025-06-03T10:33:43.930348Z node 14 :TX_DATASHARD DEBUG: datashard.cpp:3760: Got TEvMediatorTimecast::TEvSubscribeReadStepResult at 72075186224037888 coordinator 72057594046316545 last step 400 next step 600 2025-06-03T10:33:43.930369Z node 14 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:43.930387Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269287938, Sender [14:902:2726], Recipient [14:1001:2812]: {TEvReadSet step# 500 txid# 281474976715663 TabletSource# 72075186224037888 TabletDest# 72075186224037889 SetTabletConsumer# 72075186224037889 Flags# 0 Seqno# 1} 2025-06-03T10:33:43.930393Z node 14 :TX_DATASHARD TRACE: datashard_impl.h:3149: StateWork, processing event TEvTxProcessing::TEvReadSetAck 2025-06-03T10:33:43.930398Z node 14 :TX_DATASHARD DEBUG: datashard_outreadset.cpp:150: Receive RS Ack at 72075186224037888 source 72075186224037888 dest 72075186224037889 consumer 72075186224037889 txId 281474976715663 2025-06-03T10:33:43.930408Z node 14 :TX_DATASHARD DEBUG: datashard_loans.cpp:220: 72075186224037888 in PreOffline state HasSharedBobs: 1 SchemaOperations: [ ] OutReadSets count: 0 ChangesQueue size: 0 ChangeExchangeSplit: 1 siblings to be activated: wait to activation from: |70.8%| [TA] $(B)/ydb/core/blobstorage/dsproxy/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxUsage::ReadRuleGeneration |70.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |70.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |70.8%| [LD] {RESULT} $(B)/ydb/core/tx/mediator/ut/ydb-core-tx-mediator-ut |70.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |70.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats |70.8%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_stats/ydb-core-tx-datashard-ut_stats >> CompressExecutor::TestExecutorMemUsage [GOOD] |70.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |70.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut |70.8%| [LD] {RESULT} $(B)/ydb/core/grpc_services/tablet/ut/ydb-core-grpc_services-tablet-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryService [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:195: Test is failing right now ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::TestSdkFreeSessionAfterBadSessionQueryServiceStreamCall [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:243: Test is failing right now ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::SessionsServerLimitWithSessionPool [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:583: Enable after accepting a pull request with merging configs >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> CompressExecutor::TestExecutorMemUsage [GOOD] Test command err: 2025-06-03T10:32:30.125378Z :WriteAndReadSomeMessagesWithAsyncCompression INFO: Random seed for debugging is 1748946750125368 2025-06-03T10:32:30.371120Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669096779775799:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:30.371147Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:30.419371Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0024e9/r3tmp/tmpeWAPVI/pdisk_1.dat 2025-06-03T10:32:30.428118Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:30.431641Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:30.471371Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:30.473980Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> 
Disconnected 2025-06-03T10:32:30.474009Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:30.476111Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27140, node 1 2025-06-03T10:32:30.496722Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/0024e9/r3tmp/yandexVECRRu.tmp 2025-06-03T10:32:30.496737Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/0024e9/r3tmp/yandexVECRRu.tmp 2025-06-03T10:32:30.496814Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/0024e9/r3tmp/yandexVECRRu.tmp 2025-06-03T10:32:30.496875Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:30.503931Z INFO: TTestServer started on Port 13236 GrpcPort 27140 TClient is connected to server localhost:13236 PQClient connected to localhost:27140 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:32:30.530963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:30.530996Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:30.532305Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:32:30.532707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:30.538494Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-06-03T10:32:30.807758Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095948774997:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.807784Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095948775022:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.807794Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.809357Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976720657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:30.845480Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669095948775026:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976720657 completed, doublechecking } 2025-06-03T10:32:30.884825Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669096779776616:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.885471Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:32:30.885897Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=M2VlNTkyNjAtZDU4MGMyZTEtZDc2ZmI0NDgtM2UwODk1ZQ==, ActorId: [1:7511669096779776572:2332], ActorState: ExecuteState, TraceId: 01jwtnk3bk6n9vj1dtz1nwxjdq, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.886389Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.937103Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669095948775095:2154] txid# 281474976720658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:30.942114Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669095948775110:2317], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.942632Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=NDgzNDIwNjktNTA5ZTM3ZjMtNjJmM2UzMzAtN2JjNTA2N2M=, ActorId: [2:7511669095948774995:2305], ActorState: ExecuteState, TraceId: 01jwtnk3ap11y4c1mszd1adhza, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.942775Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.974866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:31.067667Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost:27140", true, true, 1000); 2025-06-03T10:32:31.154534Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715664. Ctx: { TraceId: 01jwtnk3n2a10fdxw5caze3efa, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=ZjBiMGU3MjUtODIxZGVjMjktMTg0MzJjZWEtMjRiZTAwMmI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root === CheckClustersList. Subcribe to ClusterTracker from [1:7511669101074744339:2933] 2025-06-03T10:32:35.369579Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511669096779775799:2212];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:35.369623Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:32:37.180705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715679:0, at schemeshard: 72057594046644480 waiting... PQ Client: create topic: rt3.dc1--test-topic with 1 partitions CallPersQueueGRPC request to localhost:27140 MetaRequest { CmdGetTopicMetadata { Topic: "rt3.dc1--test-topic" } } 2025-06-03T10:32:37.202267Z node 1 :PERSQUEUE INFO: msgbus_server_persqueue.cpp:1531: proxy answer CallPersQueueGRPC r ... 
E $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `/Root/PQ/SourceIdMeta2` WHERE Hash == $Hash AND Topic == $Topic AND SourceId == $SourceId; 2025-06-03T10:33:43.541157Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64;DECLARE $SeqNo AS Uint64; UPSERT INTO `/Root/PQ/SourceIdMeta2` (Hash, Topic, SourceId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:33:43.541158Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint32; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `/Root/PQ/SourceIdMeta2` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND SourceId = $SourceId AND Partition = $Partition; 2025-06-03T10:33:43.541163Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:111: TPartitionChooser [15:7511669410092986339:2560] (SourceId=test-message-group-id, PreferedPartition=(NULL)) StartKqpSession 2025-06-03T10:33:43.541661Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:142: TPartitionChooser [15:7511669410092986339:2560] (SourceId=test-message-group-id, PreferedPartition=(NULL)) Select from the table 2025-06-03T10:33:43.704309Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715701. Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-03T10:33:43.704384Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7511669410092986350:2562] TxId: 281474976715701. Ctx: { TraceId: 01jwtnnabn8vjyn8zr3vxdxbgy, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=YTE1YjA0MGItM2YwYTQ5M2UtNTJlMjJjNWEtZmM4N2Y3YjQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037891 after several retries. 2025-06-03T10:33:43.704497Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=15&id=YTE1YjA0MGItM2YwYTQ5M2UtNTJlMjJjNWEtZmM4N2Y3YjQ=, ActorId: [15:7511669410092986340:2562], ActorState: ExecuteState, TraceId: 01jwtnnabn8vjyn8zr3vxdxbgy, Create QueryResponse for error on request, msg: 2025-06-03T10:33:43.705081Z node 15 :PQ_PARTITION_CHOOSER INFO: partition_chooser_impl__abstract_chooser_actor.h:312: TPartitionChooser [15:7511669410092986339:2560] (SourceId=test-message-group-id, PreferedPartition=(NULL)) ReplyError: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=YTE1YjA0MGItM2YwYTQ5M2UtNTJlMjJjNWEtZmM4N2Y3YjQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnnabn8vjyn8zr3w4mmyey" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 2025-06-03T10:33:43.705118Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:809: session v1 error cookie: 3 reason: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=YTE1YjA0MGItM2YwYTQ5M2UtNTJlMjJjNWEtZmM4N2Y3YjQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnnabn8vjyn8zr3w4mmyey" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 sessionId: 2025-06-03T10:33:43.705391Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: is DEAD Test retry state: get retry delay 2025-06-03T10:33:43.705676Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Got error. Status: UNAVAILABLE, Description:
: Error: kqp error Marker# PQ50 : Response { SessionId: "ydb://session/3?node_id=15&id=YTE1YjA0MGItM2YwYTQ5M2UtNTJlMjJjNWEtZmM4N2Y3YjQ=" QueryIssues { message: "Failed to resolve tablet: 72075186224037891 after several retries." severity: 1 } TxMeta { id: "01jwtnnabn8vjyn8zr3w4mmyey" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 , code: 500001 2025-06-03T10:33:43.705705Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session will restart in 2.000000s 2025-06-03T10:33:43.705730Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session: Do CDS request 2025-06-03T10:33:43.705737Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Do schedule cds request after 2000 ms 2025-06-03T10:33:43.854563Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720682. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:43.854619Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [16:7511669407859590330:2480] TxId: 281474976720682. Ctx: { TraceId: 01jwtnnaf0dqh512yzr1ph9s5f, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=YzdkYTk4NjAtOWY2MzVmMmEtMzQ5NTUzZjgtOWI5OGI4OWU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:43.854695Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=16&id=YzdkYTk4NjAtOWY2MzVmMmEtMzQ5NTUzZjgtOWI5OGI4OWU=, ActorId: [16:7511669407859590317:2480], ActorState: ExecuteState, TraceId: 01jwtnnaf0dqh512yzr1ph9s5f, Create QueryResponse for error on request, msg: 2025-06-03T10:33:43.855078Z node 16 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jwtnnagdcjhkg2yz89h1n3dg" } } YdbStatus: UNAVAILABLE ConsumedRu: 29 } 2025-06-03T10:33:44.113572Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715703. Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:33:44.113629Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7511669410092986421:2566] TxId: 281474976715703. Ctx: { TraceId: 01jwtnnaq4aarfwf064n18yn2s, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=OTBjYjBhZjYtYzFiOTg1NjMtOTZiMDFmZDYtM2FmNWI4MGI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037888 after several retries. 2025-06-03T10:33:44.113727Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=15&id=OTBjYjBhZjYtYzFiOTg1NjMtOTZiMDFmZDYtM2FmNWI4MGI=, ActorId: [15:7511669410092986402:2566], ActorState: ExecuteState, TraceId: 01jwtnnaq4aarfwf064n18yn2s, Create QueryResponse for error on request, msg: 2025-06-03T10:33:44.114205Z node 15 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } QueryIssues { message: "Failed to resolve tablet: 72075186224037888 after several retries." severity: 1 } TxMeta { id: "01jwtnnarj8p3stq68x3e6g5mp" } } YdbStatus: UNAVAILABLE ConsumedRu: 30 } 2025-06-03T10:33:44.538879Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session: close. Timeout = 0 ms 2025-06-03T10:33:44.538901Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session will now close 2025-06-03T10:33:44.538912Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session: aborting 2025-06-03T10:33:44.539159Z :WARNING: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session: could not confirm all writes in time or session aborted, perform hard shutdown 2025-06-03T10:33:44.539166Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|cd4fa79-39db3fe7-1463df24-c289f428_0] Write session: destroy 2025-06-03T10:33:44.610274Z node 16 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976720684. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:44.610331Z node 16 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [16:7511669412154557713:2489] TxId: 281474976720684. Ctx: { TraceId: 01jwtnnb7z80hsj0xdqsfsff7v, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=NzA3NmRkYzUtYjdmYTg1NjktNzQyODE2ZGUtYzc1NWZlYTM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:44.610410Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=16&id=NzA3NmRkYzUtYjdmYTg1NjktNzQyODE2ZGUtYzc1NWZlYTM=, ActorId: [16:7511669412154557710:2489], ActorState: ExecuteState, TraceId: 01jwtnnb7z80hsj0xdqsfsff7v, Create QueryResponse for error on request, msg: 2025-06-03T10:33:44.610767Z node 16 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jwtnnb7z80hsj0xdqx63xjcs" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } 2025-06-03T10:33:44.680833Z node 15 :KQP_EXECUTER WARN: kqp_shards_resolver.cpp:86: [ShardsResolver] TxId: 281474976715705. Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:44.680887Z node 15 :KQP_EXECUTER WARN: kqp_executer_impl.h:257: ActorId: [15:7511669414387953803:2573] TxId: 281474976715705. Ctx: { TraceId: 01jwtnnbaa2tqr0f60tj2wyd3h, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=15&id=Zjg2MzYxNzMtMTg3MjYzYTEtM2JhN2ZmYjUtN2Q1YWM3ODE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Shards nodes resolve failed, status: UNAVAILABLE, issues:
: Error: Failed to resolve tablet: 72075186224037890 after several retries. 2025-06-03T10:33:44.680972Z node 15 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=15&id=Zjg2MzYxNzMtMTg3MjYzYTEtM2JhN2ZmYjUtN2Q1YWM3ODE=, ActorId: [15:7511669414387953800:2573], ActorState: ExecuteState, TraceId: 01jwtnnbaa2tqr0f60tj2wyd3h, Create QueryResponse for error on request, msg: 2025-06-03T10:33:44.681499Z node 15 :PQ_METACACHE ERROR: msgbus_server_pq_metacache.cpp:260: Got error trying to perform request: { Response { QueryIssues { message: "Failed to resolve tablet: 72075186224037890 after several retries." severity: 1 } TxMeta { id: "01jwtnnbab25bxq5zak087c6g6" } } YdbStatus: UNAVAILABLE ConsumedRu: 1 } |70.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |70.8%| [TA] {RESULT} $(B)/ydb/core/blobstorage/dsproxy/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_postgres.py::TestPGSQL::test_sql_suite[plan-abstime.test] >> TxUsage::WriteToTopic_Demo_26_Table [GOOD] >> TxUsage::WriteToTopic_Demo_26_Query >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] >> TxUsage::Sinks_Oltp_WriteToTopic_5_Query [GOOD] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_single_key_column_failure [GOOD] >> TxUsage::WriteToTopic_Demo_37_Table [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table >> TxUsage::WriteToTopic_Demo_37_Query >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-result_sets] >> 
TxUsage::WriteToTopic_Demo_18_RestartBeforeCommit_Query [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::LockedWriteWithAsyncIndexAndVolatileCommit-UseSink [GOOD] Test command err: 2025-06-03T10:33:15.716660Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:15.716743Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:15.716765Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000fcf/r3tmp/tmpcMKdPh/pdisk_1.dat 2025-06-03T10:33:15.819273Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.836164Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.837033Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946795270573 != 1748946795270577 2025-06-03T10:33:15.878929Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:33:15.879156Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:33:15.879343Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:15.879375Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:15.890037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:15.962592Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:33:15.962616Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:33:15.962642Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:33:15.982189Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:33:15.982239Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:33:15.982444Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:33:15.982458Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:33:15.982509Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:33:15.982544Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:33:15.982557Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:33:15.982611Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:33:15.982986Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.983220Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:33:15.983233Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:33:15.997000Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:15.997242Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:15.997348Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:33:15.997403Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:16.004616Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:16.004803Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:16.004830Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:16.004991Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:16.004999Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:16.005004Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:16.005065Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:16.005082Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:16.005093Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:33:16.005158Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:16.008763Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:16.008840Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:16.008884Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:33:16.008889Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:16.008893Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:16.008898Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:16.008954Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.008961Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.009067Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:16.009087Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:16.009217Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:16.009226Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:16.009234Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:16.009239Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:16.009244Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:16.009249Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:16.009254Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:16.009267Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.009274Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.009280Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:33:16.009323Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:33:16.009330Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:16.009355Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:33:16.009419Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:16.009433Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:16.009450Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:16.009457Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:16.009460Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:16.009464Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:16.009467Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... r.cpp:119: TxId: 281474976715671. Ctx: { TraceId: 01jwtnndjh0f738gc69kwvpahp, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=YzU1YjJjZDctZDY4YmEwYjgtYjQ0MGZmNDYtODVhOGJmNWE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-03T10:33:46.865532Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [13:1636:3345], Recipient [13:786:2654]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-03T10:33:46.865582Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037889, FollowerId 0 2025-06-03T10:33:46.865591Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037889 CompleteEdge# v8021/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:33:46.865598Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037889 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-03T10:33:46.865608Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CheckRead 2025-06-03T10:33:46.865627Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:33:46.865631Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CheckRead 2025-06-03T10:33:46.865635Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit BuildAndWaitDependencies 2025-06-03T10:33:46.865638Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit BuildAndWaitDependencies 2025-06-03T10:33:46.865651Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037889 
2025-06-03T10:33:46.865655Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:33:46.865657Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit BuildAndWaitDependencies 2025-06-03T10:33:46.865661Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit ExecuteRead 2025-06-03T10:33:46.865664Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit ExecuteRead 2025-06-03T10:33:46.865674Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037889 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 4 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false } 2025-06-03T10:33:46.865723Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037889 Complete read# {[13:1636:3345], 0} after executionsCount# 1 2025-06-03T10:33:46.865730Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037889 read iterator# {[13:1636:3345], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-03T10:33:46.865755Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037889 read iterator# {[13:1636:3345], 0} finished in read 2025-06-03T10:33:46.865763Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:33:46.865765Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit ExecuteRead 2025-06-03T10:33:46.865768Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037889 to execution unit CompletedOperations 2025-06-03T10:33:46.865770Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037889 on unit CompletedOperations 2025-06-03T10:33:46.865780Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037889 is Executed 2025-06-03T10:33:46.865782Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037889 executing on unit CompletedOperations 2025-06-03T10:33:46.865785Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037889 has finished 2025-06-03T10:33:46.865788Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037889 2025-06-03T10:33:46.865805Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037889 2025-06-03T10:33:46.865989Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [13:1636:3345], Recipient [13:786:2654]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:33:46.865995Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037889 ReadCancel: { ReadId: 0 } { items { uint32_value: 1 } items { uint32_value: 11 } }, { items { uint32_value: 2 } items { uint32_value: 21 } } 2025-06-03T10:33:46.899787Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:353: actor# [13:59:2106] Handle 
TEvExecuteKqpTransaction 2025-06-03T10:33:46.899820Z node 13 :TX_PROXY DEBUG: proxy_impl.cpp:342: actor# [13:59:2106] TxId# 281474976715672 ProcessProposeKqpTransaction 2025-06-03T10:33:46.900085Z node 13 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715672. Ctx: { TraceId: 01jwtnndkj9t9je4pz3beb7sa3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=N2Y1ZjRiODctOWNjYTJkMWMtMTU1ZTdmN2MtMTNmYzY2ZDQ=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root TEvRead: ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 2025-06-03T10:33:46.900594Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553215, Sender [13:1667:3370], Recipient [13:1058:2872]: NKikimrTxDataShard.TEvRead ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false RangesSize: 3 2025-06-03T10:33:46.900673Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2452: TTxReadViaPipeline execute: at tablet# 72075186224037891, FollowerId 0 2025-06-03T10:33:46.900688Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037891 CompleteEdge# v8021/281474976715670 IncompleteEdge# v{min} UnprotectedReadEdge# v{min} ImmediateWriteEdge# v{min} ImmediateWriteEdgeReplied# v{min} 2025-06-03T10:33:46.900698Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2555: 72075186224037891 changed HEAD read to non-repeatable v9000/18446744073709551615 2025-06-03T10:33:46.900739Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CheckRead 2025-06-03T10:33:46.900766Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-03T10:33:46.900772Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CheckRead 2025-06-03T10:33:46.900779Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit BuildAndWaitDependencies 2025-06-03T10:33:46.900783Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit BuildAndWaitDependencies 2025-06-03T10:33:46.900800Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:4] at 72075186224037891 2025-06-03T10:33:46.900806Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-03T10:33:46.900809Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit BuildAndWaitDependencies 2025-06-03T10:33:46.900814Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit ExecuteRead 2025-06-03T10:33:46.900818Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit ExecuteRead 2025-06-03T10:33:46.900841Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:1578: 72075186224037891 Execute read# 1, request: { ReadId: 0 TableId { OwnerId: 72057594046644480 TableId: 7 SchemaVersion: 2 } Columns: 2 Columns: 1 ResultFormat: FORMAT_CELLVEC MaxRows: 32767 MaxBytes: 5242880 Reverse: false 
} 2025-06-03T10:33:46.900935Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2163: 72075186224037891 Complete read# {[13:1667:3370], 0} after executionsCount# 1 2025-06-03T10:33:46.900945Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2137: 72075186224037891 read iterator# {[13:1667:3370], 0} sends rowCount# 2, bytes# 64, quota rows left# 32765, quota bytes left# 5242816, hasUnreadQueries# 0, total queries# 3, firstUnprocessed# 0 2025-06-03T10:33:46.900969Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2188: 72075186224037891 read iterator# {[13:1667:3370], 0} finished in read 2025-06-03T10:33:46.900980Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-03T10:33:46.900984Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit ExecuteRead 2025-06-03T10:33:46.900989Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:4] at 72075186224037891 to execution unit CompletedOperations 2025-06-03T10:33:46.900993Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:4] at 72075186224037891 on unit CompletedOperations 2025-06-03T10:33:46.901007Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:4] at 72075186224037891 is Executed 2025-06-03T10:33:46.901011Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:4] at 72075186224037891 executing on unit CompletedOperations 2025-06-03T10:33:46.901015Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:4] at 72075186224037891 has finished 2025-06-03T10:33:46.901020Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2687: TTxReadViaPipeline(69) Execute with status# Executed at tablet# 72075186224037891 2025-06-03T10:33:46.901046Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:2736: TTxReadViaPipeline(69) Complete: at tablet# 72075186224037891 2025-06-03T10:33:46.901312Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553219, Sender [13:1667:3370], Recipient [13:1058:2872]: NKikimrTxDataShard.TEvReadCancel ReadId: 0 2025-06-03T10:33:46.901326Z node 13 :TX_DATASHARD TRACE: datashard__read_iterator.cpp:3409: 72075186224037891 ReadCancel: { ReadId: 0 } { items { uint32_value: 10 } items { uint32_value: 110 } }, { items { uint32_value: 20 } items { uint32_value: 210 } } >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> DataShardSnapshots::VolatileSnapshotTimeoutRefresh [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_other_keys_then_ok [GOOD] >> YdbSdkSessions::CloseSessionAfterDriverDtorWithoutSessionPool [GOOD] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit >> test_postgres.py::TestPGSQL::test_sql_suite[plan-abstime.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-boolean.test] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-other-admin] >> 
test_postgres.py::TestPGSQL::test_sql_suite[plan-boolean.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] >> ColumnShardTiers::DSConfigsStub [GOOD] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_when_copy_table_partition_config [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_3] >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_scheme_then_ok [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/aggregator/ut/unittest >> AnalyzeColumnshard::AnalyzeRebootColumnShard [FAIL] Test command err: 2025-06-03T10:23:55.502020Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:23:55.502074Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:23:55.502088Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002aa4/r3tmp/tmpilSoPj/pdisk_1.dat 2025-06-03T10:23:55.638173Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9464, node 1 2025-06-03T10:23:55.770274Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:23:55.770300Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:23:55.770306Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:23:55.770372Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:23:55.771122Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:23:55.853532Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:55.853570Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:55.868143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61841 2025-06-03T10:23:56.256960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:23:57.266093Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:23:57.288481Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:57.288518Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:57.355184Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:23:57.356124Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:57.547822Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548041Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548241Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:23:57.548282Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548339Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548361Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548381Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548401Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.548425Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:23:57.735105Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:23:57.735158Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:23:57.747030Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:23:57.782401Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:23:57.801827Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:23:57.801873Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:23:57.811775Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:23:57.811835Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:23:57.811856Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:23:57.811862Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:23:57.811867Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:23:57.811872Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:23:57.811876Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:23:57.811883Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:23:57.812022Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:23:57.826537Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:57.826580Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:23:57.828424Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:23:57.829619Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:23:57.829785Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:23:57.832363Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:23:57.837491Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:23:57.837523Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:23:57.837537Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:23:57.842386Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:23:57.844669Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:23:57.844720Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:23:57.990284Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:23:58.122124Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:23:58.173721Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:23:58.821232Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.821367Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:23:58.826146Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:23:58.880596Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:23:58.880737Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:23:58.880824Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:23:58.880870Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:23:58.880925Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:23:58.880972Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:23:58.881010Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:23:58.881056Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2304:2842];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_reg ... [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:06.902957Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:08.114663Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:08.114696Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:08.114700Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:09.328053Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:09.328087Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. 
OperationId=operationId 2025-06-03T10:33:09.328091Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:10.541827Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:10.562587Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:10.562627Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:10.562643Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:11.810902Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:33:11.810961Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:11.810970Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:11.810976Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:33:12.016324Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:12.016360Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:12.016365Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:13.236984Z node 2 :SYSTEM_VIEWS WARN: tx_top_partitions.cpp:151: [72075186224037891] TEvSendTopPartitions, time mismath: , partition interval end# 1970-01-01T00:07:31.000000Z, event time# 1970-01-01T00:07:31.079536Z 2025-06-03T10:33:13.333607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:13.333706Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:13.354907Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:13.354946Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:13.354952Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:14.609187Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:14.609223Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:14.609228Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 
2025-06-03T10:33:15.845897Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:15.845953Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:15.845960Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:16.995954Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:17.007557Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:17.007607Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:17.007614Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:18.393804Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:18.393850Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:18.393856Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:19.688191Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:19.688303Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:19.698741Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:19.698782Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:19.698787Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:21.089288Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:21.089358Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:21.089365Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:22.380250Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:22.380293Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:22.380301Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 
2025-06-03T10:33:23.654758Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:23.665679Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:23.665720Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:23.665726Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:25.161706Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:25.161751Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:25.161770Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:26.583818Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:26.583926Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:26.594388Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:26.594429Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:26.594435Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 2025-06-03T10:33:27.826392Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:626: [72075186224037894] ScheduleNextAnalyze 2025-06-03T10:33:27.826436Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:652: [72075186224037894] ScheduleNextAnalyze. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:27.826443Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:656: [72075186224037894] ScheduleNextAnalyze. All the force traversal operations sent the requests. 2025-06-03T10:33:29.010390Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:29.010425Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:680: [72075186224037894] ScheduleNextTraversal. All the force traversal tables sent the requests. OperationId=operationId 2025-06-03T10:33:29.010430Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:689: [72075186224037894] ScheduleNextTraversal. All the force traversal operations sent the requests. 
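The run that follows aborts with NActors::TSchedulingLimitReachedException: the test actor runtime dispatched over 100000 events while GrabEdgeEventRethrow was still waiting for TEvStatistics::TEvAnalyzeResponse, so the wait fails fast instead of spinning forever. A minimal sketch of such an event-budget guard — a generic illustration with hypothetical names, not the actual NActors test runtime:

from collections import deque

class SchedulingLimitReached(Exception):
    """Raised when the simulated runtime exhausts its event budget."""

class EventBudgetRuntime:
    def __init__(self, budget=100_000):
        self.budget = budget       # hard cap on dispatched events
        self.processed = 0
        self.queue = deque()       # pending events, dispatched FIFO

    def enqueue(self, event):
        self.queue.append(event)   # event is any zero-argument callable

    def wait_for(self, done):
        # Dispatch queued events until done() holds or the budget runs out.
        while not done():
            if self.processed >= self.budget:
                raise SchedulingLimitReached(
                    "Processed over %d events" % self.budget)
            event = self.queue.popleft()  # IndexError here means deadlock
            event()
            self.processed += 1

A test that keeps rescheduling ScheduleNextAnalyze/ScheduleNextTraversal rounds without the awaited response ever arriving, as in the loop above, trips the budget check rather than hanging the CI job.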
(TWithBackTrace) ydb/library/actors/testlib/test_runtime.h:579: Exception occured while waiting for NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse: (NActors::TSchedulingLimitReachedException) TestActorRuntime Processed over 100000 events.ydb/library/actors/testlib/test_runtime.cpp:716: TBackTrace::Capture()+28 (0x138641AC) TWithBackTrace::TWithBackTrace<>()+51 (0x1374DB23) NKikimr::NStat::TEvStatistics::TEvAnalyzeResponse::TPtr NActors::TTestActorRuntimeBase::GrabEdgeEventRethrow(NActors::TActorId const&, TDuration)+100 (0x1373C094) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TTestCaseAnalyzeRebootColumnShard::Execute_(NUnitTest::TTestContext&)+2114 (0x13747552) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TCurrentTest::Execute()::'lambda'()::operator()() const+71 (0x1374C067) NUnitTest::TTestBase::Run(std::__y1::function, TBasicString> const&, char const*, bool)+126 (0x13A19DEE) NKikimr::NStat::NTestSuiteAnalyzeColumnshard::TCurrentTest::Execute()+425 (0x1374BA29) NUnitTest::TTestFactory::Execute()+803 (0x13A1A563) NUnitTest::RunMain(int, char**)+3021 (0x13A2887D) ??+0 (0x7FB420CB7D90) __libc_start_main+128 (0x7FB420CB7E40) _start+41 (0x1288A029) |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadSessionPoolLimitSyncQueryClient [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-result_sets] >> DataShardSnapshots::VolatileSnapshotCleanupOnReboot [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsStub [GOOD] Test command err: 2025-06-03T10:32:18.822107Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:18.822209Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:18.822248Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016e5/r3tmp/tmpCeO8ul/pdisk_1.dat TServer::EnableGrpc on GrpcPort 30608, node 1 TClient is connected to server localhost:13783 2025-06-03T10:32:19.041277Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.067209Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:19.068458Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:19.068480Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:19.068485Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:19.068601Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:19.068728Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946738246039 != 1748946738246043 2025-06-03T10:32:19.118707Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:19.118753Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:19.130168Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:32:19.233876Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnStore, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:32:19.292302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:19.292373Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:19.292453Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:32:19.292478Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:32:19.292500Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:32:19.292522Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:32:19.292546Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:32:19.292568Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:32:19.292590Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:32:19.292613Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:32:19.292639Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:32:19.292662Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:32:19.310571Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:32:19.311157Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:32:19.311187Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:32:19.311205Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:32:19.311216Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:32:19.311244Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:32:19.311253Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:32:19.311268Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:32:19.311277Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:32:19.311293Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:32:19.311302Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:32:19.311311Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:32:19.311320Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:32:19.311351Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:32:19.311362Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:32:19.311388Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:32:19.311397Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:32:19.311416Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:32:19.311425Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:32:19.311435Z node 1 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:32:19.311446Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:32:19.311454Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:32:19.311608Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:32:19.311616Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:32:19.315979Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:746:2625];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:32:19.316026Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:746:2625];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:32:19.316098Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;self_id=[1:746:2625];tablet_id=72075186224037889;process=TTxInitSchema::Execute;fline=abstract ... :secretKey;}; Initialization finished REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=0;WAITING=1 2025-06-03T10:33:16.738555Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:3569:4698] txid# 281474976715753, issues: { message: "Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable" severity: 1 } REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=
: Error: Execution, code: 1060
:1:27: Error: Executing DROP OBJECT EXTERNAL_DATA_SOURCE
: Error:
: Error: Other entities depend on this data source, please remove them at the beginning: /Root/olapStore/olapTable, code: 2003 , code: 2003 ;EXPECTATION=0 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=0;WAITING=1 REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 2025-06-03T10:33:27.436197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTable, opId: 281474976715764:0, at schemeshard: 72057594046644480 2025-06-03T10:33:27.785710Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; 2025-06-03T10:33:27.785797Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; 2025-06-03T10:33:27.786038Z node 1 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715764;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715764; REQUEST=DROP TABLE `/Root/olapStore/olapTable`;RESULT=
: Info: Execution, code: 1060
:1:12: Info: Executing DROP TABLE
: Info: Success, code: 4 ;EXPECTATION=1 FINISHED_REQUEST=DROP TABLE `/Root/olapStore/olapTable`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-03T10:33:38.076462Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:38.076516Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:38.076589Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:38.076596Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037890;has_config=0; 2025-06-03T10:33:38.076605Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037890 2025-06-03T10:33:38.076613Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037890 2025-06-03T10:33:38.076616Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-06-03T10:33:38.076628Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037890 2025-06-03T10:33:38.076636Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:38.076640Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:38.076646Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:38.076659Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:38.076662Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037888;has_config=0; 2025-06-03T10:33:38.076667Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037888 2025-06-03T10:33:38.076671Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037888 2025-06-03T10:33:38.076674Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-06-03T10:33:38.076678Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037888 2025-06-03T10:33:38.076682Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:38.076686Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:38.076688Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037889;has_config=0; 2025-06-03T10:33:38.076693Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037889 2025-06-03T10:33:38.076696Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 72075186224037889 2025-06-03T10:33:38.076699Z node 1 :TX_TIERING DEBUG: log.h:466: 
manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 2025-06-03T10:33:38.076703Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 72075186224037889 2025-06-03T10:33:38.076706Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:38.076735Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:752:2629];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:38.076794Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:38.076797Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:33:38.076802Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:38.076806Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier2' at tablet 0 2025-06-03T10:33:38.076810Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:38.076813Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier2' started at tablet 0 2025-06-03T10:33:38.076817Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier2;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:38.076925Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:38.076937Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:746:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 2025-06-03T10:33:48.709100Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:48.709205Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:48.709902Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:48.709920Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037890;has_config=0; 2025-06-03T10:33:48.709933Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037890 2025-06-03T10:33:48.709949Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:48.709959Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:48.709983Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:48.710036Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:48.710041Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-03T10:33:48.710049Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:48.710057Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:48.710084Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:48.710089Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037888;has_config=0; 2025-06-03T10:33:48.710095Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037888 2025-06-03T10:33:48.710103Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:48.710110Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:48.710114Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037889;has_config=0; 2025-06-03T10:33:48.710121Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037889 2025-06-03T10:33:48.710128Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:48.710214Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037888;self_id=[1:742:2623];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:33:48.710228Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037889;self_id=[1:746:2625];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:33:48.710240Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037890;self_id=[1:752:2629];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_delete_table_that_doesnt_exist_failure [GOOD] >> BasicStatistics::NotFullStatisticsColumnshard [GOOD] >> TxUsage::WriteToTopic_Demo_9_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/statistics/service/ut/unittest >> BasicStatistics::NotFullStatisticsColumnshard [GOOD] Test command err: 2025-06-03T10:26:18.224535Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:252:2214], Scheduled retry for 
error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:26:18.224581Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; 2025-06-03T10:26:18.224592Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001cab/r3tmp/tmp5mBfVS/pdisk_1.dat 2025-06-03T10:26:18.363537Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14602, node 1 2025-06-03T10:26:18.472867Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:26:18.472893Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:26:18.472898Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:26:18.472958Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:26:18.473744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:26:18.553075Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:18.553108Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:18.565573Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22611 2025-06-03T10:26:18.949849Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:26:19.864491Z node 2 :STATISTICS INFO: service_impl.cpp:232: Subscribed for config changes on node 2 2025-06-03T10:26:19.875252Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:19.875293Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:19.929113Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:26:19.929770Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:20.086440Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086592Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086747Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 
2025-06-03T10:26:20.086780Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086819Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086834Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086847Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086865Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.086880Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:26:20.253433Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:26:20.253477Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:26:20.264888Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:26:20.303797Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:26:20.321189Z node 2 :STATISTICS INFO: aggregator_impl.cpp:45: [72075186224037894] OnActivateExecutor 2025-06-03T10:26:20.321229Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:13: [72075186224037894] TTxInitSchema::Execute 2025-06-03T10:26:20.334869Z node 2 :STATISTICS DEBUG: tx_init_schema.cpp:34: [72075186224037894] TTxInitSchema::Complete 2025-06-03T10:26:20.334959Z node 2 :STATISTICS DEBUG: tx_init.cpp:18: [72075186224037894] TTxInit::Execute 2025-06-03T10:26:20.334991Z node 2 :STATISTICS DEBUG: tx_init.cpp:118: [72075186224037894] Loaded BaseStatistics: schemeshard count# 0 2025-06-03T10:26:20.334998Z node 2 :STATISTICS DEBUG: tx_init.cpp:143: [72075186224037894] Loaded ColumnStatistics: column count# 0 2025-06-03T10:26:20.335005Z node 2 :STATISTICS DEBUG: tx_init.cpp:182: [72075186224037894] Loaded ScheduleTraversals: table count# 0 2025-06-03T10:26:20.335013Z node 2 :STATISTICS DEBUG: tx_init.cpp:216: [72075186224037894] Loaded ForceTraversalOperations: table count# 0 2025-06-03T10:26:20.335019Z node 2 :STATISTICS DEBUG: tx_init.cpp:264: [72075186224037894] Loaded ForceTraversalTables: table count# 0 2025-06-03T10:26:20.335026Z node 2 :STATISTICS DEBUG: tx_init.cpp:271: [72075186224037894] TTxInit::Complete 2025-06-03T10:26:20.335199Z node 2 :STATISTICS INFO: aggregator_impl.cpp:62: [72075186224037894] Subscribed for config changes 2025-06-03T10:26:20.349441Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:20.349472Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7899: ConnectToSA(), pipe client id: [2:1860:2596], at schemeshard: 72075186224037897, StatisticsAggregatorId: 72075186224037894, at schemeshard: 72075186224037897 2025-06-03T10:26:20.351231Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1873:2607] 2025-06-03T10:26:20.352194Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:86: [72075186224037894] EvServerConnected, pipe server id = [2:1894:2616] 
2025-06-03T10:26:20.352326Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:213: [72075186224037894] EvConnectSchemeShard, pipe server id = [2:1894:2616], schemeshard id = 72075186224037897 2025-06-03T10:26:20.355379Z node 2 :STATISTICS DEBUG: tx_configure.cpp:21: [72075186224037894] TTxConfigure::Execute: database# /Root/Database 2025-06-03T10:26:20.361346Z node 2 :STATISTICS DEBUG: table_creator.cpp:147: Table _statistics updater. Describe result: PathErrorUnknown 2025-06-03T10:26:20.361372Z node 2 :STATISTICS NOTICE: table_creator.cpp:167: Table _statistics updater. Creating table 2025-06-03T10:26:20.361385Z node 2 :STATISTICS DEBUG: table_creator.cpp:100: Table _statistics updater. Full table path:/Root/Database/.metadata/_statistics 2025-06-03T10:26:20.367437Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720657:1, at schemeshard: 72075186224037897 2025-06-03T10:26:20.369229Z node 2 :STATISTICS DEBUG: table_creator.cpp:190: Table _statistics updater. TEvProposeTransactionStatus: { Status: 53 TxId: 281474976720657 SchemeShardStatus: 1 SchemeShardTabletId: 72075186224037897 PathId: 3 } 2025-06-03T10:26:20.369270Z node 2 :STATISTICS DEBUG: table_creator.cpp:261: Table _statistics updater. Subscribe on create table tx: 281474976720657 2025-06-03T10:26:20.494414Z node 2 :STATISTICS DEBUG: tx_configure.cpp:36: [72075186224037894] TTxConfigure::Complete 2025-06-03T10:26:20.579924Z node 2 :STATISTICS DEBUG: table_creator.cpp:290: Table _statistics updater. Request: create. Transaction completed: 281474976720657. Doublechecking... 2025-06-03T10:26:20.626724Z node 2 :STATISTICS DEBUG: table_creator.cpp:362: Table _statistics updater. Column diff is empty, finishing 2025-06-03T10:26:21.176586Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:2216:3061], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:21.176645Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:26:21.181131Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715659:0, at schemeshard: 72075186224037897 2025-06-03T10:26:21.231273Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:26:21.231393Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:26:21.231475Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:26:21.231503Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:26:21.231532Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:26:21.231562Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:26:21.231585Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:26:21.231614Z node 2 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037899;self_id=[2:2330:2859];tablet_id=72075186224037899;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_re ... ard count = 1 2025-06-03T10:32:59.807799Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:00.637975Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:00.638015Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:01.835933Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:01.940525Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:01.940570Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:33:03.723125Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:03.723231Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:03.910879Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:03.910912Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:05.776323Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:05.776374Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:06.685677Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:07.941559Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:07.941592Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:08.899443Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:08.899551Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:10.245593Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:10.245626Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:12.328961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:12.497230Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:12.497273Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:13.559236Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:33:13.559284Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:13.559290Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:13.559296Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:33:14.889145Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:14.889263Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:15.100465Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:15.100510Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:17.376981Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:17.377035Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:33:18.397546Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:19.803274Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:19.803322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:20.877728Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:20.877833Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:22.229352Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:22.229400Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:24.509813Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:24.733714Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:24.733755Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:27.231200Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:27.231311Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:27.440999Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:27.441057Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:29.581345Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:29.581388Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:30.539708Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:31.993031Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:31.993073Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:32.977490Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:32.977581Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:34.180060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:34.180092Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:36.129961Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:36.295550Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:36.295591Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 
2025-06-03T10:33:38.475060Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:38.475149Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:38.692289Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:38.692322Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:40.846704Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:40.846740Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:41.722270Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:42.976589Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:42.976622Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:43.909518Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:43.909644Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 2025-06-03T10:33:45.189567Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:45.189599Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:47.171598Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:285: [72075186224037894] EvPropagateTimeout 2025-06-03T10:33:47.338338Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:660: [72075186224037894] ScheduleNextTraversal 2025-06-03T10:33:47.338379Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:668: [72075186224037894] ScheduleNextTraversal. No force traversals. 2025-06-03T10:33:48.463456Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7869: ResolveSA(), StatisticsAggregatorId=18446744073709551615, at schemeshard: 72057594046644480 2025-06-03T10:33:48.463534Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7881: ConnectToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:48.463540Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7912: SendBaseStatsToSA(), no StatisticsAggregatorId, at schemeshard: 72057594046644480 2025-06-03T10:33:48.463546Z node 1 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 30.000000s, at schemeshard: 72057594046644480 2025-06-03T10:33:48.633671Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7996: SendBaseStatsToSA(), path count: 2, at schemeshard: 72075186224037897 2025-06-03T10:33:48.633714Z node 2 :STATISTICS DEBUG: schemeshard_impl.cpp:7839: Schedule next SendBaseStatsToSA in 202.000000s, at schemeshard: 72075186224037897 2025-06-03T10:33:48.633802Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:21: [72075186224037894] TTxSchemeShardStats::Execute: schemeshard id# 72075186224037897, stats size# 53 ... waiting for TEvSchemeShardStats 2 (done) ... 
waiting for TEvPropagateStatistics 2025-06-03T10:33:48.649965Z node 2 :STATISTICS DEBUG: tx_schemeshard_stats.cpp:132: [72075186224037894] TTxSchemeShardStats::Complete 2025-06-03T10:33:49.905169Z node 2 :STATISTICS DEBUG: aggregator_impl.cpp:330: [72075186224037894] PropagateStatistics(), node count = 1, schemeshard count = 1 2025-06-03T10:33:49.905279Z node 2 :STATISTICS DEBUG: service_impl.cpp:937: EvPropagateStatistics, node id = 2 ... waiting for TEvPropagateStatistics (done) 2025-06-03T10:33:49.905441Z node 2 :STATISTICS DEBUG: service_impl.cpp:771: [TStatService::TEvGetStatistics] RequestId[ 4 ], ReplyToActorId[ [2:17177:10366]], StatType[ 0 ], StatRequestsCount[ 1 ] 2025-06-03T10:33:49.911170Z node 2 :STATISTICS DEBUG: service_impl.cpp:787: [TStatService::TEvNavigateKeySetResult] RequestId[ 4 ] 2025-06-03T10:33:49.911212Z node 2 :STATISTICS DEBUG: service_impl.cpp:1260: ReplySuccess(), request id = 4, ReplyToActorId = [2:17177:10366], StatRequests.size() = 1 >> TxUsage::WriteToTopic_Demo_9_Query >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--true-YDB] >> KqpSplit::AfterResultMultiRangeSegmentPartition+Descending >> KqpScan::CrossJoin >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] [GOOD] >> DataShardSnapshots::VolatileSnapshotCleanupOnFinish [GOOD] >> DataShardSnapshots::VolatileSnapshotRenameTimeout |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::CloseSessionWithSessionPoolExplicit [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Table [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-plan] >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query |70.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |70.9%| [LD] {RESULT} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut |70.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/public_http/ut/ydb-core-public_http-ut >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-plan] >> TxUsage::WriteToTopic_Demo_37_Query [GOOD] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] |70.9%| [LD] {default-linux-x86_64, relwithdebinfo} 
$(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |70.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut |70.9%| [LD] {RESULT} $(B)/ydb/core/fq/libs/checkpointing/ut/ydb-core-fq-libs-checkpointing-ut >> KqpSplit::AfterResultMultiRangeSegmentPartition+Descending [GOOD] >> KqpSplit::AfterResultMultiRangeSegmentPartition+Unspecified >> DataShardSnapshots::LockedWriteBulkUpsertConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink >> TResourceBrokerConfig::UpdateTasks [GOOD] >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] >> TxUsage::WriteToTopic_Demo_38_Table >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-plan] |70.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TResourceBrokerConfig::UpdateResourceLimit [GOOD] >> TxUsage::ReadRuleGeneration [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigsWithQueryServiceDdl [GOOD] Test command err: 2025-06-03T10:32:10.338805Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:10.338875Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:10.338896Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00179b/r3tmp/tmpCfdBMG/pdisk_1.dat TServer::EnableGrpc on GrpcPort 23360, node 1 TClient is connected to server localhost:10783 2025-06-03T10:32:10.479544Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:10.497670Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:10.498993Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:10.499017Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:10.499023Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:10.499140Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:10.499276Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946729903944 != 1748946729903948 2025-06-03T10:32:10.541352Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:10.541403Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:10.553472Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:22.217575Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:748:2628], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:22.217617Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:758:2633], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:22.217630Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:22.219262Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:22.232694Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:762:2636], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715657 completed, doublechecking } 2025-06-03T10:32:22.262985Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:813:2668] txid# 281474976715658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:22.437720Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:1, at schemeshard: 72057594046644480 2025-06-03T10:32:22.802686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:32:22.921095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:1, at schemeshard: 72057594046644480 2025-06-03T10:32:23.371800Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-06-03T10:32:23.747267Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:23.866020Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:32:24.577069Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:24.857367Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:36.225468Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA 
SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 2025-06-03T10:32:36.591639Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:32:36.591674Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:36.811743Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-06-03T10:32:36.811773Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-03T10:32:36.811782Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-06-03T10:32:36.811799Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:32:36.811856Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:154;event=watch_scheme_objects;names=/Root/tier1; 2025-06-03T10:32:36.811966Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-03T10:32:36.811973Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=0; 2025-06-03T10:32:36.811980Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-03T10:32:36.811988Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:32:36.812389Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:111;component=TSchemeObjectWatcher;event=NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult;path=Root/tier1; 2025-06-03T10:32:36.812684Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:140;event=object_fetched;path=/Root/tier1; 2025-06-03T10:32:36.812720Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:75;component=tiering_manager;event=object_updated;path=/Root/tier1; 2025-06-03T10:32:36.812755Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=1; 2025-06-03T10:32:36.812776Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:32:36.812784Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc2", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:47.383463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part 
proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715717:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( ... ect_deleted;path=/Root/tier2; 2025-06-03T10:33:42.491703Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:42.491710Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-06-03T10:33:42.491721Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-06-03T10:33:42.491728Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-06-03T10:33:42.491732Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-03T10:33:42.491746Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-06-03T10:33:42.491755Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:42.491760Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:42.491763Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-06-03T10:33:42.491770Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-06-03T10:33:42.491774Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-06-03T10:33:42.491778Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-03T10:33:42.491782Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-06-03T10:33:42.491785Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:42.491789Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:42.491792Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-06-03T10:33:42.491806Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-06-03T10:33:42.491810Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-06-03T10:33:42.491813Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-03T10:33:42.491816Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-06-03T10:33:42.491820Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 
2025-06-03T10:33:42.491825Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:42.491828Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-03T10:33:42.491835Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:42.491839Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-03T10:33:42.491841Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:42.491845Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:33:42.491848Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:42.491883Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:42.491903Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:42.491912Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-03T10:33:42.491918Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:42.491922Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-03T10:33:42.491925Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:42.491929Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:33:42.491932Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:42.491981Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3013:4285];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:42.492016Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3020:4287];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:42.492034Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3027:4291];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-03T10:33:53.061821Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:53.061854Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:53.061862Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 
2025-06-03T10:33:53.061879Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:53.061918Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:53.061932Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.061941Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-06-03T10:33:53.061953Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-03T10:33:53.061969Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.061976Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.061981Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-06-03T10:33:53.061987Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-03T10:33:53.061995Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.062002Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.062007Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-06-03T10:33:53.062013Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-03T10:33:53.062020Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.062027Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.062032Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:33:53.062038Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:53.062045Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.062060Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:33:53.062108Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.062117Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:33:53.062126Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:53.062133Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.062175Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:33:53.062185Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:33:53.062191Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:53.062198Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:53.062230Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3013:4285];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:33:53.062245Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3020:4287];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:33:53.062257Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3027:4291];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 >> TTabletCountersPercentile::SingleBucket [GOOD] >> TTabletCountersPercentile::StartFromZero [GOOD] >> KqpScan::CrossJoin [GOOD] >> KqpScan::Counters >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-result_sets] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletCountersPercentile::StartFromZero [GOOD] >> DataShardSnapshots::LockedWriteBulkUpsertConflict-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink >> KqpSplit::AfterResultMultiRangeSegmentPartition+Unspecified [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases_2.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[compute/scheduler.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-plan] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsTableClient [GOOD] >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> 
KqpSplit::AfterResultMultiRangeSegmentPartition+Unspecified [GOOD] Test command err: Trying to start YDB, gRPC: 32013, MsgBus: 1376 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ec9/r3tmp/tmpsLnbLv/pdisk_1.dat 2025-06-03T10:33:51.927436Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669442245062149:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:51.929808Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:33:52.056195Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669442245061989:2079] 1748946831922687 != 1748946831922690 2025-06-03T10:33:52.061614Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 32013, node 1 2025-06-03T10:33:52.095489Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:52.095529Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:52.100779Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:52.101726Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:52.101736Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:52.101740Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:52.101796Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1376 TClient is connected to server localhost:1376 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:52.226524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:52.242801Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.285828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.312677Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.327845Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.500566Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446540030917:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.500621Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.551407Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.570744Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.589794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.609869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.624347Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.640309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.670440Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.705999Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446540031567:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.706023Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.706134Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446540031575:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.707187Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:52.720462Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669446540031577:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:52.816501Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669446540031628:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- 2025-06-03T10:33:53.190550Z node 1 :KQP_EXECUTER ERROR: kqp_scan_executer.cpp:163: ActorId: [1:7511669450834999225:2507] TxId: 281474976715673. Ctx: { TraceId: 01jwtnnknk437b1verchec0gj0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzY5MjRkZTMtODAyOTc0ZmEtMWZhNThiNmYtMmM0MzYzNjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Can not find default state storage group for database 2025-06-03T10:33:53.190693Z node 1 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnnknk437b1verchec0gj0, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=1&id=YzY5MjRkZTMtODAyOTc0ZmEtMWZhNThiNmYtMmM0MzYzNjA=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:53.493102Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946833235, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 22779, MsgBus: 5784 2025-06-03T10:33:53.979781Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669450500010532:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:53.979816Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001ec9/r3tmp/tmpTCftSw/pdisk_1.dat 2025-06-03T10:33:54.005476Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:54.005592Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669450500010512:2079] 1748946833979661 != 1748946833979664 TServer::EnableGrpc on GrpcPort 22779, node 2 2025-06-03T10:33:54.018381Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:54.018394Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:54.018396Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:54.018452Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5784 TClient is connected to server localhost:5784 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:33:54.093931Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:54.093969Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:54.094506Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.095122Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:54.106525Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:33:54.138715Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.244413Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:54.289228Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:54.501108Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669454794979456:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:54.501179Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:54.510018Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.535951Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.571473Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.683514Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.727853Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.755776Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.781445Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:54.819830Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669454794980110:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:54.819882Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:54.819909Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669454794980115:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:54.820931Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:54.823795Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669454794980117:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:54.902589Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669454794980168:3392] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:55.262957Z node 2 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715673. Ctx: { TraceId: 01jwtnnnpj2rv82m0tc1kj3m3p, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=2&id=NDJhNTkxMTAtZTg2MDU5OTQtN2QxY2Y2YTctZGJiNzhlOTk=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root captured evread ----------------------------------------------------------- starting split ----------------------------------------------------------- scheme op Status: 53 TxId: 281474976715674 SchemeShardStatus: 1 SchemeShardTabletId: 72057594046644480 captured evreadresult ----------------------------------------------------------- resume evread ----------------------------------------------------------- 2025-06-03T10:33:55.726916Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946835307, txId: 281474976715672] shutting down >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-result_sets] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD] >> KqpScan::Counters [GOOD] >> KqpScan::CrossJoinCount >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-plan] [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[udfs/math.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[json/json_query.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-plan] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> DataShardSnapshots::LockedWriteDistributedCommitAborted+UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink >> TxUsage::WriteToTopic_Demo_27_Table >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions/gtest >> YdbSdkSessions::MultiThreadMultipleRequestsOnSharedSessionsQueryClient [SKIPPED] Test command err: ydb/public/sdk/cpp/tests/integration/sessions/main.cpp:534: Enable after interactive tx support >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int.sql-result_sets] [GOOD] >> 
test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-result_sets] |71.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |71.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |71.0%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_export/ydb-core-tx-datashard-ut_export |71.0%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-result_sets] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |71.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut |71.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--false-YDB] |71.0%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions/test-results/gtest/{meta.json ... results_accumulator.log} |71.0%| [LD] {RESULT} $(B)/ydb/core/client/minikql_compile/ut/ydb-core-client-minikql_compile-ut >> KqpScan::CrossJoinCount [GOOD] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet >> TxUsage::Sinks_Oltp_WriteToTopics_1_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-result_sets] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_compaction_policy_options [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scan/unittest >> KqpScan::CrossJoinCount [GOOD] Test command err: Trying to start YDB, gRPC: 23858, MsgBus: 24730 2025-06-03T10:33:51.947008Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669442285618880:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:33:51.947057Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eca/r3tmp/tmp3UDy3O/pdisk_1.dat 
2025-06-03T10:33:52.076450Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 23858, node 1 2025-06-03T10:33:52.114805Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:52.114825Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:52.114828Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:52.114885Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:33:52.138379Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:52.138409Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:52.139747Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24730 TClient is connected to server localhost:24730 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:33:52.269809Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.278360Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:52.287346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.326041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.403244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:33:52.437935Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:52.505939Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446580587637:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.505986Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.560095Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.581480Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.594786Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.609851Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.627103Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.646847Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.678237Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.746227Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446580588294:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.746276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.747270Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669446580588299:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:52.748109Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:52.750731Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:33:52.750843Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669446580588301:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:52.840598Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669446580588352:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:53.062238Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:55.316028Z node 1 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946833319, txId: 281474976715674] shutting down Trying to start YDB, gRPC: 8733, MsgBus: 18691 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eca/r3tmp/tmp86QcPD/pdisk_1.dat 2025-06-03T10:33:55.982715Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:56.023982Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:56.025486Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669461615977371:2079] 1748946835958107 != 1748946835958110 TServer::EnableGrpc on GrpcPort 8733, node 2 2025-06-03T10:33:56.048600Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:56.048617Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:56.048620Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:56.048684Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:33:56.079398Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:56.079447Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:56.083805Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:18691 TClient is connected to server localhost:18691 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 S ... 
: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:57.420531Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669470205914258:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:57.420559Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:57.420766Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669470205914263:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:57.421817Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:57.427224Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:33:57.427356Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669470205914265:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:57.487872Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669470205914316:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:33:57.832132Z node 2 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946837869, txId: 281474976715672] shutting down Trying to start YDB, gRPC: 24968, MsgBus: 23295 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001eca/r3tmp/tmprZlZ6S/pdisk_1.dat 2025-06-03T10:33:58.529434Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:58.534408Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:58.537692Z node 3 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [3:7511669474180512725:2079] 1748946838448839 != 1748946838448842 TServer::EnableGrpc on GrpcPort 24968, node 3 2025-06-03T10:33:58.562069Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:58.562100Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:58.566380Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:58.577524Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:33:58.577540Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:33:58.577543Z node 3 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:33:58.577600Z node 3 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:23295 TClient is connected to server localhost:23295 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:33:58.756708Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:58.765664Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:33:58.775258Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:58.806988Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:58.843897Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:58.862489Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:33:59.420185Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669478475481655:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:59.420216Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:59.428098Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.460154Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.486492Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.514411Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.544899Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.575106Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.599359Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:33:59.675028Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669478475482320:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:59.675069Z node 3 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:59.676855Z node 3 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [3:7511669478475482325:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:33:59.677996Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:33:59.682243Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:33:59.682320Z node 3 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [3:7511669478475482327:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:33:59.776894Z node 3 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [3:7511669478475482378:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:00.067823Z node 3 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:00.577959Z node 3 :KQP_RESOURCE_MANAGER WARN: kqp_snapshot_manager.cpp:238: KqpSnapshotManager: discarding snapshot; our snapshot: [step: 1748946840256, txId: 281474976715674] shutting down >> test_storage_config.py::TestStorageConfig::test_cases[case_3] [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitAborted-UseSink [GOOD] >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-plan] >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_4] >> test_sql.py::TestCanonicalFolder1::test_case[index_predicate_point.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-plan] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query >> test_sql.py::TestCanonicalFolder1::test_case[join/group_by_lookup.script-script] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query >> TxUsage::WriteToTopic_Demo_38_Table [GOOD] |71.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletPipeTest::TestSendWithoutWaitOpenToWrongTablet [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-result_sets] >> TxUsage::WriteToTopic_Demo_9_Query [GOOD] >> TxUsage::WriteToTopic_Demo_38_Query >> TTabletResolver::TabletResolvePriority [GOOD] >> TxUsage::WriteToTopic_Demo_50_Table >> test_sql.py::TestCanonicalFolder1::test_case[dt.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-plan] |71.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |71.0%| [LD] {RESULT} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |71.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/keyvalue/ut/ydb-services-keyvalue-ut |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tablet/ut/unittest >> TTabletResolver::TabletResolvePriority [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefonly.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-plan] >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] |71.1%| [TA] $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> TxUsage::WriteToTopic_Demo_21_RestartBeforeCommit_Query [GOOD] >> ColumnShardTiers::DSConfigs [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table >> test_sql.py::TestCanonicalFolder1::test_case[json/select_from_table.sql-result_sets] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_3_Query [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-plan] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_by_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-result_sets] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success >> DataShardSnapshots::LockedWriteDistributedCommitCrossConflict+UseSink [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-result_sets] |71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> TBSVWithReboots::CreateWithIntermediateDirsForceDrop >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure >> DataShardSnapshots::VolatileSnapshotRenameTimeout [GOOD] |71.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet >> test_sql.py::TestCanonicalFolder1::test_case[join/group_by_lookup.script-script] [GOOD] >> TBSVWithReboots::CreateAssignDropIsAllowed >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_int_1.sql-result_sets] [GOOD] >> DataShardSnapshots::UncommittedWriteRestartDuringCommit |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-plan] [GOOD] >> DataShardSnapshots::LockedWriteCleanupOnSplit+UseSink [GOOD] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_in_rp.sql-result_sets] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_4] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q10.sql-result_sets] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-plan] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_1_Query [GOOD] >> 
test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-other-admin] [GOOD] >> TxUsage::WriteToTopic_Demo_38_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Table [GOOD] >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Table [GOOD] >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_1_Query [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> TxUsage::WriteToTopic_Demo_27_Table [GOOD] >> TxUsage::WriteToTopic_Demo_50_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-result_sets] >> TxUsage::WriteToTopic_Demo_18_RestartAfterCommit_Query [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-plan] >> test_storage_config.py::TestStorageConfig::test_cases[case_5] |71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |71.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |71.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_replication/ydb-core-tx-datashard-ut_replication |71.1%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_compaction/ydb-core-tx-datashard-ut_compaction |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/tiering/ut/unittest >> ColumnShardTiers::DSConfigs [GOOD] Test command err: 2025-06-03T10:32:20.104837Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:32:20.104951Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:32:20.104988Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016de/r3tmp/tmp5fOksI/pdisk_1.dat TServer::EnableGrpc on GrpcPort 61314, node 1 TClient is connected to server localhost:16995 2025-06-03T10:32:20.269056Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:32:20.287290Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:20.288809Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:32:20.288836Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:32:20.288843Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:32:20.288982Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:20.289180Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946739516268 != 1748946739516272 2025-06-03T10:32:20.338200Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:20.338284Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:20.349956Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Initialization finished REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:32.112003Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:751:2629], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:32.112102Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:32.113831Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:2, at schemeshard: 72057594046644480 2025-06-03T10:32:32.189235Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:868:2707], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:32.189276Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:32.189352Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:873:2712], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:32.190579Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:2, at schemeshard: 72057594046644480 2025-06-03T10:32:32.325812Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:875:2714], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:32:32.438139Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:969:2779] txid# 281474976715661, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 7], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:32.552355Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:32:32.654201Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:1, at schemeshard: 72057594046644480 2025-06-03T10:32:32.959326Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715670:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.273551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:32:33.399773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:32:34.058713Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:32:34.317974Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;RESULT=;EXPECTATION=1 FINISHED_REQUEST= UPSERT OBJECT `accessKey` (TYPE SECRET) WITH (value = `secretAccessKey`); UPSERT OBJECT `secretKey` (TYPE SECRET) WITH (value = `fakeSecret`); ;EXPECTATION=1;WAITING=1 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:45.666501Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715702:0, at schemeshard: 72057594046644480 REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;RESULT=;EXPECTATION=1 
2025-06-03T10:32:45.996567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7306: Cannot get console configs 2025-06-03T10:32:45.996600Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded FINISHED_REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier1` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fake.fake/abc1", AUTH_METHOD="AWS", AWS_ACCESS_KEY_ID_SECRET_NAME="accessKey", AWS_SECRET_ACCESS_KEY_SECRET_NAME="secretKey", AWS_REGION="ru-central1" ); ;EXPECTATION=1;WAITING=1 2025-06-03T10:32:46.214324Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:215;event=skip_tier_manager_start;tier=/Root/tier1;has_secrets=1;tier_config=0; 2025-06-03T10:32:46.214363Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-03T10:32:46.214370Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={}; 2025-06-03T10:32:46.214382Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:128;event=start_subscribing_metadata; 2025-06-03T10:32:46.214431Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:154;event=watch_scheme_objects;names=/Root/tier1; 2025-06-03T10:32:46.214485Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:62;event=TEvRefreshSubscriberData;snapshot=secrets; 2025-06-03T10:32:46.214489Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:271;event=update_secrets;tablet=0; 2025-06-03T10:32:46.214494Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:196;event=skip_tier_manager_reloading;tier=/Root/tier1;has_secrets=1;found_tier_config=1; 2025-06-03T10:32:46.214499Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=0};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:32:46.214818Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:111;component=TSchemeObjectWatcher;event=NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult;path=Root/tier1; 2025-06-03T10:32:46.215006Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:140;event=object_fetched;path=/Root/tier1; 2025-06-03T10:32:46.215030Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:75;component=tiering_manager;event=object_updated;path=/Root/tier1; 2025-06-03T10:32:46.215062Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=1; 2025-06-03T10:32:46.215075Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:32:46.215081Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST= CREATE EXTERNAL DATA SOURCE `/Root/tier2` WITH ( SOURCE_TYPE="ObjectStorage", LOCATION="http://fa ... 
ect_deleted;path=/Root/tier2; 2025-06-03T10:33:51.676002Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:51.676012Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037892;has_config=0; 2025-06-03T10:33:51.676027Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037892 2025-06-03T10:33:51.676037Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037892 2025-06-03T10:33:51.676043Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-03T10:33:51.676062Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037892 2025-06-03T10:33:51.676076Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:51.676084Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:51.676090Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037893;has_config=0; 2025-06-03T10:33:51.676097Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037893 2025-06-03T10:33:51.676104Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037893 2025-06-03T10:33:51.676109Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-03T10:33:51.676115Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037893 2025-06-03T10:33:51.676123Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:51.676130Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:51.676135Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=72075186224037894;has_config=0; 2025-06-03T10:33:51.676142Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 72075186224037894 2025-06-03T10:33:51.676148Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 72075186224037894 2025-06-03T10:33:51.676152Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-03T10:33:51.676158Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 72075186224037894 2025-06-03T10:33:51.676164Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:51.676179Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:51.676184Z node 1 :TX_TIERING INFO: log.cpp:784: 
fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-03T10:33:51.676194Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:51.676200Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-03T10:33:51.676206Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:51.676211Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:33:51.676218Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:33:51.676379Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier2; 2025-06-03T10:33:51.676493Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3016:4285];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:51.676517Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3023:4287];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:51.676564Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3030:4291];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=1; 2025-06-03T10:33:51.676754Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier2; 2025-06-03T10:33:51.676766Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier2;tablet=0;has_config=0; 2025-06-03T10:33:51.676774Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier2' stopped at tablet 0 2025-06-03T10:33:51.676782Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:142 :Restarting tier '/Root/tier1' at tablet 0 2025-06-03T10:33:51.676788Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:33:51.676799Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:162 :Tier '/Root/tier1' started at tablet 0 2025-06-03T10:33:51.676808Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS={id=/Root/tier1;has_config=1};SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier2`;EXPECTATION=1;WAITING=1 REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 2025-06-03T10:34:02.494438Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494480Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494488Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494506Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494561Z node 1 :TX_TIERING DEBUG: log.cpp:784: 
fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494754Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=fetcher.h:149;event=object_deleted;path=/Root/tier1; 2025-06-03T10:34:02.494807Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:34:02.494818Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037892;has_config=0; 2025-06-03T10:34:02.494836Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037892 2025-06-03T10:34:02.494857Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.494865Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:34:02.494871Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037893;has_config=0; 2025-06-03T10:34:02.494879Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037893 2025-06-03T10:34:02.494888Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.494933Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:34:02.494938Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=72075186224037894;has_config=0; 2025-06-03T10:34:02.494945Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 72075186224037894 2025-06-03T10:34:02.494954Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.494979Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:34:02.494985Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:34:02.494992Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:34:02.495001Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.495166Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 2025-06-03T10:34:02.495175Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:34:02.495183Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:34:02.495192Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.495315Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:94;component=tiering_manager;event=object_deleted;name=/Root/tier1; 
2025-06-03T10:34:02.495326Z node 1 :TX_TIERING INFO: log.cpp:784: fline=manager.cpp:279;event=update_tier_config;name=/Root/tier1;tablet=0;has_config=0; 2025-06-03T10:34:02.495338Z node 1 :TX_TIERING DEBUG: log.h:466: manager.cpp:150 :Tier '/Root/tier1' stopped at tablet 0 2025-06-03T10:34:02.495345Z node 1 :TX_TIERING DEBUG: log.cpp:784: fline=manager.cpp:205;event=configs_updated;configs=TIERS=;SECRETS={USId:root@builtin:accessKey;USId:root@builtin:secretKey;}; 2025-06-03T10:34:02.495641Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037892;self_id=[1:3016:4285];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:34:02.495661Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037893;self_id=[1:3023:4287];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; 2025-06-03T10:34:02.495674Z node 1 :TX_TIERING DEBUG: log.cpp:784: tablet_id=72075186224037894;self_id=[1:3030:4291];ev=NKikimr::NColumnShard::TEvPrivate::TEvTieringModified;fline=tiering.cpp:244;event=refresh_tiering;has_tiering=0;tiers=0;had_tiering_before=0; REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;RESULT=;EXPECTATION=1 FINISHED_REQUEST=DROP EXTERNAL DATA SOURCE `/Root/tier1`;EXPECTATION=1;WAITING=1 >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-plan] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] >> YdbSdkSessionsPool::StressTestAsync/0 [GOOD] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table >> TxUsage::WriteToTopic_Demo_39_Table >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query >> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-subgroup] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table >> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--true-YDB] |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-case.test] [GOOD] |71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::PeriodicTask/1 [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query >> TBSVWithReboots::CreateWithIntermediateDirsForceDrop [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] >> TxUsage::WriteToTopic_Demo_50_Query >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-plan] >> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink >> DataShardSnapshots::UncommittedWriteRestartDuringCommit [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_from_table.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_lefsemi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/select_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-plan] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_5] [GOOD] >> 
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v0] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-result_sets]
>> YdbSdkSessionsPool::StressTestAsync/1
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_39_Table [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_4_Query [GOOD]
>> TxUsage::WriteToTopic_Demo_21_RestartAfterCommit_Query [GOOD]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-subgroup] [GOOD]
>> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-plan] [GOOD]
>> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-result_sets]
>> test_storage_config.py::TestStorageConfig::test_cases[case_6]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1]
|71.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_cant_add_existing_column [GOOD]
|71.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_after_create_table_it_is_success [GOOD]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query
>> TxUsage::WriteToTopic_Demo_39_Query
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table
>> DataShardSnapshots::UncommittedWriteRestartDuringCommitThenBulkErase [GOOD]
>> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table
>> test_user_administration.py::test_database_admin_cant_change_database_admin_group[rename-admin-group]
>> DataShardSnapshots::LockedWriteCleanupOnSplit-UseSink [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_on.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q11.sql-result_sets] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_6] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null.sql-result_sets] [GOOD]
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-result_sets]
>> TxUsage::WriteToTopic_Demo_39_Query [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-plan]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Table [GOOD]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_group[rename-admin-group] [GOOD]
>> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp.sql-result_sets] [GOOD]
>> TxUsage::WriteToTopic_Demo_19_RestartBeforeCommit_Query [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_double_lookup.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-plan]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_2_Query [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_7]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[json/select_params.sql-result_sets] [GOOD]
>> DataShardSnapshots::UncommittedChangesRenameTable-UseSink
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-plan] [GOOD]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_user[block]
>> DataShardSnapshots::LockedWriteCleanupOnCopyTable+UseSink [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-plan]
>> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-plan] [GOOD]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_left.sql-result_sets] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_7] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_literal.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-result_sets]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_user[block] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-plan] [GOOD]
>> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-result_sets]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-plan]
>> test_storage_config.py::TestStorageConfig::test_cases[case_8]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-plan] [GOOD]
>> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_null_1.sql-result_sets] [GOOD]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q12.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-result_sets]
>> DataShardSnapshots::LockedWriteCleanupOnCopyTable-UseSink [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/delete_same.sql-result_sets] [GOOD]
|71.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_in_new_channel_then_can_read_from_tablet [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-plan] [GOOD]
>> test_storage_config.py::TestStorageConfig::test_cases[case_8] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_dup_column_right.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-plan]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_rp_1.sql-result_sets] [GOOD]
>> DataShardSnapshots::DelayedWriteReadableAfterSplit
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-plan]
>> DataShardSnapshots::DelayedWriteReadableAfterSplit [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-result_sets]
>> DataShardSnapshots::DelayedWriteReplyAfterSplit
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/insert_params.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q13.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_left_null.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-result_sets]
>> DataShardSnapshots::DelayedWriteReplyAfterSplit [GOOD]
>> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[order_by/order_by_pk_composite.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_range_right.sql-result_sets] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_works[tables_format_v1] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
|71.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_storage_config.py::TestStorageConfig::test_cases[case_8] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse.sql-result_sets] [GOOD]
>> DataShardSnapshots::DelayedWriteReadableAfterSplitAndReboot [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-plan]
>> DataShardSnapshots::BrokenLockChangesDontLeak
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q14.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-result_sets]
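The DeprecationWarning in the test command output above comes from ydb/tests/library/sqs/requests_client.py calling logging.Logger.warn, which is a deprecated alias of warning in Python 3. A minimal sketch of the fix the warning suggests; the surrounding function and logger name are reconstructed for illustration, not copied from the file:

import logging

logger = logging.getLogger("sqs.requests_client")  # illustrative logger name

def log_failed_request(code, reason, text):
    # 'warn' is a deprecated alias; 'warning' is the supported method name.
    # Passing values as arguments instead of pre-formatting with .format()
    # also lets logging defer interpolation until the record is emitted.
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                   code, reason, text)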
>> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/insert_revert.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage.script-script]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_from_table.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_point_range_rp.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_reverse_1.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_bool.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage.script-script] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage_key.script-script]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_usage_key.script-script] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q15.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_predicate_right_2.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_literal.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/multi_write.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_comparison_empty_string.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q16.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_subquery.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/replace.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_composite.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_range_left.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_random_chars_ranges.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q17.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_utf8.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_cast2.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_right_key_range.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[dynumber/select_params.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[explain.script-script]
>> test_sql.py::TestCanonicalFolder1::test_case[explain.script-script] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_dup_c_left.sql-result_sets] [GOOD] >>
test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_background_cleaning/unittest >> TSchemeshardBackgroundCleaningTest::SchemeshardBackgroundCleaningTestSimpleCleanIndex [GOOD] Test command err: Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:227:2060] recipient: [1:221:2142] Leader for TabletID 72057594046678944 is [1:238:2153] sender: [1:239:2060] recipient: [1:221:2142] 2025-06-03T10:32:27.715392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:32:27.715427Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:27.715433Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.100000s, StatsMaxBatchSize# 100, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:32:27.715440Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:32:27.715458Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:32:27.715463Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:32:27.715474Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:32:27.715490Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:32:27.715619Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:32:27.715706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:32:27.736418Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard_impl.cpp:7314: Cannot subscribe to console configs 2025-06-03T10:32:27.736454Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: 
Table profiles were not loaded 2025-06-03T10:32:27.742333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:32:27.742832Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:32:27.742902Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:32:27.745025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:32:27.745149Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:32:27.745306Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:27.745575Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:32:27.746422Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:27.746498Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:32:27.746831Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:27.746841Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:32:27.746877Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:32:27.746884Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:27.746888Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:32:27.746911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.748233Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:238:2153] sender: [1:351:2060] recipient: [1:17:2064] 2025-06-03T10:32:27.770504Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:32:27.770612Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.770694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 
2025-06-03T10:32:27.770745Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:32:27.770756Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.771860Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:27.771898Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:32:27.771988Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.772010Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:32:27.772017Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:32:27.772024Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:32:27.772544Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.772558Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:32:27.772565Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:32:27.772961Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.772974Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:32:27.772981Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:27.772989Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:32:27.773811Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:32:27.774315Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:32:27.774363Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation 
RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:32:27.774579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:32:27.774606Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 245 RawX2: 4294969453 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:32:27.774615Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:27.774669Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 128 -> 240 2025-06-03T10:32:27.774677Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:362: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:32:27.774707Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 1 2025-06-03T10:32:27.774716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:402: DoUpdateTenant no IsExternalSubDomainRoot, pathId: : [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 FAKE_COORDINATOR: Erasing txId 1 2025-06-03T10:32:27.775101Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:32:27.775109Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:32:27.775153Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03 ... 
schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:11.305126Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:11.305129Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:11.641122Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:11.641155Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:11.641173Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:11.641176Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:11.999319Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:11.999368Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:11.999386Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:11.999393Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:12.335959Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:12.335996Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:12.336019Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:12.336024Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:12.683172Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:12.683209Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:12.683230Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: 
NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:12.683244Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.040316Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.040347Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.040368Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.040373Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.377180Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.377230Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.377252Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.377258Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.744198Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271125000, Sender [0:0:0], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.744242Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4890: StateWork, processing event TEvSchemeShard::TEvWakeupToMeasureSelfResponseTime 2025-06-03T10:34:13.744260Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271124999, Sender [7:238:2153], Recipient [7:238:2153]: NKikimr::NSchemeShard::TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.744264Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4889: StateWork, processing event TEvSchemeShard::TEvMeasureSelfResponseTime 2025-06-03T10:34:13.785388Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [7:1089:2841], Recipient [7:238:2153]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-03T10:34:13.785412Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:34:13.785439Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:34:13.785482Z node 7 
:SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable" took 32us result status StatusPathDoesNotExist 2025-06-03T10:34:13.785514Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/tmp/TempTable" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:34:13.785565Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [7:1090:2842], Recipient [7:238:2153]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true } 2025-06-03T10:34:13.785569Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:34:13.785574Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:34:13.785584Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp" took 9us result status StatusPathDoesNotExist 2025-06-03T10:34:13.785594Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/tmp" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:34:13.785627Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4882: StateWork, received event# 271122945, Sender [7:1091:2843], Recipient [7:238:2153]: NKikimrSchemeOp.TDescribePath Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true } 2025-06-03T10:34:13.785630Z node 7 :FLAT_TX_SCHEMESHARD TRACE: schemeshard_impl.cpp:4894: StateWork, processing event TEvSchemeShard::TEvDescribeScheme 2025-06-03T10:34:13.785635Z node 7 :SCHEMESHARD_DESCRIBE 
DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/tmp/TempTable/ValueIndex" Options { ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:34:13.785643Z node 7 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/tmp/TempTable/ValueIndex" took 9us result status StatusPathDoesNotExist 2025-06-03T10:34:13.785654Z node 7 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/tmp/TempTable/ValueIndex\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/tmp/TempTable/ValueIndex" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateWithIntermediateDirsForceDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:34:05.298274Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:34:05.298316Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:34:05.298324Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:34:05.298330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:34:05.298346Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 
2025-06-03T10:34:05.298351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:34:05.298362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:34:05.298377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:34:05.298533Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:34:05.298651Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:34:05.321557Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:34:05.321592Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:05.321763Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:34:05.324952Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:34:05.327328Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:34:05.327415Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:34:05.331284Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:34:05.331349Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:34:05.343872Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.345806Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:34:05.346637Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:05.346722Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:34:05.348702Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:34:05.348719Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:05.348804Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:34:05.348814Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, 
domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:34:05.348822Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:34:05.348874Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:34:05.350629Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:34:05.382609Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:34:05.385796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.385925Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:34:05.386002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:34:05.386024Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:34:05.387159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387192Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:34:05.387199Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:34:05.387207Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:34:05.387887Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387904Z node 1 :FLAT_TX_SCHEMESHARD 
INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387911Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:34:05.388523Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388540Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388560Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:05.388570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:34:05.389534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:34:05.390271Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:34:05.390343Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:34:05.390614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.390663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:34:05.390673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:05.395946Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ard Send, to populator: [53:206:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-03T10:34:18.627599Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [53:206:2207], at schemeshard: 72057594046678944, txId: 1003, path id: 4 2025-06-03T10:34:18.627694Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.627702Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.627705Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 4, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:18.627708Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 18446744073709551615 2025-06-03T10:34:18.627711Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 3 2025-06-03T10:34:18.627866Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.627875Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.627878Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:18.627881Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:34:18.627884Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:34:18.628073Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.628082Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.628087Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:18.628091Z node 53 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:34:18.628094Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:34:18.628191Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.628202Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.628206Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:18.628210Z node 53 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 18446744073709551615 2025-06-03T10:34:18.628215Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:34:18.628224Z node 53 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 1 2025-06-03T10:34:18.628228Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:212: TTxAckPublishToSchemeBoard Notify send TEvNotifyTxCompletionResult, at schemeshard: 72057594046678944, to actorId: [53:304:2294] 2025-06-03T10:34:18.628276Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:18.628283Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:18.628505Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 Leader for TabletID 72057594037968897 is [53:218:2216] sender: [53:346:2058] recipient: [53:15:2062] 2025-06-03T10:34:18.628762Z node 53 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-03T10:34:18.628826Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:18.628874Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:34:18.628908Z node 53 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 
TxId_Deprecated: 2 2025-06-03T10:34:18.628933Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:34:18.628949Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 1 2025-06-03T10:34:18.629015Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:34:18.629020Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 5], at schemeshard: 72057594046678944 2025-06-03T10:34:18.629028Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 2025-06-03T10:34:18.629033Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:34:18.629036Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:34:18.629039Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:34:18.629042Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:34:18.629111Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.629149Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.629385Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:18.629409Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:34:18.629417Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [53:305:2295] 2025-06-03T10:34:18.629930Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:34:18.629956Z node 53 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:34:18.629972Z node 53 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 3 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestWaitNotification: OK eventTxId 1002 TestWaitNotification: OK eventTxId 1003 2025-06-03T10:34:18.630099Z node 53 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: 
TTxDescribeScheme DoExecute, record: Path: "/MyRoot/x" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:34:18.630137Z node 53 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/x" took 47us result status StatusPathDoesNotExist 2025-06-03T10:34:18.630174Z node 53 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/x\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/x" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_rightsemi.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_10.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[index/select_using_index_only.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_nonkey_rp.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_equi.sql-result_sets] [GOOD]
>> TBSVWithReboots::CreateWithIntermediateDirs
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q18.sql-result_sets] [GOOD]
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/services/config/ut/ydb-services-config-ut
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|71.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut
|71.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut
|71.2%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_dependent_nopush.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-plan] [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_5_Query [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_on_top_of_apply.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-plan] [GOOD]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-result_sets]
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success
>> KqpScheme::DropChangefeedNegative
>> KqpScheme::AddColumnFamilyWithCompressionLevel
>> KqpScheme::UseUnauthorizedTable
>> KqpScheme::CreateTableWithPartitionAtKeysSimpleUncompat
>> test_users_groups_with_acl.py::test_query_create_group_by_tenant_admin[domain_login_only--true-YDB] [GOOD]
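The DescribeScheme trace above shows how schemeshard reports a missing path: StatusPathDoesNotExist together with the nearest resolved prefix (`/MyRoot`). Outside the unit-test harness the same check goes through the public scheme API; the following is a minimal sketch with the YDB C++ SDK scheme client, where the endpoint, database, and the `/Root/x` path are placeholder assumptions and the include paths may differ between SDK layouts:

```cpp
// Hypothetical standalone check mirroring the DescribeScheme call in the trace.
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_scheme/scheme.h>

#include <iostream>

int main() {
    // Placeholder endpoint/database; the test above drives an in-process server instead.
    NYdb::TDriver driver(NYdb::TDriverConfig()
                             .SetEndpoint("grpc://localhost:2136")
                             .SetDatabase("/Root"));

    NYdb::NScheme::TSchemeClient scheme(driver);
    // A nonexistent path comes back as a failed status (SCHEME_ERROR),
    // the SDK-side analogue of StatusPathDoesNotExist in the trace.
    auto result = scheme.DescribePath("/Root/x").GetValueSync();
    if (result.IsSuccess()) {
        std::cout << "path exists, entry: " << result.GetEntry().Name.c_str() << std::endl;
    } else {
        std::cout << "describe failed: " << result.GetIssues().ToString().c_str() << std::endl;
    }

    driver.Stop(true);
}
```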
>> TBSVWithReboots::CreateAssignDropIsAllowed [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-plan] [GOOD]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-plan] [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table
>> test_sql.py::TestCanonicalFolder1::test_case[write/update_on.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_simple_c.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-result_sets]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--false-YDB]
>> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-plan]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_11.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-result_sets]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-plan]
|71.1%| [LD] {RESULT} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/rate_limiter/ut/ydb-services-rate_limiter-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut
|71.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest
|71.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/services/config/ut/ydb-services-config-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::BrokenLockChangesDontLeak [GOOD] Test command err:
2025-06-03T10:33:52.503785Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:52.503897Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:52.503939Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ea2/r3tmp/tmpKXIZnW/pdisk_1.dat 2025-06-03T10:33:52.637921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.656322Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:52.657624Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946831801160 != 1748946831801164 2025-06-03T10:33:52.699571Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:33:52.699784Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:33:52.699949Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:52.699973Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:52.710645Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:52.789702Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:33:52.789739Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:33:52.789787Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:33:52.831058Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:33:52.831119Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:33:52.831408Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:33:52.831427Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:33:52.831516Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:33:52.831565Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:33:52.831587Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:33:52.831698Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:33:52.832258Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:52.832628Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:33:52.832646Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:33:52.849898Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:52.850264Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:52.850399Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:33:52.850494Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:52.863003Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:52.863247Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:52.863281Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:52.863512Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:52.863523Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:52.863530Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:52.863607Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:52.863629Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:52.863642Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:33:52.863732Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:52.874241Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:52.874376Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:52.874424Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:33:52.874431Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:52.874437Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:52.874446Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:52.874558Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:52.874568Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:52.874716Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:52.874748Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:52.874906Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:52.874917Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:52.874927Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:52.874935Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:52.874940Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:52.874948Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:52.874955Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:52.874971Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:52.874977Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:52.874985Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:33:52.875015Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:33:52.875021Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:52.875048Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
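The trace below reaches the interesting part of this test: a conflicting write fails with STATUS_LOCKS_BROKEN and the session replies ABORTED with "Transaction locks invalidated". From a client's point of view that status means "rerun the whole transaction". A hedged sketch of the usual handling with the C++ SDK retry helper follows; the query is a placeholder, only the `/Root/table` path is taken from the test:

```cpp
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_table/table.h>

using namespace NYdb;
using namespace NYdb::NTable;

// RetryOperationSync re-runs the whole lambda whenever it returns a retryable
// status; ABORTED from a broken optimistic lock is retryable by default, so a
// fresh transaction re-reads current data and re-acquires its locks.
TStatus UpsertWithRetry(TTableClient& client) {
    return client.RetryOperationSync([](TSession session) -> TStatus {
        auto tx = TTxControl::BeginTx(TTxSettings::SerializableRW()).CommitTx();
        // Placeholder statement; the test's conflicting write targets /Root/table.
        return session.ExecuteDataQuery(
            "UPSERT INTO `/Root/table` (key, value) VALUES (1u, 42u);",
            tx).GetValueSync();
    });
}
```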
2025-06-03T10:33:52.875113Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:52.875128Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:52.875150Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:52.875160Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:52.875166Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:52.875173Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:52.875178Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... X_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-03T10:34:24.647354Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:34:24.647377Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:226: Prepare transaction failed. txid 5 at tablet 72075186224037888 errors: Status: STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-03T10:34:24.647385Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is DelayComplete 2025-06-03T10:34:24.647390Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-03T10:34:24.647394Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:5] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:34:24.647399Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:5] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:34:24.647413Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:5] at 72075186224037888 is Executed 2025-06-03T10:34:24.647417Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:5] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:34:24.647422Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:5] at 72075186224037888 has finished 2025-06-03T10:34:24.647441Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-03T10:34:24.647447Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:5] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:34:24.647453Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 5 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_LOCKS_BROKEN 2025-06-03T10:34:24.647463Z node 16 :TX_DATASHARD ERROR: finish_propose_write_unit.cpp:168: Errors while proposing transaction txid 5 at tablet 72075186224037888 Status: 
STATUS_LOCKS_BROKEN Issues: { message: "Operation is aborting because it cannot acquire locks" issue_code: 2001 severity: 1 } 2025-06-03T10:34:24.647479Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:34:24.647526Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:798: SelfId: [16:904:2679], Table: `/Root/table` ([72057594046644480:2:1]), SessionActorId: [16:839:2679]Got LOCKS BROKEN for table `/Root/table`. ShardID=72075186224037888, Sink=[16:904:2679].{
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } 2025-06-03T10:34:24.647557Z node 16 :KQP_COMPUTE ERROR: kqp_write_actor.cpp:2935: SelfId: [16:897:2679], SessionActorId: [16:839:2679], statusCode=ABORTED. Issue=
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001
: Error: Operation is aborting because it cannot acquire locks, code: 2001 . sessionActorId=[16:839:2679]. isRollback=0 2025-06-03T10:34:24.647621Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:1848: SessionId: ydb://session/3?node_id=16&id=MWVhMjBhZWQtNThiNmQyMjctNTFlNTAwNTEtMjAwOGZiMmE=, ActorId: [16:839:2679], ActorState: ExecuteState, TraceId: 01jwtnpjff4fypyg4f1j2s1sac, got TEvKqpBuffer::TEvError in ExecuteState, status: ABORTED send to: [16:898:2679] from: [16:897:2679] 2025-06-03T10:34:24.647661Z node 16 :KQP_EXECUTER ERROR: kqp_executer_impl.h:1955: ActorId: [16:898:2679] TxId: 281474976715663. Ctx: { TraceId: 01jwtnpjff4fypyg4f1j2s1sac, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=16&id=MWVhMjBhZWQtNThiNmQyMjctNTFlNTAwNTEtMjAwOGZiMmE=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ABORTED: {
: Error: Transaction locks invalidated. Table: `/Root/table`., code: 2001 subissue: {
: Error: Operation is aborting because it cannot acquire locks, code: 2001 } } 2025-06-03T10:34:24.647721Z node 16 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=16&id=MWVhMjBhZWQtNThiNmQyMjctNTFlNTAwNTEtMjAwOGZiMmE=, ActorId: [16:839:2679], ActorState: ExecuteState, TraceId: 01jwtnpjff4fypyg4f1j2s1sac, Create QueryResponse for error on request, msg: 2025-06-03T10:34:24.647976Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 278003712, Sender [16:897:2679], Recipient [16:689:2579]: NKikimrDataEvents.TEvWrite TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-03T10:34:24.647984Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:182: Handle TTxWrite: at tablet# 72075186224037888 2025-06-03T10:34:24.648018Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 275709965, Sender [16:61:2108], Recipient [16:689:2579]: NKikimrLongTxService.TEvLockStatus LockId: 281474976715661 LockNode: 16 Status: STATUS_NOT_FOUND 2025-06-03T10:34:24.648189Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435074, Sender [16:689:2579], Recipient [16:689:2579]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:34:24.648197Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:34:24.648205Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:28: TTxWrite:: execute at tablet# 72075186224037888 2025-06-03T10:34:24.648226Z node 16 :TX_DATASHARD TRACE: datashard_write_operation.cpp:64: Parsing write transaction for 0 at 72075186224037888, record: TxMode: MODE_IMMEDIATE Locks { Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 } Op: Rollback } 2025-06-03T10:34:24.648240Z node 16 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-03T10:34:24.648251Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CheckWrite 2025-06-03T10:34:24.648260Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:34:24.648265Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CheckWrite 2025-06-03T10:34:24.648270Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:34:24.648275Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:34:24.648286Z node 16 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v400/0 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v401/0 ImmediateWriteEdgeReplied# v401/0 2025-06-03T10:34:24.648297Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:6] at 72075186224037888 2025-06-03T10:34:24.648302Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 
2025-06-03T10:34:24.648307Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:34:24.648311Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit ExecuteWrite 2025-06-03T10:34:24.648315Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit ExecuteWrite 2025-06-03T10:34:24.648321Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:245: Executing write operation for [0:6] at 72075186224037888 2025-06-03T10:34:24.648336Z node 16 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 2025-06-03T10:34:24.648342Z node 16 :TX_DATASHARD DEBUG: execute_write_unit.cpp:414: Skip empty write operation for [0:6] at 72075186224037888 2025-06-03T10:34:24.648354Z node 16 :TX_DATASHARD TRACE: execute_write_unit.cpp:47: add locks to result: 0 2025-06-03T10:34:24.648367Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:34:24.648371Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit ExecuteWrite 2025-06-03T10:34:24.648375Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit FinishProposeWrite 2025-06-03T10:34:24.648380Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:34:24.648387Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is DelayComplete 2025-06-03T10:34:24.648391Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit FinishProposeWrite 2025-06-03T10:34:24.648395Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:6] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:34:24.648399Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:6] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:34:24.648408Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:6] at 72075186224037888 is Executed 2025-06-03T10:34:24.648412Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:6] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:34:24.648417Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:6] at 72075186224037888 has finished 2025-06-03T10:34:24.648425Z node 16 :TX_DATASHARD TRACE: datashard__write.cpp:150: TTxWrite complete: at tablet# 72075186224037888 2025-06-03T10:34:24.648430Z node 16 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:6] at 72075186224037888 on unit FinishProposeWrite 2025-06-03T10:34:24.648435Z node 16 :TX_DATASHARD TRACE: finish_propose_write_unit.cpp:163: Propose transaction complete txid 6 at tablet 72075186224037888 send to client, propose latency: 0 ms, status: STATUS_COMPLETED 2025-06-03T10:34:24.648446Z node 16 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:34:24.651276Z node 16 
:TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [16:911:2729], Recipient [16:689:2579]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:34:24.651326Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:34:24.651337Z node 16 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [16:910:2728], serverId# [16:911:2729], sessionId# [0:0:0] 2025-06-03T10:34:24.651363Z node 16 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553224, Sender [16:591:2517], Recipient [16:689:2579]: NKikimr::TEvDataShard::TEvGetOpenTxs
|71.2%| [TA] $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... results_accumulator.log}
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut
|71.2%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_39_Query [GOOD] Test command err:
2025-06-03T10:31:31.224099Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668841666390582:2221];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000df4/r3tmp/tmp2opJVR/pdisk_1.dat 2025-06-03T10:31:31.257845Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:31.260403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:31:31.301520Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:31.301854Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668841666390362:2079] 1748946691212366 != 1748946691212369 TServer::EnableGrpc on GrpcPort 4576, node 1 2025-06-03T10:31:31.315881Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:31.315919Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:31.316963Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:31.318881Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000df4/r3tmp/yandexZel0C4.tmp 2025-06-03T10:31:31.318897Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000df4/r3tmp/yandexZel0C4.tmp 2025-06-03T10:31:31.318988Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000df4/r3tmp/yandexZel0C4.tmp 2025-06-03T10:31:31.319055Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:31.326648Z INFO: TTestServer started on Port 31823 GrpcPort 4576 TClient is connected to server localhost:31823 PQClient connected to localhost:4576
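The TxUsage trace around this point boots a single-node server with a PersQueue topic and then exercises the topic API: a producer writes into topic_A while a consumer reads and commits offsets (the "replying for commits" lines further down). A rough sketch of that client-side flow with the C++ topic SDK, assuming an already-created topic and consumer; names and endpoint are placeholders, the test itself uses its own TTopicSdkTestSetup instead:

```cpp
#include <ydb/public/sdk/cpp/client/ydb_driver/driver.h>
#include <ydb/public/sdk/cpp/client/ydb_topic/topic.h>

using namespace NYdb;
using namespace NYdb::NTopic;

int main() {
    TDriver driver(TDriverConfig().SetEndpoint("grpc://localhost:2136").SetDatabase("/Root"));
    TTopicClient client(driver);

    // Producer: blocking write session keyed by a message group, as in the test.
    auto writer = client.CreateSimpleBlockingWriteSession(
        TWriteSessionSettings().Path("topic_A").MessageGroupId("test-message_group_id"));
    writer->Write("message-1");
    writer->Close();

    // Consumer: confirm the partition assignment, then commit what was read;
    // the commit acks appear in the trace as "replying for commits".
    auto reader = client.CreateReadSession(
        TReadSessionSettings().ConsumerName("consumer").AppendTopics("topic_A"));
    for (bool done = false; !done;) {
        auto event = reader->GetEvent(/* block = */ true);
        if (auto* start = std::get_if<TReadSessionEvent::TStartPartitionSessionEvent>(&*event)) {
            start->Confirm();
        } else if (auto* data = std::get_if<TReadSessionEvent::TDataReceivedEvent>(&*event)) {
            data->Commit();
            done = true;
        }
    }
    reader->Close(TDuration::Seconds(5));
    driver.Stop(true);
}
```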
WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:31.370323Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:31.382089Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:31:31.738238Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668841666391149:2335], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.738445Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.741402Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668841666391183:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.742683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:31.746456Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668841666391189:2341], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.746614Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.747310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-03T10:31:31.747405Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668841666391191:2342], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:31.794475Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.806995Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.814416Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668841666391368:2504] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:31.841398Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.843268Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668841666391379:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:31.843397Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YWU1MTY4NmUtMjcyNTE1NjItNTJjMzdhZjYtZDI2ODVlNQ==, ActorId: [1:7511668841666391146:2333], ActorState: ExecuteState, TraceId: 01jwtnh9mnb81qjbae9207y2mz, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:31.843901Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668841666391534:2608] 2025-06-03T10:31:36.222693Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668841666390582:2221];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:36.222737Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:37.104915Z :WriteToTopic_Demo_2_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:37.108852Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:37.114309Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668867436195515:2688] connected; active server actors: 1 2025-06-03T10:31:37.114506Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-03T10:31:37.114653Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:37.114721Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:37.114926Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:37.115748Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:37.115857Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:37.115998Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:37.116043Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:37.116048Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:37.116050Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:37.116054Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03 ... eIdle] --- write ----------------- 2025-06-03T10:34:24.019061Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:34:24.019063Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000cconsumer 2025-06-03T10:34:24.019064Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000uconsumer 2025-06-03T10:34:24.019067Z node 21 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:34:24.019070Z node 21 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037894, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:34:24.019084Z node 21 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:34:24.019495Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user consumer readTimeStamp for offset 2 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-03T10:34:24.019511Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-03T10:34:24.019517Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:34:24.019530Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-03T10:34:24.019539Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 2 endOffset 2 with cookie 2 2025-06-03T10:34:24.019545Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 2 2025-06-03T10:34:24.019742Z :DEBUG: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 2 } } 2025-06-03T10:34:25.003964Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:34:25.004359Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 4 sessionId: test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0 describe result for acl check 2025-06-03T10:34:25.014558Z :INFO: [/Root] Read/commit by partition streams 
(cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-03T10:34:25.014590Z :INFO: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1001 BytesRead: 20 MessagesRead: 2 BytesReadCompressed: 20 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:34:25.015226Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 checking auth because of timeout 2025-06-03T10:34:25.015294Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 auth for : consumer 2025-06-03T10:34:25.015578Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 Handle describe topics response 2025-06-03T10:34:25.015618Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 auth is DEAD 2025-06-03T10:34:25.015642Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:34:25.017767Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 checking auth because of timeout 2025-06-03T10:34:25.017812Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 auth for : consumer 2025-06-03T10:34:25.018121Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 Handle describe topics response 2025-06-03T10:34:25.018158Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 auth is DEAD 2025-06-03T10:34:25.018183Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:34:26.013584Z :INFO: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] Closing read session. Close timeout: 0.000000s 2025-06-03T10:34:26.013620Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:1:2 2025-06-03T10:34:26.013634Z :INFO: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2000 BytesRead: 20 MessagesRead: 2 BytesReadCompressed: 20 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:34:26.013661Z :NOTICE: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:34:26.013676Z :DEBUG: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] [] Abort session to cluster 2025-06-03T10:34:26.013940Z :DEBUG: [/Root] 0x000011FB79D2BB10 TDirectReadSessionManager ServerSessionId=consumer_21_1_15209430904186310887_v1 Close 2025-06-03T10:34:26.013976Z :DEBUG: [/Root] 0x000011FB79D2BB10 TDirectReadSessionManager ServerSessionId=consumer_21_1_15209430904186310887_v1 Close 2025-06-03T10:34:26.013998Z :NOTICE: [/Root] [/Root] [cd0752af-74ec59c1-6108f5cb-ea3186df] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:34:26.015396Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-03T10:34:26.015408Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0] PartitionId [0] Generation [1] Write session will now close 2025-06-03T10:34:26.015415Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-03T10:34:26.015533Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-03T10:34:26.015541Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-03T10:34:26.014612Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 grpc read done: success# 0, data# { } 2025-06-03T10:34:26.014631Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 grpc read failed 2025-06-03T10:34:26.014642Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 grpc closed 2025-06-03T10:34:26.014656Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer consumer session consumer_21_1_15209430904186310887_v1 is DEAD 2025-06-03T10:34:26.015052Z node 21 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [21:7511669584963105624:2515] disconnected; active server actors: 1 2025-06-03T10:34:26.015056Z node 21 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [21:7511669584963105624:2515] client consumer disconnected session consumer_21_1_15209430904186310887_v1 2025-06-03T10:34:26.015093Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037894] Destroy direct read session consumer_21_1_15209430904186310887_v1 2025-06-03T10:34:26.015101Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669584963105627:2518] destroyed 2025-06-03T10:34:26.015115Z node 21 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [21:7511669584963105633:2521] 2025-06-03T10:34:26.015127Z node 21 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: consumer_21_1_15209430904186310887_v1 2025-06-03T10:34:26.017723Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [21:7511669584963105633:2521]: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 grpc read done: success# 0, data# { } 2025-06-03T10:34:26.017737Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [21:7511669584963105633:2521]: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1grpc read failed 2025-06-03T10:34:26.017745Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [21:7511669584963105633:2521]: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 grpc closed 2025-06-03T10:34:26.017749Z node 
21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [21:7511669584963105633:2521]: session cookie 2 consumer consumer session consumer_21_1_15209430904186310887_v1 proxy is DEAD 2025-06-03T10:34:26.017993Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0 grpc read done: success: 0 data: 2025-06-03T10:34:26.017996Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0 grpc read failed 2025-06-03T10:34:26.018002Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0 grpc closed 2025-06-03T10:34:26.018006Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|c5aeebbd-1b2c1fae-96473bec-d0d95564_0 is DEAD 2025-06-03T10:34:26.018137Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:34:26.018148Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:34:26.018210Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669584963105543:2503] destroyed 2025-06-03T10:34:26.018216Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669584963105540:2503] destroyed 2025-06-03T10:34:26.018229Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. |71.3%| [LD] {RESULT} $(B)/ydb/core/blobstorage/nodewarden/ut_sequence/ydb-core-blobstorage-nodewarden-ut_sequence ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/datashard/ut_snapshot/unittest >> DataShardSnapshots::UncommittedChangesRenameTable-UseSink [GOOD] Test command err: 2025-06-03T10:33:15.845320Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TCleanupTablesActor] ActorId: [1:324:2367], Scheduled retry for error: {
: Error: Retry LookupError for table .metadata/workload_manager/delayed_requests } 2025-06-03T10:33:15.845400Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:33:15.845421Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/workload_manager/classifiers/resource_pool_classifiers;error=incorrect path status: LookupError; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000ead/r3tmp/tmpK2VM30/pdisk_1.dat 2025-06-03T10:33:15.969328Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046644480 2025-06-03T10:33:15.986779Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:33:15.987797Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:32:2079] 1748946795473223 != 1748946795473227 2025-06-03T10:33:16.029703Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:213: actor# [1:59:2106] HANDLE TEvClientConnected success connect from tablet# 72057594046447617 2025-06-03T10:33:16.029926Z node 1 :TX_PROXY DEBUG: client.cpp:89: Handle TEvAllocateResult ACCEPTED RangeBegin# 281474976715656 RangeEnd# 281474976720656 txAllocator# 72057594046447617 2025-06-03T10:33:16.030113Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:33:16.030142Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:33:16.040667Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:33:16.113440Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:315: actor# [1:59:2106] Handle TEvProposeTransaction 2025-06-03T10:33:16.113470Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:238: actor# [1:59:2106] TxId# 281474976715657 ProcessProposeTransaction 2025-06-03T10:33:16.113498Z node 1 :TX_PROXY DEBUG: proxy_impl.cpp:257: actor# [1:59:2106] Cookie# 0 userReqId# "" txid# 281474976715657 SEND to# [1:639:2547] 2025-06-03T10:33:16.128553Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1562: Actor# [1:639:2547] txid# 281474976715657 Bootstrap EvSchemeRequest record: Transaction { ModifyScheme { WorkingDir: "/Root" OperationType: ESchemeOpCreateTable CreateTable { Name: "table-1" Columns { Name: "key" Type: "Uint32" FamilyName: "" NotNull: false } Columns { Name: "value" Type: "Uint32" FamilyName: "" NotNull: false } KeyColumnNames: "key" UniformPartitionsCount: 1 } } } ExecTimeoutPeriod: 18446744073709551615 2025-06-03T10:33:16.128590Z node 1 :TX_PROXY DEBUG: schemereq.cpp:569: Actor# [1:639:2547] txid# 281474976715657 Bootstrap, UserSID: CheckAdministrator: 0 CheckDatabaseAdministrator: 0 2025-06-03T10:33:16.128810Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1627: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P6 2025-06-03T10:33:16.128824Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1617: Actor# [1:639:2547] txid# 281474976715657 TEvNavigateKeySet requested from SchemeCache 2025-06-03T10:33:16.128874Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1450: Actor# [1:639:2547] txid# 
281474976715657 HANDLE EvNavigateKeySetResult TFlatSchemeReq marker# P5 ErrorCount# 0 2025-06-03T10:33:16.128912Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1497: Actor# [1:639:2547] HANDLE EvNavigateKeySetResult, txid# 281474976715657 shardToRequest# 72057594046644480 DomainKey# [OwnerId: 72057594046644480, LocalPathId: 1] DomainInfo.Params# Version: 1 PlanResolution: 500 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 RedirectRequired# true 2025-06-03T10:33:16.128928Z node 1 :TX_PROXY DEBUG: schemereq.cpp:103: Actor# [1:639:2547] txid# 281474976715657 SEND to# 72057594046644480 shardToRequest {TEvModifySchemeTransaction txid# 281474976715657 TabletId# 72057594046644480} 2025-06-03T10:33:16.128994Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1352: Actor# [1:639:2547] txid# 281474976715657 HANDLE EvClientConnected 2025-06-03T10:33:16.129371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:33:16.129647Z node 1 :TX_PROXY DEBUG: schemereq.cpp:1374: Actor# [1:639:2547] txid# 281474976715657 Status StatusAccepted HANDLE {TEvModifySchemeTransactionResult Status# StatusAccepted txid# 281474976715657} 2025-06-03T10:33:16.129658Z node 1 :TX_PROXY DEBUG: schemereq.cpp:549: Actor# [1:639:2547] txid# 281474976715657 SEND to# [1:591:2517] Source {TEvProposeTransactionStatus txid# 281474976715657 Status# 53} 2025-06-03T10:33:16.144534Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828672, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvBoot 2025-06-03T10:33:16.144802Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3097: StateInit, received event# 268828673, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvRestored 2025-06-03T10:33:16.144896Z node 1 :TX_DATASHARD INFO: datashard.cpp:373: TDataShard::OnActivateExecutor: tablet 72075186224037888 actor [1:663:2568] 2025-06-03T10:33:16.144969Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:630: TxInitSchema.Execute 2025-06-03T10:33:16.155304Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3110: StateInactive, received event# 268828684, Sender [1:655:2562], Recipient [1:663:2568]: NKikimr::TEvTablet::TEvFollowerSyncComplete 2025-06-03T10:33:16.155493Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:696: TxInitSchema.Complete 2025-06-03T10:33:16.155521Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:48: TDataShard::TTxInit::Execute 2025-06-03T10:33:16.155733Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1315: LoadChangeRecords: QueueSize: 0, at tablet: 72075186224037888 2025-06-03T10:33:16.155743Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1371: LoadLockChangeRecords at tablet: 72075186224037888 2025-06-03T10:33:16.155751Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1420: LoadChangeRecordCommits at tablet: 72075186224037888 2025-06-03T10:33:16.155822Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:92: TDataShard::TTxInit::Complete 2025-06-03T10:33:16.155841Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:100: TDataShard::TTxInitRestored::Execute 2025-06-03T10:33:16.155853Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:111: DataShard 72075186224037888 persisting started state actor id [1:680:2568] in generation 1 2025-06-03T10:33:16.166186Z node 1 :TX_DATASHARD DEBUG: datashard__init.cpp:120: TDataShard::TTxInitRestored::Complete 2025-06-03T10:33:16.171842Z node 1 
:TX_DATASHARD INFO: datashard.cpp:417: Switched to work state WaitScheme tabletId 72075186224037888 2025-06-03T10:33:16.171950Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:457: 72075186224037888 not sending time cast registration request in state WaitScheme: missing processing params 2025-06-03T10:33:16.172024Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1250: Change sender created: at tablet: 72075186224037888, actorId: [1:682:2578] 2025-06-03T10:33:16.172030Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:1255: Trying to activate change sender: at tablet: 72075186224037888 2025-06-03T10:33:16.172036Z node 1 :TX_DATASHARD INFO: datashard.cpp:1272: Cannot activate change sender: at tablet: 72075186224037888, state: WaitScheme 2025-06-03T10:33:16.172043Z node 1 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 2025-06-03T10:33:16.172123Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435072, Sender [1:663:2568], Recipient [1:663:2568]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.172133Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3154: StateWork, processing event TEvPrivate::TEvProgressTransaction 2025-06-03T10:33:16.172259Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:96: TTxCheckInReadSets::Execute at 72075186224037888 2025-06-03T10:33:16.172287Z node 1 :TX_DATASHARD DEBUG: datashard__cleanup_in_rs.cpp:139: TTxCheckInReadSets::Complete found 0 read sets to remove in 72075186224037888 2025-06-03T10:33:16.172416Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:16: TTxProgressTransaction::Execute at 72075186224037888 2025-06-03T10:33:16.172427Z node 1 :TX_DATASHARD DEBUG: datashard_pipeline.cpp:272: GetNextActiveOp at 72075186224037888 active 0 active planned 0 immediate 0 planned 0 2025-06-03T10:33:16.172436Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:385: Check unit PlanQueue at 72075186224037888 2025-06-03T10:33:16.172443Z node 1 :TX_DATASHARD TRACE: plan_queue_unit.cpp:59: TPlanQueueUnit at 72075186224037888 has no attached operations 2025-06-03T10:33:16.172449Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:403: Unit PlanQueue has no ready operations at 72075186224037888 2025-06-03T10:33:16.172456Z node 1 :TX_DATASHARD INFO: datashard__progress_tx.cpp:48: No tx to execute at 72075186224037888 TxInFly 0 2025-06-03T10:33:16.172462Z node 1 :TX_DATASHARD DEBUG: datashard__progress_tx.cpp:119: TTxProgressTransaction::Complete at 72075186224037888 2025-06-03T10:33:16.172478Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [1:671:2572], Recipient [1:663:2568]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.172485Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:33:16.172494Z node 1 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [1:661:2566], serverId# [1:671:2572], sessionId# [0:0:0] 2025-06-03T10:33:16.172520Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [1:410:2404], Recipient [1:671:2572] 2025-06-03T10:33:16.172527Z node 1 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:33:16.172550Z node 1 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 
2025-06-03T10:33:16.172611Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 72075186224037888 on unit CheckSchemeTx 2025-06-03T10:33:16.172625Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:132: Propose scheme transaction at tablet 72075186224037888 txId 281474976715657 ssId 72057594046644480 seqNo 2:1 2025-06-03T10:33:16.172646Z node 1 :TX_DATASHARD DEBUG: check_scheme_tx_unit.cpp:220: Prepared scheme transaction txId 281474976715657 at tablet 72075186224037888 2025-06-03T10:33:16.172658Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715657] at 72075186224037888 is ExecutedNoMoreRestarts 2025-06-03T10:33:16.172663Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715657] at 72075186224037888 executing on unit CheckSchemeTx 2025-06-03T10:33:16.172669Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715657] at 72075186224037888 to execution unit StoreSchemeTx 2025-06-03T10:33:16.172674Z node 1 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715657] at 7 ... eId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. State: WaitResolveState, Executing KQP transaction on shard: 72075186224037888, tasks: [], lockTxId: (empty maybe), locks: Locks { LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true } Op: Rollback, immediate: 1 2025-06-03T10:34:10.254362Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1831: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ExecuteDatashardTransaction traceId.verbosity: 0 2025-06-03T10:34:10.254369Z node 13 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 1, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-03T10:34:10.254374Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:136: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, datashard 72075186224037888 not finished yet: Executing 2025-06-03T10:34:10.254378Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:157: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
ActorState: WaitResolveState, waiting for 0 compute actor(s) and 1 datashard(s): DS 72075186224037888 (Executing), 2025-06-03T10:34:10.254382Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:2367: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. ActorState: WaitResolveState, immediate tx, become ExecuteState 2025-06-03T10:34:10.254425Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269549568, Sender [13:981:2682], Recipient [13:950:2766]: NKikimrTxDataShard.TEvProposeTransaction TxKind: TX_KIND_DATA SourceDeprecated { RawX1: 981 RawX2: 55834577530 } TxBody: " \0018\001j3\010\001\032\'\n#\t\215\023\000\000\000\000\001\000\021\000\000\001\000\000\020\000\001\030\001 \000)\000\001\205\000\000\000\000\0010\0028\001 \003\"\006\020\0020\000@\n\220\001\000" TxId: 281474976715665 ExecLevel: 0 Flags: 8 2025-06-03T10:34:10.254434Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3135: StateWork, processing event TEvDataShard::TEvProposeTransaction 2025-06-03T10:34:10.254460Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 2146435074, Sender [13:950:2766], Recipient [13:950:2766]: NKikimr::NDataShard::TDataShard::TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:34:10.254465Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3156: StateWork, processing event TEvPrivate::TEvDelayedProposeTransaction 2025-06-03T10:34:10.254477Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:33: TTxProposeTransactionBase::Execute at 72075186224037888 2025-06-03T10:34:10.254506Z node 13 :TX_DATASHARD TRACE: key_validator.cpp:54: -- AddWriteRange: (Uint64 : 281474976715661, Uint64 : 72075186224037888, Uint64 : 72057594046644480, Uint64 : 2) table: [1:997:0] 2025-06-03T10:34:10.254520Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CheckDataTx 2025-06-03T10:34:10.254529Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-03T10:34:10.254533Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CheckDataTx 2025-06-03T10:34:10.254537Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit BuildAndWaitDependencies 2025-06-03T10:34:10.254540Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit BuildAndWaitDependencies 2025-06-03T10:34:10.254548Z node 13 :TX_DATASHARD TRACE: datashard.cpp:2365: GetMvccTxVersion at 72075186224037888 CompleteEdge# v500/281474976715663 IncompleteEdge# v{min} UnprotectedReadEdge# v400/18446744073709551615 ImmediateWriteEdge# v400/18446744073709551615 ImmediateWriteEdgeReplied# v1000/18446744073709551615 2025-06-03T10:34:10.254559Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:483: Activated operation [0:281474976715665] at 72075186224037888 2025-06-03T10:34:10.254563Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-03T10:34:10.254567Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for 
[0:281474976715665] at 72075186224037888 executing on unit BuildAndWaitDependencies 2025-06-03T10:34:10.254571Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit ExecuteKqpDataTx 2025-06-03T10:34:10.254575Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit ExecuteKqpDataTx 2025-06-03T10:34:10.254587Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:236: Operation [0:281474976715665] (execute_kqp_data_tx) at 72075186224037888 set memory limit 4193448 2025-06-03T10:34:10.254598Z node 13 :TX_DATASHARD TRACE: datashard_kqp.cpp:787: KqpEraseLock LockId: 281474976715661 DataShard: 72075186224037888 Generation: 1 Counter: 0 SchemeShard: 72057594046644480 PathId: 2 HasWrites: true 2025-06-03T10:34:10.254611Z node 13 :TX_DATASHARD TRACE: execute_kqp_data_tx_unit.cpp:482: add locks to result: 0 2025-06-03T10:34:10.254622Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-03T10:34:10.254626Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit ExecuteKqpDataTx 2025-06-03T10:34:10.254630Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit FinishPropose 2025-06-03T10:34:10.254634Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-03T10:34:10.254641Z node 13 :TX_DATASHARD TRACE: finish_propose_unit.cpp:167: Propose transaction complete txid 281474976715665 at tablet 72075186224037888 send to client, exec latency: 0 ms, propose latency: 0 ms, status: COMPLETE 2025-06-03T10:34:10.254656Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is DelayComplete 2025-06-03T10:34:10.254659Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit FinishPropose 2025-06-03T10:34:10.254665Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1916: Add [0:281474976715665] at 72075186224037888 to execution unit CompletedOperations 2025-06-03T10:34:10.254668Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1827: Trying to execute [0:281474976715665] at 72075186224037888 on unit CompletedOperations 2025-06-03T10:34:10.254676Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1862: Execution status for [0:281474976715665] at 72075186224037888 is Executed 2025-06-03T10:34:10.254679Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1910: Advance execution plan for [0:281474976715665] at 72075186224037888 executing on unit CompletedOperations 2025-06-03T10:34:10.254683Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1922: Execution plan for [0:281474976715665] at 72075186224037888 has finished 2025-06-03T10:34:10.254693Z node 13 :TX_DATASHARD DEBUG: datashard__propose_tx_base.cpp:152: TTxProposeTransactionBase::Complete at 72075186224037888 2025-06-03T10:34:10.254698Z node 13 :TX_DATASHARD TRACE: datashard_pipeline.cpp:1933: Complete execution for [0:281474976715665] at 72075186224037888 on unit FinishPropose 2025-06-03T10:34:10.254704Z node 13 :TX_DATASHARD DEBUG: cdc_stream_heartbeat.cpp:87: [CdcStreamHeartbeat] Emit heartbeats: at tablet# 72075186224037888 
2025-06-03T10:34:10.254738Z node 13 :KQP_EXECUTER DEBUG: kqp_data_executer.cpp:1364: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Got propose result, shard: 72075186224037888, status: COMPLETE, error: 2025-06-03T10:34:10.254757Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:34:10.254767Z node 13 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [13:981:2682] TxId: 281474976715665. Ctx: { TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 2025-06-03T10:34:10.254787Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, ActorId: [13:837:2682], ActorState: CleanupState, TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, EndCleanup, isFinal: 0 2025-06-03T10:34:10.254827Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=13&id=OGJiM2JlMmYtYmJjYzkzYjMtZTcwZjk2NzEtYmJmMWFiYzU=, ActorId: [13:837:2682], ActorState: CleanupState, TraceId: 01jwtnp4e4e3dm6ntyyhpsfvfz, Sent query response back to proxy, proxyRequestId: 8, proxyId: [13:57:2104] 2025-06-03T10:34:10.501505Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269877761, Sender [13:990:2792], Recipient [13:950:2766]: NKikimr::TEvTabletPipe::TEvServerConnected 2025-06-03T10:34:10.501535Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3165: StateWork, processing event TEvTabletPipe::TEvServerConnected 2025-06-03T10:34:10.501545Z node 13 :TX_DATASHARD DEBUG: datashard.cpp:3710: Server connected at leader tablet# 72075186224037888, clientId# [13:989:2791], serverId# [13:990:2792], sessionId# [0:0:0] 2025-06-03T10:34:10.501567Z node 13 :TX_DATASHARD TRACE: datashard_impl.h:3129: StateWork, received event# 269553224, Sender [13:591:2517], Recipient [13:950:2766]: NKikimr::TEvDataShard::TEvGetOpenTxs |71.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |71.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_by_not_single_key_column_failure [GOOD] |71.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_data_cleanup/ydb-core-tx-datashard-ut_data_cleanup |71.3%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_init/ydb-core-tx-datashard-ut_init >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-result_sets] >> 
test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_inner.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-plan] |71.3%| [LD] {RESULT} $(B)/ydb/core/tx/sequenceproxy/ut/ydb-core-tx-sequenceproxy-ut |71.3%| [LD] {RESULT} $(B)/ydb/core/graph/shard/ut/ydb-core-graph-shard-ut |71.3%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/ut/ydb-core-ymq-actor-ut |71.3%| [LD] {RESULT} $(B)/ydb/core/ymq/actor/yc_search_ut/ydb-core-ymq-actor-yc_search_ut |71.3%| [LD] {RESULT} $(B)/ydb/services/config/ut/ydb-services-config-ut >> test_sql.py::TestCanonicalFolder1::test_case[simple/q19.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-plan] |71.3%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/ut/ydb-core-fq-libs-row_dispatcher-ut ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignDropIsAllowed [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:34:05.298284Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:34:05.298319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:34:05.298326Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:34:05.298332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:34:05.298347Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:34:05.298351Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:34:05.298362Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:34:05.298377Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 
604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:34:05.298534Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:34:05.298649Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:34:05.321556Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:34:05.321592Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:05.321744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:34:05.325027Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:34:05.327354Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:34:05.327430Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:34:05.331397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:34:05.331491Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:34:05.343873Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.345794Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:34:05.346648Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:05.346726Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:34:05.348728Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:34:05.348744Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:05.348767Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:34:05.348778Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:34:05.348786Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:34:05.348834Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is 
[0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:34:05.350594Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:34:05.383353Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:34:05.385796Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.385950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:34:05.386002Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:34:05.386032Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387067Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387101Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:34:05.387159Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.387200Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:34:05.387206Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:34:05.387210Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:34:05.388003Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388035Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388046Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:34:05.388694Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388710Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: 
NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:05.388716Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:05.388723Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:34:05.389647Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:34:05.390282Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:34:05.390345Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:34:05.390614Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:05.390663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:34:05.390673Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:05.395946Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ation and all the parts is done, operation id: 1003:0 2025-06-03T10:34:30.871511Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1003:0 2025-06-03T10:34:30.871533Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:34:30.871537Z node 101 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1003, publications: 3, subscribers: 0 2025-06-03T10:34:30.871540Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:34:30.871542Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 2], 7 2025-06-03T10:34:30.871544Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:34:30.871696Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:34:30.871705Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:34:30.871726Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:34:30.871732Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:34:30.872025Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:30.872034Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:30.872051Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:34:30.872056Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:34:30.872089Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:34:30.872096Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:34:30.872116Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:30.872120Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [101:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 1 2025-06-03T10:34:30.872124Z node 101 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [101:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 2 2025-06-03T10:34:30.872127Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [101:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 3 FAKE_COORDINATOR: Erasing txId 1003 2025-06-03T10:34:30.872251Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872261Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872265Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:30.872268Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:34:30.872271Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:34:30.872334Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:34:30.872341Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:34:30.872350Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:34:30.872367Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872373Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872376Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:30.872378Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:34:30.872381Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:34:30.872426Z node 101 :HIVE INFO: tablet_helpers.cpp:1356: 
[72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-03T10:34:30.872452Z node 101 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-03T10:34:30.872480Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:30.872547Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:34:30.872565Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872575Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.872579Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:30.872584Z node 101 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-03T10:34:30.872589Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:34:30.872597Z node 101 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:34:30.872970Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.873272Z node 101 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:34:30.873330Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:30.873391Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:34:30.873678Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:34:30.873712Z node 101 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 2025-06-03T10:34:30.873801Z node 101 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" 
Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:34:30.873827Z node 101 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 34us result status StatusPathDoesNotExist 2025-06-03T10:34:30.873859Z node 101 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirA/BSVolume_4\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/DirA\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/DirA/BSVolume_4" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/DirA" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Query [GOOD] Test command err: 2025-06-03T10:31:31.724209Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668843965586853:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:31.724365Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000da5/r3tmp/tmprItYyk/pdisk_1.dat 2025-06-03T10:31:31.776111Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:31.821776Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:31.824086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668843965586693:2079] 1748946691723111 != 1748946691723114 TServer::EnableGrpc on GrpcPort 12762, node 1 2025-06-03T10:31:31.839333Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000da5/r3tmp/yandexhoPNjj.tmp 2025-06-03T10:31:31.839357Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000da5/r3tmp/yandexhoPNjj.tmp 2025-06-03T10:31:31.839442Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000da5/r3tmp/yandexhoPNjj.tmp 2025-06-03T10:31:31.839508Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:31.844993Z INFO: TTestServer started on Port 7719 GrpcPort 12762 TClient is connected to server localhost:7719 PQClient connected to localhost:12762 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:31:31.875456Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:31.875488Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:31.882143Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:31:31.887692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.901463Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:31.915145Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:31:31.917661Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:31.956087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:32.267620Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668848260554785:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.267641Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668848260554809:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.267648Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.268520Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:32.268851Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668848260554842:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.268868Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:32.271506Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668848260554812:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:32.319921Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:32.330989Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668848260554934:2461] txid# 281474976715664, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:32.337965Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:32.352210Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668848260554945:2353], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:32.352809Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YWY3NzhlNTgtN2UzYzE5ZTEtMzk1YWRjMTYtZDA1OWVjYmU=, ActorId: [1:7511668848260554780:2334], ActorState: ExecuteState, TraceId: 01jwtnha5b04qd9t8468ee1529, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:32.353275Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:31:32.365846Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668848260555173:2609] 2025-06-03T10:31:36.723958Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668843965586853:2199];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:36.724002Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:37.571358Z :Restarts INFO: TTopicSdkTestSetup started 2025-06-03T10:31:37.586220Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:37.591968Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668869735391855:2687] connected; active server actors: 1 2025-06-03T10:31:37.592160Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-03T10:31:37.592374Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:37.592428Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:37.592549Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:37.593286Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:37.593409Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:37.593544Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:37.593583Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:37.593588Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:7 ... size 216 offset: -1 2025-06-03T10:34:26.585369Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:1233: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] Topic 'topic_A' partition {0, {23, 281474976715680}, 100000} part blob processing sourceId '\0test-message_group_id' seqNo 1 partNo 0 2025-06-03T10:34:26.585432Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:1333: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] Topic 'topic_A' partition {0, {23, 281474976715680}, 100000} part blob complete sourceId '\0test-message_group_id' seqNo 1 partNo 0 FormedBlobsCount 0 NewHead: Offset 0 PartNo 0 PackedSize 305 count 1 nextOffset 1 batches 1 2025-06-03T10:34:26.585501Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:1623: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] Add new write blob: topic 'topic_A' partition {0, {23, 281474976715680}, 100000} compactOffset 0,1 HeadOffset 0 endOffset 0 curOffset 1 D0000100000_00000000000000000000_00000_0000000001_00000| size 293 WTime 1748946866585 2025-06-03T10:34:26.585531Z node 23 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:34:26.585541Z node 23 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] --- delete ---------------- 2025-06-03T10:34:26.585544Z node 23 :PERSQUEUE DEBUG: partition.cpp:2192: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] [X0000100000, X0000100001) 2025-06-03T10:34:26.585547Z node 23 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] --- write ----------------- 2025-06-03T10:34:26.585550Z node 23 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] M0000100000ptest-message_group_id 2025-06-03T10:34:26.585554Z node 23 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] D0000100000_00000000000000000000_00000_0000000001_00000| 
2025-06-03T10:34:26.585556Z node 23 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] J0000100000 2025-06-03T10:34:26.585560Z node 23 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] --- rename ---------------- 2025-06-03T10:34:26.585563Z node 23 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] =========================== 2025-06-03T10:34:26.585583Z node 23 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:34:26.585595Z node 23 :PERSQUEUE DEBUG: read.h:300: CacheProxy. Passthrough blob. Partition 100000 offset 0 partNo 0 count 1 size 293 2025-06-03T10:34:26.585909Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 100000 offset 0 count 1 size 293 actorID [23:7511669595305767243:2484] 2025-06-03T10:34:26.585963Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 238 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:34:26.585985Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] TPartition::ReplyWrite. Partition: {0, {23, 281474976715680}, 100000} 2025-06-03T10:34:26.586005Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037894, Partition: {0, {23, 281474976715680}, 100000}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id', Topic: 'topic_A', Partition: {0, {23, 281474976715680}, 100000}, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-03T10:34:26.586052Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:34:26.586074Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. 
Tablet '72075186224037894' partition 100000 offset 0 partno 0 count 1 parts 0 size 293 2025-06-03T10:34:26.586091Z node 23 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:34:26.586462Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-03T10:34:26.586512Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 1 written_in_tx { } } write_statistics { persisting_time { } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-03T10:34:26.586527Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] OnAck: seqNo=1, txId={ydb://session/3?node_id=23&id=MjY3ZjZmMmUtNmY2MjBmZDYtNDcxZjI3MzgtNmU1MGY1MjE=, 01jwtnpmaxcqm2ax458fak7hha}, WriteCount=1, AckCount=1 2025-06-03T10:34:26.586538Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: acknoledged message 1 2025-06-03T10:34:26.587920Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:5072: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976715680 LockNode: 23 Status: STATUS_NOT_FOUND 2025-06-03T10:34:26.587941Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:5087: [PQ: 72075186224037894] TxWriteInfo: WriteId {23, 281474976715680}, TxId (empty maybe), Status STATUS_SUBSCRIBED 2025-06-03T10:34:26.587944Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:5101: [PQ: 72075186224037894] delete partitions for WriteId {23, 281474976715680} 2025-06-03T10:34:26.587951Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {23, 281474976715680}, 100000} 2025-06-03T10:34:26.588002Z node 23 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037935;self_id=[23:7511669595305767923:2546];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037935;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:34:26.588028Z node 23 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037936;self_id=[23:7511669595305767907:2533];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037936;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:34:26.588040Z node 23 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037940;self_id=[23:7511669595305767921:2544];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037940;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:34:26.588051Z node 23 :TX_COLUMNSHARD_WRITE WARN: log.cpp:784: tablet_id=72075186224037926;self_id=[23:7511669595305767936:2559];ev=NKikimr::NEvents::TDataEvents::TEvWrite;tablet_id=72075186224037926;event=TEvWrite;fline=events.h:103;event=ev_write_error;status=STATUS_BAD_REQUEST;details=only single operation is supported;tx_id=0; 2025-06-03T10:34:26.588062Z node 23 :PERSQUEUE DEBUG: partition.cpp:3773: [PQ: 72075186224037894, Partition: {0, 
{23, 281474976715680}, 100000}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-03T10:34:26.588092Z node 23 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:34:26.588099Z node 23 :PERSQUEUE DEBUG: read.h:338: CacheProxy. Delete blobs from D0000100000(+) to D0000100001(-) 2025-06-03T10:34:26.589643Z node 23 :PERSQUEUE DEBUG: cache_eviction.h:365: Deleting head blob in L1. Partition 100000 offset 0 count 1 actorID [23:7511669595305767243:2484] 2025-06-03T10:34:26.589775Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:5134: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {23, 281474976715680}, 100000} 2025-06-03T10:34:26.589794Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:3588: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {23, 281474976715680} 2025-06-03T10:34:26.589813Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:34:26.590340Z node 23 :PERSQUEUE DEBUG: pq_l2_cache.cpp:147: PQ Cache (L2). Removed. Tablet '72075186224037894' partition 100000 offset 0 partno 0 count 1 parts 0 2025-06-03T10:34:26.590490Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:34:26.702920Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: close. Timeout 0.000000s 2025-06-03T10:34:26.702929Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session will now close 2025-06-03T10:34:26.702939Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-03T10:34:26.702385Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037894] server connected, pipe [23:7511669595305770507:4312], now have 1 active actors on pipe 2025-06-03T10:34:26.703126Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-03T10:34:26.703133Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-03T10:34:26.705472Z node 23 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 4 sessionId: test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0 grpc read done: success: 0 data: 2025-06-03T10:34:26.705498Z node 23 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 4 sessionId: test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0 grpc read failed 2025-06-03T10:34:26.705507Z node 23 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 4 sessionId: test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0 grpc closed 2025-06-03T10:34:26.705510Z node 23 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 4 sessionId: test-message_group_id|22093976-6b5b2e39-d39cf90d-bbea11b0_0 is DEAD 2025-06-03T10:34:26.705862Z node 23 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:34:26.705879Z node 23 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 
2025-06-03T10:34:26.705985Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [23:7511669595305770118:2989] destroyed 2025-06-03T10:34:26.705994Z node 23 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [23:7511669595305770121:2989] destroyed 2025-06-03T10:34:26.706009Z node 23 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_aliases_and_apply.sql-result_sets] [GOOD] >> KqpScheme::DropChangefeedNegative [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-plan] >> KqpScheme::UseUnauthorizedTable [GOOD] >> KqpScheme::DropExternalDataSource >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-plan] [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSimpleUncompat [GOOD] >> KqpScheme::AddColumnFamilyWithCompressionLevel [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-plan] [GOOD] >> KqpScheme::AddDropColumn >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] >> KqpScheme::UseNonexistentTable >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-result_sets] >> KqpScheme::CreateTableWithPartitionAtKeysSimpleCompat >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-result_sets] |71.4%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_background_cleaning/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-result_sets] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSimpleCompat [GOOD] >> KqpScheme::DropExternalDataSource [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-plan] [GOOD] >> KqpScheme::AddDropColumn [GOOD] >> KqpScheme::UseNonexistentTable [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert.sql-result_sets] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v0] [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysSigned >> KqpScheme::DropExternalTable >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-result_sets] >> KqpScheme::AddChangefeedWhenDisabled >> KqpScheme::UseDroppedTable >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-result_sets] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_group_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] |71.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |71.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_sql.py::TestCanonicalFolder1::test_case[join/join_using_index.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-plan] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join0.test] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-result_sets] >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_12.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-plan] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query >> KqpScheme::CreateTableWithPartitionAtKeysSigned [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysComplex >> KqpScheme::DropExternalTable [GOOD] >> KqpScheme::DropDependentExternalDataSource >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] >> 
KqpScheme::AddChangefeedWhenDisabled [GOOD] >> KqpScheme::AddChangefeedNegative >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_rp_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-plan] >> KqpScheme::UseDroppedTable [GOOD] >> KqpScheme::TouchIndexAfterMoveTableWrite >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--true] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-plan] |71.4%| [TA] $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... results_accumulator.log} >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-plan] [GOOD] |71.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--false] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftonly.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_nonselector_aliases.sql-result_sets] [GOOD] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--true] >> KqpScheme::CreateTableWithPartitionAtKeysComplex [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q2.sql-result_sets] [GOOD] >> KqpScheme::DropDependentExternalDataSource [GOOD] >> KqpScheme::AddChangefeedNegative [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-plan] [GOOD] >> TxUsage::WriteToTopic_Demo_27_Query [GOOD] >> KqpScheme::TouchIndexAfterMoveTableWrite [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-plan] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--true] >> test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-plan] >> KqpScheme::CreateTableWithPartitionAtKeysUuid >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-result_sets] >> overlapping_portions.py::TestOverlappingPortions::test >> KqpScheme::UnknownFamilyTest >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-5.test] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-plan] >> TxUsage::WriteToTopic_Demo_50_Query [GOOD] >> KqpScheme::DropAsyncReplication >> TxUsage::WriteToTopic_Demo_28_Table >> KqpScheme::AlterColumnTableTiering >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-result_sets] >> 
test_sql.py::TestCanonicalFolder1::test_case[write/upsert_cast.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-plan] [GOOD] >> KqpScheme::CreateTableWithPartitionAtKeysUuid [GOOD] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_13.sql-result_sets] [GOOD] >> KqpScheme::UnknownFamilyTest [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-plan] [GOOD] >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_with_agg.sql-result_sets] [GOOD] >> KqpScheme::DropAsyncReplication [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_range_single_rp.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] >> KqpScheme::CreateTableWithFamiliesRegular >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-plan] >> KqpScheme::TwoSimilarFamiliesTest >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-result_sets] >> KqpScheme::DropAsyncReplicationCascade >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] >> KqpScheme::CreateTableWithFamiliesRegular [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_leftsemi.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] [GOOD] >> KqpScheme::TwoSimilarFamiliesTest [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q20.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] |71.4%| [TA] {RESULT} $(B)/ydb/core/mind/hive/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateTableWithFamiliesRegular [GOOD] Test command err: Trying to start YDB, gRPC: 2188, MsgBus: 32203 2025-06-03T10:34:30.493474Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669609673133324:2263];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:30.493496Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001706/r3tmp/tmpVwa2is/pdisk_1.dat 2025-06-03T10:34:30.626774Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669609673133094:2079] 1748946870491126 != 1748946870491129 2025-06-03T10:34:30.629724Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:30.629750Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:30.642392Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:30.645578Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 2188, node 1 2025-06-03T10:34:30.745077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:30.745103Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:30.745106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:30.745156Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32203 TClient is connected to server localhost:32203 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:30.869072Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:34:30.882329Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.948765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.967333Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.977733Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:31.045829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669613968102035:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.045861Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.241389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.250834Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.260683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.274683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.288669Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.303573Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.317423Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.341121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669613968102686:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.341160Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.341174Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669613968102691:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.342151Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:31.344587Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669613968102693:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:31.442669Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669613968102744:3402] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:31.744963Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 13159, MsgBus: 21111 2025-06-03T10:34:32.033851Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669617972347748:2075];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001706/r3tmp/tmpkrHiD9/pdisk_1.dat 2025-06-03T10:34:32.034209Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:34:32.052532Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 13159, node 2 2025-06-03T10:34:32.073337Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:32.073355Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:32.073357Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:32.073433Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:21111 TClient is connected to server localhost:21111 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:32.133955Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:32.133979Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:32.135061Z node 2 :HIVE WARN: n ... 
e, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:35.629036Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:35.643303Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:35.659762Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511669632785701686:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:35.659817Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:35.659848Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511669632785701691:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:35.660812Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:35.670530Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511669632785701693:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:35.723980Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511669632785701744:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:35.843165Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 8177, MsgBus: 3710 2025-06-03T10:34:36.298978Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511669635938332922:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:36.299020Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001706/r3tmp/tmpFc3FUh/pdisk_1.dat 2025-06-03T10:34:36.317372Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 8177, node 6 2025-06-03T10:34:36.329404Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:36.329420Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:36.329422Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:36.329487Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3710 TClient is connected to server localhost:3710 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:34:36.404041Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:36.404077Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:36.404638Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.405159Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:34:36.406097Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:34:36.409642Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.424381Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.454026Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.469900Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.706709Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669635938334540:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.706743Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.716840Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.727076Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.737149Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.749071Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.762340Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.777549Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.791064Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.807341Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669635938335193:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.807376Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.807437Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669635938335198:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.808257Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:36.818162Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669635938335200:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:36.869080Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669635938335251:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:37.018593Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 >> KqpScheme::AlterColumnTableTiering [GOOD] >> KqpScheme::AlterAsyncReplication >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::TwoSimilarFamiliesTest [GOOD] Test command err: Trying to start YDB, gRPC: 22420, MsgBus: 4887 2025-06-03T10:34:30.493532Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669611366015544:2260];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:30.493595Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016cc/r3tmp/tmpVqjmjY/pdisk_1.dat 2025-06-03T10:34:30.626518Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669611366015318:2079] 1748946870492576 != 1748946870492579 2025-06-03T10:34:30.629112Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:30.629145Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:30.645545Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:34:30.645698Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22420, node 1 2025-06-03T10:34:30.745076Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:30.745183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:30.745191Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:30.745229Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4887 TClient is connected to server localhost:4887 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:30.868969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.882370Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:34:30.883659Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.949195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.965869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.976813Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:31.003143Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669615660984258:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.003180Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.241432Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.250734Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.260414Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.274521Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.329814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.338129Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.351940Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.367821Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669615660984912:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.367841Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.367938Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669615660984917:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.368683Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715670:3, at schemeshard: 72057594046644480 2025-06-03T10:34:31.371188Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669615660984919:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715670 completed, doublechecking } 2025-06-03T10:34:31.449742Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669615660984970:3403] txid# 281474976715671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:31.749845Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511669615660985255:3578], for# test_user@builtin, access# DescribeSchema 2025-06-03T10:34:31.749867Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511669615660985255:3578], for# test_user@builtin, access# DescribeSchema 2025-06-03T10:34:31.752226Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669615660985252:2514], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/KeyValue]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:34:31.752347Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NTcxZTFkZTctYTYzODIxNTktZTAzNWEzZGMtYWFmMWNmNWI=, ActorId: [1:7511669615660985243:2509], ActorState: ExecuteState, TraceId: 01jwtnpsdw1e1na6t1f0nqwrem, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/KeyValue]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:34:31.756699Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669615660985259:2517], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/NonExistent]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:34:31.756814Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=NTcxZTFkZTctYTYzODIxNTktZTAzNWEzZGMtYWFmMWNmNWI=, ActorId: [1:7511669615660985243:2509], ActorState: ExecuteState, TraceId: 01jwtnpse99bkjvzwpeegyx1tw, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/NonExistent]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 Trying to start YDB, gRPC: 29158, MsgBus: 26475 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016cc/r3tmp/tmpwHjLHN/pdisk_1.dat 2025-06-03T10:34:32.042576Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;ac ... hemeshard: 72057594046644480 2025-06-03T10:34:36.106163Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.119144Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.132880Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.147704Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.161286Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:36.177764Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511669637863589564:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.177799Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.177830Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511669637863589569:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:36.178653Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:36.188048Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511669637863589571:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:36.288338Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511669637863589622:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 24572, MsgBus: 10070 2025-06-03T10:34:36.802183Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511669636854861730:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:36.802302Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016cc/r3tmp/tmpDkJl0w/pdisk_1.dat 2025-06-03T10:34:36.820745Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 24572, node 6 2025-06-03T10:34:36.831915Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:36.831931Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:36.831933Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:36.831986Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:10070 TClient is connected to server localhost:10070 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:34:36.902464Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:36.902504Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:36.903688Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:34:36.907626Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.915636Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.927497Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.948973Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:36.962499Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:37.240682Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669641149830600:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.240739Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.245739Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.257518Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.266488Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.280649Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.337163Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.350956Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.365635Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.381946Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669641149831254:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.381981Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.382010Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669641149831259:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.382849Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:37.391642Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669641149831261:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:37.466513Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669641149831312:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] [GOOD] >> LabeledDbCounters::TwoTablets [GOOD] >> LabeledDbCounters::TwoTabletsKillOneTablet >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] [GOOD] >> KqpScheme::DropAsyncReplicationCascade [GOOD] >> KqpScheme::AlterTableRenameIndex >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_3.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-plan] >> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query |71.4%| [TA] {RESULT} $(B)/ydb/core/tx/datashard/ut_snapshot/test-results/unittest/{meta.json ... results_accumulator.log} >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DropAsyncReplicationCascade [GOOD] Test command err: Trying to start YDB, gRPC: 30271, MsgBus: 6372 2025-06-03T10:34:30.493265Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669609946496117:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:30.493366Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ea/r3tmp/tmp9i657T/pdisk_1.dat 2025-06-03T10:34:30.626835Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669609946495950:2079] 1748946870490503 != 1748946870490506 2025-06-03T10:34:30.629884Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:30.629912Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:30.645311Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:30.645587Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 30271, node 1 2025-06-03T10:34:30.745077Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:30.745104Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:30.745106Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:30.745157Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6372 TClient is connected to server localhost:6372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:34:30.869932Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.883779Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.908202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.925820Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.937635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:31.001970Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669614241464881:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.002002Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.241412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.250844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.260555Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.317214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.330982Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.345124Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.359132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.376200Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669614241465534:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.376237Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.376699Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669614241465539:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.377923Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:31.386248Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669614241465541:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:31.457360Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669614241465592:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:31.757788Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.775071Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669614241465959:3625] txid# 281474976715673, issues: { message: "Check failed: path: \'/Root/table/feed\', error: path hasn\'t been resolved, nearest resolved path: \'/Root/table\' (id: [OwnerId: 72057594046644480, LocalPathId: 17]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_drop_cdc_stream.cpp:480" issue_code: 200200 severity: 1 } Trying to start YDB, gRPC: 25170, MsgBus: 29373 2025-06-03T10:34:32.040571Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669619043641419:2209];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ea/r3tmp/tmpC9ZASO/pdisk_1.dat 2025-06-03T10:34:32.042838Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:34:32.059630Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25170, node 2 2025-06-03T10:34:32.073336Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:32.073352Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:32.073355Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:32.073424Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:29373 TClient is connected to server localhost:29373 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated ... 
E WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,37) wasn't found 2025-06-03T10:34:37.178269Z node 5 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 5, TabletId: 72075186224037923 not found Trying to start YDB, gRPC: 64302, MsgBus: 6380 2025-06-03T10:34:37.472019Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511669640029988035:2063];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:37.472047Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ea/r3tmp/tmpNBSFxn/pdisk_1.dat 2025-06-03T10:34:37.488628Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 64302, node 6 2025-06-03T10:34:37.499517Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:37.499534Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:37.499537Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:37.499605Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:6380 TClient is connected to server localhost:6380 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:37.578298Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:37.578371Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:37.578900Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.579360Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:34:37.590644Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:37.607235Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:37.635276Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:37.651980Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:37.886469Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669640029989633:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.886518Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.898048Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.907255Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.917974Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.931823Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.946216Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.960100Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.974019Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:37.990713Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669640029990284:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.990747Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669640029990289:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.990750Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:37.991539Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:38.000937Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669640029990291:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:38.060740Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669644324957638:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:38.200450Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:34:38.214674Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:34:38.240372Z node 6 :CHANGE_EXCHANGE WARN: change_sender_cdc_stream.cpp:398: [CdcChangeSenderMain][72075186224037922:1][6:7511669644324958183:2546] Failed entry at 'ResolveTopic': entry# { Path: TableId: [72057594046644480:20:0] RequestType: ByTableId Operation: OpTopic RedirectRequired: true ShowPrivatePath: true SyncVersion: false Status: PathErrorUnknown Kind: KindUnknown DomainInfo } 2025-06-03T10:34:38.243778Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:34:38.258123Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715677:0, at schemeshard: 72057594046644480 2025-06-03T10:34:39.230247Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropReplicationCascade, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:34:39.232573Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropPersQueueGroup, opId: 281474976715679:2, at schemeshard: 72057594046644480 2025-06-03T10:34:39.235067Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037925 not found 2025-06-03T10:34:39.235081Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037924 not found 2025-06-03T10:34:39.235762Z node 6 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,38) wasn't found 2025-06-03T10:34:39.235784Z node 6 :HIVE WARN: tx__delete_tablet.cpp:91: HIVE#72057594037968897 THive::TTxDeleteTablet tablet (72057594046644480,37) wasn't found 2025-06-03T10:34:39.248842Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037923 not found 2025-06-03T10:34:39.248856Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037926 not found >> 
test_sql.py::TestCanonicalFolder1::test_case[json/insert_from_table.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] [GOOD] >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success >> KqpScheme::AlterTableRenameIndex [GOOD] >> KqpScheme::AlterTableReplaceIndex >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] [GOOD] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--false] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-result_sets] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--false] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_sql.py::TestCanonicalFolder1::test_case[simple/q22.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-plan] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-result_sets] >> KqpScheme::AlterTableReplaceIndex [GOOD] >> KqpScheme::AlterTableRenameVectorIndex |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[change-password] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_4.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-plan] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::AlterTableRenameVectorIndex [GOOD] >> KqpScheme::AlterTableWithDecimalColumn >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-result_sets] >> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--false] >> TxUsage::WriteToTopic_Demo_28_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_literal.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] >> TxUsage::WriteToTopic_Demo_28_Query >> TxUsage::WriteToTopic_Demo_22_RestartBeforeCommit_Query [GOOD] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_can_change_partition_config_options [GOOD] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table >> KqpScheme::AlterTableWithDecimalColumn [GOOD] >> KqpScheme::AlterTableWithPgColumn >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-result_sets] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] >> 
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q3.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-plan]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] [GOOD]
>> TBSVWithReboots::CreateWithIntermediateDirs [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--false-YDB] [GOOD]
>> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error
>> KqpScheme::AlterTableWithPgColumn [GOOD]
>> KqpScheme::AlterTransfer
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateWithIntermediateDirs [GOOD]
Test command err:
==== RunWithTabletReboots =========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142]
Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142]
2025-06-03T10:34:28.563521Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:34:28.563539Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:34:28.563544Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s
2025-06-03T10:34:28.563548Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration
2025-06-03T10:34:28.563558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000
2025-06-03T10:34:28.563561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000
2025-06-03T10:34:28.563568Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10
2025-06-03T10:34:28.563579Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval#
600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:34:28.563655Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:34:28.563718Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:34:28.577712Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:34:28.577734Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:28.577841Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:34:28.580571Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:34:28.580686Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:34:28.580721Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:34:28.582908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:34:28.582963Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:34:28.583105Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:28.583163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:34:28.583706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:28.583764Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:34:28.584052Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:34:28.584064Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:34:28.584082Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:34:28.584093Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:34:28.584100Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:34:28.584147Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: 
[1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:34:28.585707Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:34:28.599333Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:34:28.599410Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.599475Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:34:28.599525Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:34:28.599539Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.600338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:28.600370Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:34:28.600416Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.600429Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:34:28.600436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:34:28.600444Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:34:28.601075Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.601095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:34:28.601102Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:34:28.601671Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.601690Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:34:28.601698Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:28.601706Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:34:28.602287Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:34:28.602854Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:34:28.602908Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:34:28.603157Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:28.603193Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:34:28.603202Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:34:28.603284Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
-06-03T10:34:45.222610Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 6], 2 2025-06-03T10:34:45.223122Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.223148Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 8 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.223155Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 5, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:45.223161Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 8 2025-06-03T10:34:45.223188Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:34:45.223434Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.223449Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.223454Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 4, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:45.223459Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 6 2025-06-03T10:34:45.223464Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:34:45.225395Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225430Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 4 Version: 6 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225437Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:45.225443Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 4], version: 6 2025-06-03T10:34:45.225450Z node 65 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:34:45.225574Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225586Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 5 Version: 5 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225590Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:45.225595Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 5], version: 5 2025-06-03T10:34:45.225599Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 5] was 2 2025-06-03T10:34:45.225785Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225797Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 6 Version: 2 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.225802Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:34:45.225806Z node 65 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 6], version: 2 2025-06-03T10:34:45.225810Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 6] was 4 2025-06-03T10:34:45.225822Z node 65 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:34:45.226636Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.226670Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.226749Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.226856Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:34:45.227085Z node 65 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 TestWaitNotification wait txId: 1003 2025-06-03T10:34:45.227162Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:34:45.227191Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:34:45.227292Z node 65 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:34:45.227321Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:34:45.227327Z node 65 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [65:393:2372] TestWaitNotification: OK eventTxId 1003 2025-06-03T10:34:45.227421Z node 65 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/Valid/x/y/z" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:34:45.227492Z node 65 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Valid/x/y/z" took 92us result status StatusSuccess 2025-06-03T10:34:45.227597Z node 65 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/Valid/x/y/z" PathDescription { Self { Name: "z" PathId: 6 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1003 CreateStep: 5000003 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 5 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "z" PathId: 6 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "" TokenVersion: 0 } } PathId: 6 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:34:45.227671Z node 65 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme 
DoExecute, record: Path: "/MyRoot/Invalid" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:34:45.227693Z node 65 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/Invalid" took 24us result status StatusPathDoesNotExist 2025-06-03T10:34:45.227713Z node 65 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/Invalid\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/Invalid" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_2_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 2] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] >> test_postgres.py::TestPGSQL::test_sql_suite[results-abstime.test] >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_4_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 4] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_5.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-plan] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] >> alter_compression.py::TestAllCompression::test_all_supported_compression[lz4_compression-COMPRESSION = "lz4"] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_6_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 6] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_compression-COMPRESSION = "zstd"] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-result_sets] |71.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |71.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot |71.5%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot/ydb-core-blobstorage-ut_blobstorage-ut_blob_depot ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_19_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-03T10:31:32.920913Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668848564063090:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:32.921346Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d9e/r3tmp/tmpWQZivR/pdisk_1.dat 2025-06-03T10:31:33.003899Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:33.046278Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:33.046307Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:33.053494Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:33.058116Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 14638, node 1 2025-06-03T10:31:33.085558Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d9e/r3tmp/yandexKD2Z2G.tmp 2025-06-03T10:31:33.085573Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d9e/r3tmp/yandexKD2Z2G.tmp 2025-06-03T10:31:33.085671Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d9e/r3tmp/yandexKD2Z2G.tmp 2025-06-03T10:31:33.085735Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:33.089486Z INFO: TTestServer started on Port 4557 GrpcPort 14638 TClient is connected to server localhost:4557 PQClient connected to localhost:14638 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 
2025-06-03T10:31:33.146879Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.150193Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.162474Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:31:33.416850Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668852859031127:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.417525Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668852859031162:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.425543Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.426793Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:33.433615Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710662, at schemeshard: 72057594046644480 2025-06-03T10:31:33.433770Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668852859031165:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:31:33.473050Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.490218Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.526464Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.533967Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668852859031426:2549] txid# 281474976710666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); 2025-06-03T10:31:33.610326Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Execution" issue_code: 1060 severity: 2 issues { position { row: 3 column: 120 } message: "Cost Based Optimizer could not be applied to this query: couldn\'t load statistics" end_position { row: 3 column: 120 } issue_code: 8001 severity: 2 } } TxMeta { } YdbResults { columns { name: "C.name" type { optional_type { item { type_id: UTF8 } } } } columns { name: "C.balancer" type { optional_type { item { type_id: UTF8 } } } } columns { name: "C.local" type { optional_type { item { type_id: BOOL } } } } columns { name: "C.enabled" type { optional_type { item { type_id: BOOL } } } } columns { name: "C.weight" type { optional_type { item { type_id: UINT64 } } } } columns { name: "V.version" type { optional_type { item { type_id: INT64 } } } } } QueryDiagnostics: "" } YdbStatus: SUCCESS ConsumedRu: 44 } === CheckClustersList. Subcribe to ClusterTracker from [1:7511668852859031522:2608] 2025-06-03T10:31:37.921353Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668848564063090:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:37.921403Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. 
Ok 2025-06-03T10:31:38.831872Z :WriteToTopic_Demo_12_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:38.835918Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:38.850428Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:38.850638Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668874333868213:2691] connected; active server actors: 1 2025-06-03T10:31:38.850713Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-03T10:31:38.851019Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:38.851127Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:38.851175Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:38.851195Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:38.851198Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:38.851203Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31:38.851210Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:38.851218Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-03T10:31:38.851248Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:38.851282Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:38.851807Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668874333868212:2690], now have 1 active actors on pipe 2025-06-03T10:31:38.855351Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3220: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7511668852859030798 RawX2: 4294969497 } TxId: 281474976710674 Config { TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { Pool ... 
ition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:34:44.434481Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:34:44.434483Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:34:44.434485Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000ctest-consumer 2025-06-03T10:34:44.434487Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000utest-consumer 2025-06-03T10:34:44.434489Z node 21 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:34:44.434492Z node 21 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037894, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:34:44.434503Z node 21 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:34:44.434545Z :DEBUG: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 12 } } 2025-06-03T10:34:44.434753Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 20 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:34:44.434767Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:34:44.434776Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 3 2025-06-03T10:34:44.434791Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 3 } 2025-06-03T10:34:44.434801Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 20 endOffset 20 with cookie 3 2025-06-03T10:34:44.434807Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 replying for commits: assignId# 1, from# 3, to# 3, offset# 20 2025-06-03T10:34:44.435069Z :DEBUG: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 20 } } 2025-06-03T10:34:45.422489Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:19:20 2025-06-03T10:34:45.422522Z :INFO: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1000 BytesRead: 2000000 MessagesRead: 20 BytesReadCompressed: 2000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:34:45.424439Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session 
test-consumer_21_1_6784068070939353347_v1 checking auth because of timeout 2025-06-03T10:34:45.424488Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth for : test-consumer 2025-06-03T10:34:45.424861Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 Handle describe topics response 2025-06-03T10:34:45.424886Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth is DEAD 2025-06-03T10:34:45.424907Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:34:45.428016Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 checking auth because of timeout 2025-06-03T10:34:45.428057Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth for : test-consumer 2025-06-03T10:34:45.428334Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 Handle describe topics response 2025-06-03T10:34:45.428359Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth is DEAD 2025-06-03T10:34:45.428380Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:34:46.422348Z :INFO: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] Closing read session. Close timeout: 0.000000s 2025-06-03T10:34:46.422368Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:19:20 2025-06-03T10:34:46.422378Z :INFO: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2000 BytesRead: 2000000 MessagesRead: 20 BytesReadCompressed: 2000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:34:46.422399Z :NOTICE: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:34:46.422408Z :DEBUG: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] [] Abort session to cluster 2025-06-03T10:34:46.423622Z :DEBUG: [/Root] 0x0000739F39304010 TDirectReadSessionManager ServerSessionId=test-consumer_21_1_6784068070939353347_v1 Close 2025-06-03T10:34:46.423682Z :DEBUG: [/Root] 0x0000739F39304010 TDirectReadSessionManager ServerSessionId=test-consumer_21_1_6784068070939353347_v1 Close 2025-06-03T10:34:46.423714Z :NOTICE: [/Root] [/Root] [3c5fccb3-cca20365-c71abfcf-509e6d9f] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:34:46.423917Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 grpc read done: success# 0, data# { } 2025-06-03T10:34:46.423935Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 grpc read failed 2025-06-03T10:34:46.423945Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 grpc closed 2025-06-03T10:34:46.423963Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 is DEAD 2025-06-03T10:34:46.424536Z node 21 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [21:7511669671987234451:2544] disconnected; active server actors: 1 2025-06-03T10:34:46.424546Z node 21 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [21:7511669671987234451:2544] client test-consumer disconnected session test-consumer_21_1_6784068070939353347_v1 2025-06-03T10:34:46.424581Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037894] Destroy direct read session test-consumer_21_1_6784068070939353347_v1 2025-06-03T10:34:46.424590Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669671987234454:2547] destroyed 2025-06-03T10:34:46.424605Z node 21 :PQ_READ_PROXY DEBUG: caching_service.cpp:398: Direct read cache: close session for proxy [21:7511669671987234460:2550] 2025-06-03T10:34:46.424617Z node 21 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_21_1_6784068070939353347_v1 2025-06-03T10:34:46.429168Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-03T10:34:46.429187Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0] PartitionId [0] Generation [2] Write session will now close 2025-06-03T10:34:46.429196Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-03T10:34:46.429573Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [21:7511669671987234460:2550]: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 grpc read done: success# 0, data# { } 2025-06-03T10:34:46.429586Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [21:7511669671987234460:2550]: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1grpc read failed 2025-06-03T10:34:46.429594Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [21:7511669671987234460:2550]: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 grpc closed 2025-06-03T10:34:46.429597Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [21:7511669671987234460:2550]: session cookie 2 consumer test-consumer session test-consumer_21_1_6784068070939353347_v1 proxy is DEAD 2025-06-03T10:34:46.429727Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0 grpc read done: success: 0 data: 2025-06-03T10:34:46.429729Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0 grpc read failed 2025-06-03T10:34:46.429734Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0 grpc closed 2025-06-03T10:34:46.429738Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0 is DEAD 2025-06-03T10:34:46.429920Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:34:46.430263Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-03T10:34:46.430272Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|98563a60-b0085246-849d32d7-2501f200_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-03T10:34:46.430319Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669663397299799:2520] destroyed 2025-06-03T10:34:46.430353Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
>> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--false] [GOOD]
>> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--true]
>> RetryPolicy::TWriteSession_TestPolicy [GOOD]
>> RetryPolicy::TWriteSession_TestBrokenPolicy
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_8_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 8] [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9]
>> TTablesWithReboots::LostBorrowAckWithReboots [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-result_sets]
|71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_12_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 12] [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13]
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--false] [GOOD]
>> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--true]
>> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD]
>> KqpScheme::AlterAsyncReplication [GOOD]
>> TxUsage::Sinks_Oltp_WriteToTopicAndTable_6_Query [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_15_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 15] [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_base_reboots/unittest >> TTablesWithReboots::LostBorrowAckWithReboots [GOOD]
Test command err:
==== RunWithTabletReboots =========== RUN: Trace ===========
Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142]
IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142]
Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140]
Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141]
Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142]
2025-06-03T10:31:30.874261Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1
2025-06-03T10:31:30.874291Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout#
15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:30.874299Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:31:30.874313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:31:30.874328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:31:30.874332Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:31:30.874343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:31:30.874358Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:31:30.874491Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:31:30.874564Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:31:30.913242Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:31:30.913272Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:30.913444Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:31:30.926254Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:31:30.926302Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:31:30.926330Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:31:30.942410Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:31:30.942472Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:31:30.942621Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.942744Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:31:30.957751Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:30.957833Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:31:30.958094Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:31:30.958103Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:31:30.958129Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:31:30.958136Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:31:30.958140Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:31:30.958159Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:31:30.963199Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:31:30.989783Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:31:30.989914Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.990006Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:31:30.990082Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:31:30.990100Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.991293Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.991345Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:31:30.991436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.991451Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 
ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:31:30.991459Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:31:30.991467Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:31:30.992366Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.992391Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:31:30.992401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:31:30.993053Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.993067Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:31:30.993073Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:30.993081Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:31:30.993835Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:31:30.994394Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:31:30.994439Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:31:30.994691Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:31:30.994724Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:31:30.994735Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:31:30.994812Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
d__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:34:46.943868Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:34:46.943874Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1005:0 progress is 1/1 2025-06-03T10:34:46.943877Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:34:46.943883Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1005, ready parts: 1/1, is published: true 2025-06-03T10:34:46.943889Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1005 ready parts: 1/1 2025-06-03T10:34:46.943894Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1005:0 2025-06-03T10:34:46.943899Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1005:0 2025-06-03T10:34:46.943924Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 2 2025-06-03T10:34:46.944006Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:34:46.949953Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:34:46.958117Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:46.958354Z node 122 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 TabletID: 72075186233409546 2025-06-03T10:34:46.958443Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:34:46.958539Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 Forgetting tablet 72075186233409546 2025-06-03T10:34:46.959444Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:34:46.959460Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:34:46.959484Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:663:2058] recipient: [122:15:2062] 2025-06-03T10:34:46.960669Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:34:46.960689Z node 122 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:1 tabletId 72075186233409546 2025-06-03T10:34:46.960830Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:34:46.960895Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:34:46.960905Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:34:46.960976Z node 122 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:34:46.961002Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:34:46.961007Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [122:666:2625] TestWaitNotification: OK eventTxId 1005 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted 2025-06-03T10:34:46.961069Z node 122 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:34:46.961082Z node 122 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 Deleted tabletId 72075186233409546 Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:672:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:673:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:675:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:676:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:678:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:679:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:682:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:683:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:686:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:687:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:693:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:694:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:696:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:697:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:700:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:701:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:702:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] 
sender: [122:703:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:705:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:706:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:707:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:708:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:711:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:712:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:713:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:714:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:716:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:717:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:718:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:719:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:721:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:722:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:723:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:724:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:726:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:727:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:728:2058] recipient: [122:15:2062] Leader for TabletID 72075186233409546 is [122:330:2316] sender: [122:729:2058] recipient: [122:15:2062] 2025-06-03T10:34:48.329572Z node 122 :HIVE INFO: tablet_helpers.cpp:1404: [72057594037968897] TEvRequestHiveInfo, msg: TabletID: 72075186233409546 ReturnFollowers: false 2025-06-03T10:34:48.339384Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5554: Handle TEvStateChanged, at schemeshard: 72057594046678944, message: Source { RawX1: 441 RawX2: 523986012522 } TabletId: 72075186233409547 State: 4 2025-06-03T10:34:48.339435Z node 122 :FLAT_TX_SCHEMESHARD INFO: schemeshard__state_changed_reply.cpp:78: TTxShardStateChanged DoExecute, datashard informs about state changing, datashardId: 72075186233409547, state: Offline, at schemeshard: 72057594046678944 2025-06-03T10:34:48.343694Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:34:48.343899Z node 122 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 TabletID: 72075186233409547 2025-06-03T10:34:48.344774Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:34:48.344879Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: 
DecrementPathDbRefCount reason shard deleted for pathId [OwnerId: 72057594046678944, LocalPathId: 4] was 1 Forgetting tablet 72075186233409547 2025-06-03T10:34:48.345184Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:34:48.345194Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 4], at schemeshard: 72057594046678944 2025-06-03T10:34:48.345210Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:34:48.354010Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 2025-06-03T10:34:48.354059Z node 122 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:180: Close pipe to deleted shardIdx 72057594046678944:2 tabletId 72075186233409547 2025-06-03T10:34:48.354223Z node 122 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 Deleted tabletId 72075186233409547 |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_remove_directory_that_does_not_exist_failure [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-abstime.test] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q4.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-plan] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterAsyncReplication [GOOD] Test command err: Trying to start YDB, gRPC: 18151, MsgBus: 3694 2025-06-03T10:34:30.492940Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669612839884917:2066];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:30.493319Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001708/r3tmp/tmpalV6Ht/pdisk_1.dat 2025-06-03T10:34:30.626815Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669612839884887:2079] 1748946870492731 != 1748946870492734 2025-06-03T10:34:30.629679Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:30.629711Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:30.645311Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:34:30.645575Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 18151, node 1 2025-06-03T10:34:30.745142Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty 
maybe) 2025-06-03T10:34:30.745163Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:30.745165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:30.745212Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3694 TClient is connected to server localhost:3694 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:30.869079Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.879632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.905906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.925687Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:30.939362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:31.001995Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669617134853817:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.002031Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.241389Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.250814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.260444Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.274646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.329872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.338087Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.351872Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:31.367784Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669617134854470:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.367807Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.367874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669617134854475:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:31.368686Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:31.371721Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669617134854477:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:31.449742Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669617134854528:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:31.743196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 18009, MsgBus: 20473 2025-06-03T10:34:32.016056Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669620470667869:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:32.016082Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001708/r3tmp/tmpiln2le/pdisk_1.dat 2025-06-03T10:34:32.032986Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18009, node 2 2025-06-03T10:34:32.050309Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:32.050322Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:32.050324Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:32.050373Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20473 TClient is connected to server localhost:20473 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:32.116585Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:34:32.116619Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:34:32.117641Z node 2 :HIVE WARN: no ... 
lterReplication, opId: 281474976715683:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.269747Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715683 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2136" Database: "/local" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.269803Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715683 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2136" Database: "/local" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.269928Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.283981Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.284655Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715684 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.284711Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715684 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.290131Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.333926Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715685:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.334605Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715685 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.334650Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { 
OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715685 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.334823Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.336058Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715685, at schemeshard: 72057594046644480 2025-06-03T10:34:49.345240Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715686:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.349711Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.349779Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.353500Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.355472Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715686, at schemeshard: 72057594046644480 2025-06-03T10:34:49.365275Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669694700838863:4035] txid# 281474976715687, issues: { message: "User is not set" severity: 1 } 2025-06-03T10:34:49.370657Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669694700838872:4040] txid# 281474976715688, issues: { message: "User is not set" severity: 1 } 2025-06-03T10:34:49.376329Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715689:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.377007Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.377050Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: 
NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.377356Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.384503Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715690:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.385265Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.385338Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.385595Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.397564Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715691:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.398102Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715691 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.398153Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715691 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.398299Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete 2025-06-03T10:34:49.408761Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: 
Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterReplication, opId: 281474976715692:0, at schemeshard: 72057594046644480 2025-06-03T10:34:49.409520Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037923] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.409568Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037923][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 18 } OperationId { TxId: 281474976715692 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Row { } } } 2025-06-03T10:34:49.410633Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037923][TxAlterReplication] Complete >> test_postgres.py::TestPGSQL::test_sql_suite[results-boolean.test] >> test_postgres.py::TestPGSQL::test_sql_suite[results-boolean.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_equi_multi_rp_1.sql-result_sets] [GOOD] >> TxUsage::WriteToTopic_Demo_28_Query [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_2_Query [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_6.sql-result_sets] [GOOD] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[index/topsort_index_with_selector_aliases.sql-result_sets] [GOOD] |71.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[write/write_group_by.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-plan] >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--true-YDB] >> TxUsage::WriteToTopic_Demo_29_Table >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--true] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table |71.5%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |71.5%| [LD] {RESULT} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |71.5%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/row_dispatcher/format_handler/ut/ydb-core-fq-libs-row_dispatcher-format_handler-ut |71.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pk_predicate/pk_predicate_ranges_1.sql-plan] [GOOD] |71.6%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/q21.sql-plan] [GOOD]
|71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-result_sets]
|71.6%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops
|71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_range_ops/ydb-core-tx-datashard-ut_range_ops
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_18_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 18] [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19]
>> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--true]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_20_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 20] [GOOD]
>> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21]
|71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/pgwire/pgwire
|71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/pgwire/pgwire
|71.6%| [LD] {RESULT} $(B)/ydb/apps/pgwire/pgwire
|71.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_2.sql-result_sets] [GOOD]
|71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
|71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
|71.6%| [LD] {RESULT} $(B)/ydb/core/cms/ut_sentinel/ydb-core-cms-ut_sentinel
>> test_storage_config.py::TestStorageConfig::test_cases[case_0]
>> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Table [GOOD]
>> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--false]
>> alter_compression.py::TestAlterCompression::test_availability_data
|71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
|71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
|71.6%| [LD] {RESULT} $(B)/ydb/core/tx/locks/ut_range_treap/ydb-core-tx-locks-ut_range_treap
|71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
|71.6%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
|71.6%| [LD] {RESULT} $(B)/ydb/library/yql/providers/generic/actors/ut/ydb-library-yql-providers-generic-actors-ut
>> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--false-YDB]
>> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-plan] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-result_sets]
>> test_user_administration.py::test_database_admin_cant_change_database_admin_user[unblock]
|71.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest
>> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--true]
>> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--true]
>> test_user_administration.py::test_database_admin_can_create_user
|71.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_column_after_table_creation_with_data_and_success [GOOD]
>> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q5.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-plan]
>> test_auth_system_views.py::test_tenant_auth_groups_access[clusteradmin-True]
>> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--false] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
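Note on the DeprecationWarning repeated above: it is raised because ydb/tests/library/sqs/requests_client.py calls logger.warn(), which Python's logging module keeps only as a deprecated alias of logger.warning(). A minimal sketch of the fix follows; the report_failure() wrapper and the logger name are illustrative assumptions, while the warning call and message text come from the captured output:

    import logging

    logger = logging.getLogger("sqs.requests_client")  # illustrative name

    def report_failure(code, reason, text):  # hypothetical wrapper
        # logging.Logger.warn is a deprecated alias of Logger.warning.
        # Passing arguments printf-style instead of str.format also defers
        # string building until the record is actually emitted.
        logger.warning(
            "Last request failed with code %s, reason '%s' and text '%s'",
            code, reason, text,
        )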
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_7.sql-result_sets] [GOOD]
>> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-plan]
|71.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest
>> KqpScheme::AlterTransfer [GOOD]
>> TxUsage::WriteToTopic_Demo_29_Table [GOOD]
>> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--true]
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_3_Table [GOOD]
>> TxUsage::WriteToTopic_Demo_29_Query
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterTransfer [GOOD]
Test command err:
Trying to start YDB, gRPC: 26281, MsgBus: 16707
2025-06-03T10:34:39.656677Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669650577041787:2069];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:34:39.656708Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016f4/r3tmp/tmpd5iA6J/pdisk_1.dat
2025-06-03T10:34:39.710001Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
TServer::EnableGrpc on GrpcPort 26281, node 1
2025-06-03T10:34:39.727742Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:34:39.727757Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:34:39.727759Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:34:39.727807Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:16707
2025-06-03T10:34:39.758118Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:34:39.758151Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:34:39.759231Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TClient is connected to server localhost:16707
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:34:39.791844Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:34:39.798653Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:34:39.817013Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:34:39.838649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480
waiting...
2025-06-03T10:34:39.851219Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:34:40.089849Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669654872010700:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:40.089877Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:40.133911Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.141784Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.150814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.164709Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.178783Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.193000Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.207035Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.222925Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669654872011352:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:40.222955Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:40.222959Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669654872011357:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:34:40.223764Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:34:40.226548Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669654872011359:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:34:40.327531Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669654872011410:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:34:40.471790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.480956Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:34:40.494132Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 31425, MsgBus: 2097 2025-06-03T10:34:40.950901Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669652668464049:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:34:40.951288Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016f4/r3tmp/tmpg1brMi/pdisk_1.dat 2025-06-03T10:34:40.969607Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31425, node 2 2025-06-03T10:34:40.976798Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:34:40.976813Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:34:40.976815Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:34:40.976877Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:2097 TClient is connected to server localhost:2097 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:34:41.056024Z nod ... ration type: ESchemeOpAlterTransfer, opId: 281474976710682:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.800314Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710682 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.800369Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710682 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { Token: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.804410Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.808669Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710682, at schemeshard: 72057594046644480 2025-06-03T10:34:57.817198Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710683:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.818058Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710683 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.818146Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710683 
PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" OAuthToken { TokenSecretName: "mysecret" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.818596Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.826418Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669728722525274:3977] txid# 281474976710684, issues: { message: "User is not set" severity: 1 } 2025-06-03T10:34:57.832116Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669728722525283:3982] txid# 281474976710685, issues: { message: "User is not set" severity: 1 } 2025-06-03T10:34:57.841522Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710686:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.842572Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.846204Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710686 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.846721Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.853670Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710687:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.854358Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710687 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.854404Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710687 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" Password: "***" } } 
ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.854687Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.859918Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710687, at schemeshard: 72057594046644480 2025-06-03T10:34:57.865716Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710688:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.866410Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710688 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.866454Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710688 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "user" PasswordSecretName: "password_secret_name" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.866839Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.882891Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710689:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.883935Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.883988Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710689 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.884155Z node 6 :REPLICATION_CONTROLLER 
DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.912648Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTransfer, opId: 281474976710690:0, at schemeshard: 72057594046644480 2025-06-03T10:34:57.913442Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:147: [controller 72075186224037925] Handle NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> {\n RETURN CAST($x as String);\n };\n" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.913528Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:22: [controller 72075186224037925][TxAlterReplication] Execute: NKikimrReplication.TEvAlterReplication PathId { OwnerId: 72057594046644480 LocalId: 19 } OperationId { TxId: 281474976710690 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:2135" Database: "/Root" StaticCredentials { User: "new_user" Password: "***" } } ConsistencySettings { Row { } } TransferSpecific { Target { SrcPath: "/Root/topic" DstPath: "/Root/table" TransformLambda: "$__ydb_transfer_lambda = ($x) -> {\n RETURN CAST($x as String);\n };\n" } RunAsUser: "root@builtin" } } 2025-06-03T10:34:57.913595Z node 6 :REPLICATION_CONTROLLER NOTICE: tx_alter_replication.cpp:110: [controller 72075186224037925][TxAlterReplication] Alter replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 19] 2025-06-03T10:34:57.917847Z node 6 :REPLICATION_CONTROLLER DEBUG: tx_alter_replication.cpp:119: [controller 72075186224037925][TxAlterReplication] Complete 2025-06-03T10:34:57.917997Z node 6 :REPLICATION_CONTROLLER TRACE: controller.cpp:202: [controller 72075186224037925] Handle NKikimr::NReplication::NController::TEvPrivate::TEvRequestCreateStream |71.6%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |71.7%| [LD] {RESULT} $(B)/ydb/core/fq/libs/control_plane_proxy/ut/ydb-core-fq-libs-control_plane_proxy-ut |71.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[json/insert_params.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-result_sets] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table |71.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[join/join_to_idx_lookup_partial_inner.sql-result_sets] [GOOD] |71.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_given_table_when_drop_table_and_create_with_same_primary_key_and_other_scheme_then_ok [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed >> 
test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-result_sets] |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut |71.7%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_data_erasure_reboots/ydb-core-tx-schemeshard-ut_data_erasure_reboots |71.7%| [LD] {RESULT} $(B)/ydb/core/quoter/ut/ydb-core-quoter-ut >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--false-YDB] >> RetryPolicy::TWriteSession_TestBrokenPolicy [GOOD] >> RetryPolicy::TWriteSession_RetryOnTargetCluster >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--true] [GOOD] |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence |71.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_sequence/ydb-core-tx-datashard-ut_sequence >> test_db_counters.py::TestKqpCounters::test_case >> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_9] >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--false] |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction |71.7%| [LD] {RESULT} $(B)/ydb/core/tx/datashard/ut_background_compaction/ydb-core-tx-datashard-ut_background_compaction >> test_sql.py::TestCanonicalFolder1::test_case[simple/q6.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-plan] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--true] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_8.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-plan] |71.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_decreasing_number_of_generations_it_is_raise_error [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_path_with_long_name_failed [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |71.7%| [LD] {BAZEL_UPLOAD, 
SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub |71.7%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_scrub/ydb-core-blobstorage-ut_blobstorage-ut_scrub >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--true] >> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--true-YDB] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql_empty_database_header[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] |71.7%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |71.7%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut |71.8%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/service/ut/ydb-apps-etcd_proxy-service-ut >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-plan] [GOOD] |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-result_sets] >> TxUsage::WriteToTopic_Demo_29_Query [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-result_sets] |71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/apps/etcd_proxy/etcd_proxy |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/apps/etcd_proxy/etcd_proxy |71.8%| [LD] {RESULT} $(B)/ydb/apps/etcd_proxy/etcd_proxy |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_22_RestartAfterCommit_Query [GOOD] Test command err: 2025-06-03T10:31:34.405609Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668853849885399:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:34.405635Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d8e/r3tmp/tmpifsxnx/pdisk_1.dat 2025-06-03T10:31:34.449793Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:34.471320Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668853849885380:2079] 1748946694405433 != 1748946694405436 2025-06-03T10:31:34.473815Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 19202, node 1 2025-06-03T10:31:34.485777Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: 
/home/runner/.ya/build/build_root/u93c/000d8e/r3tmp/yandexqoBN93.tmp 2025-06-03T10:31:34.485789Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d8e/r3tmp/yandexqoBN93.tmp 2025-06-03T10:31:34.485885Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d8e/r3tmp/yandexqoBN93.tmp 2025-06-03T10:31:34.485956Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:34.492755Z INFO: TTestServer started on Port 64753 GrpcPort 19202 TClient is connected to server localhost:64753 PQClient connected to localhost:19202 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:34.540132Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:34.540164Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:34.541285Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:34.543366Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:34.551118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:34.552487Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:34.859882Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668853849886190:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.859915Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.860013Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668853849886202:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.860915Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:34.861428Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668853849886231:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.861444Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.862930Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668853849886204:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:34.910166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.918090Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.930415Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668853849886395:2514] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:34.938251Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.944751Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668853849886423:2360], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:34.945147Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=YTdmYWM0OWQtZjA3ODAwMDAtMWZhYjRmMTUtYTc0Mjc3Yzk=, ActorId: [1:7511668853849886187:2334], ActorState: ExecuteState, TraceId: 01jwtnhcp954d5fd2cev1b764m, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:34.945647Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668853849886551:2609] 2025-06-03T10:31:39.405946Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668853849885399:2061];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:39.405991Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:40.172934Z :WriteToTopic_Demo_1_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:40.178350Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:40.184184Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668879619690530:2688] connected; active server actors: 1 2025-06-03T10:31:40.184259Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-03T10:31:40.184407Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:40.184468Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:40.184726Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:40.185469Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:40.185556Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:40.185697Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:40.185736Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:40.185746Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:40.185748Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:40.185752Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06 ... e: StateIdle] Topic 'topic_A' partition 0 user test-consumer offset is set to 10 (startOffset 0) session test-consumer_21_1_66472088382830951_v1 2025-06-03T10:35:02.718997Z node 21 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037894, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:35:02.719000Z node 21 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:35:02.719005Z node 21 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:35:02.719007Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:35:02.719010Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000ctest-consumer 2025-06-03T10:35:02.719012Z node 21 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000utest-consumer 2025-06-03T10:35:02.719014Z node 21 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:35:02.719017Z node 21 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037894, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:35:02.719031Z node 21 :PERSQUEUE DEBUG: read.h:262: CacheProxy. 
Passthrough write request to KV 2025-06-03T10:35:02.721393Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 10 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:35:02.721419Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:35:02.721442Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 0 requestId: cookie: 2 2025-06-03T10:35:02.721483Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:652: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) initDone 1 event { Cookie: 2 } 2025-06-03T10:35:02.721491Z node 21 :PQ_READ_PROXY DEBUG: partition_actor.cpp:950: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 TopicId: Topic /Root/topic_A in database: Root, partition 0(assignId:1) commit done to position 10 endOffset 10 with cookie 2 2025-06-03T10:35:02.721518Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:696: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 replying for commits: assignId# 1, from# 2, to# 2, offset# 10 2025-06-03T10:35:02.726228Z :DEBUG: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] [] Committed response: { partitions_committed_offsets { partition_session_id: 1 committed_offset: 10 } } 2025-06-03T10:35:03.679110Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-03T10:35:03.679138Z :INFO: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1004 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:35:03.685694Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:2378: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 checking auth because of timeout 2025-06-03T10:35:03.685731Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth for : test-consumer 2025-06-03T10:35:03.685994Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 Handle describe topics response 2025-06-03T10:35:03.686016Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth is DEAD 2025-06-03T10:35:03.686036Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:1033: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:35:03.713919Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:459: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 checking auth because of timeout 2025-06-03T10:35:03.713973Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:41: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth for : 
test-consumer 2025-06-03T10:35:03.714179Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:131: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 Handle describe topics response 2025-06-03T10:35:03.714198Z node 21 :PQ_READ_PROXY DEBUG: read_init_auth_actor.cpp:68: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth is DEAD 2025-06-03T10:35:03.714218Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:299: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 auth ok: topics# 1, initDone# 1 2025-06-03T10:35:04.677668Z :INFO: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] Closing read session. Close timeout: 0.000000s 2025-06-03T10:35:04.677689Z :INFO: [/Root] Read/commit by partition streams (cluster:topic:partition:stream-id:read-offset:committed-offset): -:topic_A:0:1:9:10 2025-06-03T10:35:04.677700Z :INFO: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] Counters: { Errors: 0 CurrentSessionLifetimeMs: 2002 BytesRead: 1000000 MessagesRead: 10 BytesReadCompressed: 1000000 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:35:04.677721Z :NOTICE: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Close with zero timeout " } 2025-06-03T10:35:04.677731Z :DEBUG: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] [] Abort session to cluster 2025-06-03T10:35:04.677956Z :DEBUG: [/Root] 0x000072C6BC58B290 TDirectReadSessionManager ServerSessionId=test-consumer_21_1_66472088382830951_v1 Close 2025-06-03T10:35:04.678019Z :DEBUG: [/Root] 0x000072C6BC58B290 TDirectReadSessionManager ServerSessionId=test-consumer_21_1_66472088382830951_v1 Close 2025-06-03T10:35:04.678037Z :NOTICE: [/Root] [/Root] [2b1dc328-16f18889-a348f634-cfa0faac] Aborting read session. Description: SessionClosed { Status: ABORTED Issues: "
: Error: Aborted " } 2025-06-03T10:35:04.682461Z node 21 :PQ_READ_PROXY DEBUG: read_session_actor.cpp:122: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 grpc read done: success# 0, data# { } 2025-06-03T10:35:04.682484Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:125: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 grpc read failed 2025-06-03T10:35:04.682494Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:92: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 grpc closed 2025-06-03T10:35:04.682506Z node 21 :PQ_READ_PROXY INFO: read_session_actor.cpp:377: session cookie 1 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 is DEAD 2025-06-03T10:35:04.682637Z node 21 :PQ_READ_PROXY DEBUG: direct_read_actor.cpp:83: Direct read proxy [21:7511669747242506497:2551]: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 grpc read done: success# 0, data# { } 2025-06-03T10:35:04.682650Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:86: Direct read proxy [21:7511669747242506497:2551]: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1grpc read failed 2025-06-03T10:35:04.682657Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:65: Direct read proxy [21:7511669747242506497:2551]: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 grpc closed 2025-06-03T10:35:04.682661Z node 21 :PQ_READ_PROXY INFO: direct_read_actor.cpp:153: Direct read proxy [21:7511669747242506497:2551]: session cookie 2 consumer test-consumer session test-consumer_21_1_66472088382830951_v1 proxy is DEAD 2025-06-03T10:35:04.683339Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0] PartitionId [0] Generation [2] Write session: close. 
Timeout 0.000000s 2025-06-03T10:35:04.683346Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0] PartitionId [0] Generation [2] Write session will now close 2025-06-03T10:35:04.683353Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0] PartitionId [0] Generation [2] Write session: aborting 2025-06-03T10:35:04.683476Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0] PartitionId [0] Generation [2] Write session: gracefully shut down, all writes complete 2025-06-03T10:35:04.683482Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0] PartitionId [0] Generation [2] Write session: destroy 2025-06-03T10:35:04.683585Z node 21 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037895][topic_A] pipe [21:7511669747242506489:2546] disconnected; active server actors: 1 2025-06-03T10:35:04.683604Z node 21 :PERSQUEUE_READ_BALANCER NOTICE: read_balancer__balancing.cpp:1673: [72075186224037895][topic_A] pipe [21:7511669747242506489:2546] client test-consumer disconnected session test-consumer_21_1_66472088382830951_v1 2025-06-03T10:35:04.683638Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2439: [PQ: 72075186224037894] Destroy direct read session test-consumer_21_1_66472088382830951_v1 2025-06-03T10:35:04.683644Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669747242506492:2549] destroyed 2025-06-03T10:35:04.683744Z node 21 :PQ_READ_PROXY DEBUG: caching_service.cpp:139: Direct read cache: server session deregistered: test-consumer_21_1_66472088382830951_v1 2025-06-03T10:35:04.684558Z node 21 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 5 sessionId: test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0 grpc read done: success: 0 data: 2025-06-03T10:35:04.684563Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 5 sessionId: test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0 grpc read failed 2025-06-03T10:35:04.684571Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 5 sessionId: test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0 grpc closed 2025-06-03T10:35:04.684574Z node 21 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 5 sessionId: test-message_group_id|cc93e6f7-bcf575c2-606d5038-e617d1bd_0 is DEAD 2025-06-03T10:35:04.684794Z node 21 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:35:04.687438Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [21:7511669738652571824:2519] destroyed 2025-06-03T10:35:04.687452Z node 21 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
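Note on the dumps in this section: each topic unittest dump executes the same cluster-bootstrap statement, logged inline after "=== Init DC:". It is reflowed here purely for readability; the statement text is verbatim from the log above, nothing is added:

    UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight)
    VALUES ("dc1", "localhost", true, true, 1000),
           ("dc2", "dc2.logbroker.yandex.net", false, true, 1000);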
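Note on the ESchemeOpAlterTransfer entries earlier in this section: the TransformLambda value is recorded there as an escaped one-line string, which is hard to read inline. Unescaped and reindented (the YQL text itself is unchanged from the logged string), it is:

    $__ydb_transfer_lambda = ($x) -> {
        RETURN CAST($x as String);
    };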
>> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::WriteToTopic_Demo_29_Query [GOOD] Test command err: 2025-06-03T10:31:31.351679Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668843017062536:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:31.351750Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000dea/r3tmp/tmpNLRaRh/pdisk_1.dat 2025-06-03T10:31:31.416964Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created TServer::EnableGrpc on GrpcPort 64355, node 1 2025-06-03T10:31:31.439263Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:31.439610Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668843017062372:2079] 1748946691350531 != 1748946691350534 2025-06-03T10:31:31.457662Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:31.457662Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000dea/r3tmp/yandex41ZN9Y.tmp 2025-06-03T10:31:31.457667Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000dea/r3tmp/yandex41ZN9Y.tmp 2025-06-03T10:31:31.457697Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:31.457749Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000dea/r3tmp/yandex41ZN9Y.tmp 2025-06-03T10:31:31.457803Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:31.461911Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:31.463040Z INFO: TTestServer started on Port 10378 GrpcPort 64355 TClient is connected to server localhost:10378 PQClient connected to localhost:64355 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:31:31.517198Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:31.529522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:31:31.868587Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668843017063184:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.868614Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668843017063195:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.868622Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:31.869545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:31.873161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668843017063198:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:31.927688Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.952271Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.966604Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668843017063392:2508] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:31.988711Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:31:31.991313Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668843017063409:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:31.991476Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZGYxYjgzZDItZDAwNjE4Ni1iNzU1MWUwZC1jOTM0MTE1OQ==, ActorId: [1:7511668843017063181:2334], ActorState: ExecuteState, TraceId: 01jwtnh9rw00e9dtk4b0jhjxep, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:31.992206Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668847312030848:2609] 2025-06-03T10:31:36.351735Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668843017062536:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:36.351774Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:37.223436Z :WriteToTopic_Demo_2_Query INFO: TTopicSdkTestSetup started 2025-06-03T10:31:37.228017Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:37.233437Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668868786867534:2690] connected; active server actors: 1 2025-06-03T10:31:37.233499Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. Added partitions [0] 2025-06-03T10:31:37.233602Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:37.233657Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:37.233841Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:37.234713Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:37.234806Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:37.234939Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:37.234975Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:37.234997Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:37.235001Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:37.235005Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31:37.235011Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:37.235025Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-03T10:31:37.244340Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-03T10:31:37.244383Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668868786867550:2447], now have 1 active actors on pipe 2025-06-03T10:31:37.281666Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668868786867533:2689], no ... ition: {0, {6, 281474976715679}, 100001}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id', Topic: 'topic_A', Partition: {0, {6, 281474976715679}, 100001}, SeqNo: 2, partNo: 13, Offset: 0 is stored on disk 2025-06-03T10:35:04.957721Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 0 messageNo: 1 requestId: cookie: 2 2025-06-03T10:35:04.957749Z node 6 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:35:04.958136Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-03T10:35:04.958194Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session got write response: acks { seq_no: 2 written_in_tx { } } write_statistics { persisting_time { nanos: 9000000 } min_queue_wait_time { } max_queue_wait_time { } partition_quota_wait_time { } topic_quota_wait_time { } } 2025-06-03T10:35:04.958205Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] OnAck: seqNo=2, txId={ydb://session/3?node_id=6&id=OGI5OThjYWQtMzJkOTMwZjUtZTFmNjAyYWUtOWZjZjY1YTI=, 01jwtnqstycrb50ezv48m4j5n2}, WriteCount=1, AckCount=1 2025-06-03T10:35:04.958451Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: acknowledged message 2 2025-06-03T10:35:04.959319Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3220: [PQ: 72075186224037894] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7511669756073498375 RawX2: 4503625397176767 } TxId: 281474976715680 Data { Operations { PartitionId: 0 Path: "/Root/topic_A" SupportivePartition: 100001 } Op: Commit SendingShards: 72075186224037894 ReceivingShards: 72075186224037894 Immediate: true WriteId { NodeId: 6 KeyId: 281474976715679 } } 2025-06-03T10:35:04.959335Z node 6 
:PERSQUEUE DEBUG: pq_impl.cpp:3246: [PQ: 72075186224037894] PartitionId {0, {6, 281474976715679}, 100001} for WriteId {6, 281474976715679} 2025-06-03T10:35:04.959342Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3335: [PQ: 72075186224037894] TxId 281474976715680 has WriteId {6, 281474976715679} 2025-06-03T10:35:04.959346Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3360: [PQ: 72075186224037894] immediate transaction 2025-06-03T10:35:04.959373Z node 6 :PERSQUEUE DEBUG: partition.cpp:1253: [PQ: 72075186224037894, Partition: {0, {6, 281474976715679}, 100001}, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoRequest 2025-06-03T10:35:04.959387Z node 6 :PERSQUEUE DEBUG: partition.cpp:1248: [PQ: 72075186224037894, Partition: {0, {6, 281474976715679}, 100001}, State: StateIdle] Send TEvPQ::TEvGetWriteInfoResponse 2025-06-03T10:35:04.959397Z node 6 :PERSQUEUE DEBUG: partition.cpp:1363: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Handle TEvPQ::TEvGetWriteInfoResponse 2025-06-03T10:35:04.959411Z node 6 :PERSQUEUE DEBUG: partition.cpp:2424: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::CommitWriteOperations TxId: (empty maybe) 2025-06-03T10:35:04.959423Z node 6 :PERSQUEUE DEBUG: partition.cpp:2451: [PQ: 72075186224037894, Partition: 0, State: StateIdle] t.WriteInfo->BodyKeys.size=1, t.WriteInfo->BlobsFromHead.size=0 2025-06-03T10:35:04.959428Z node 6 :PERSQUEUE DEBUG: partition.cpp:2452: [PQ: 72075186224037894, Partition: 0, State: StateIdle] Head=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0, NewHead=Offset 1 PartNo 0 PackedSize 0 count 0 nextOffset 1 batches 0 2025-06-03T10:35:04.959435Z node 6 :PERSQUEUE DEBUG: partition.cpp:2471: [PQ: 72075186224037894, Partition: 0, State: StateIdle] add key D0000100001_00000000000000000000_00000_0000000001_00013 2025-06-03T10:35:04.959451Z node 6 :PERSQUEUE DEBUG: partition.cpp:2482: [PQ: 72075186224037894, Partition: 0, State: StateIdle] PartitionedBlob.GetFormedBlobs().size=1 2025-06-03T10:35:04.959465Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:1049: [PQ: 72075186224037894, Partition: 0, State: StateIdle] writing blob: topic 'topic_A' partition 0 old key D0000100001_00000000000000000000_00000_0000000001_00013 new key d0000000000_00000000000000000001_00000_0000000001_00013 size 7001240 WTime 1748946904959 2025-06-03T10:35:04.959482Z node 6 :PERSQUEUE DEBUG: partition.cpp:3322: [PQ: 72075186224037894, Partition: 0, State: StateIdle] schedule TEvPersQueue::TEvProposeTransactionResult(COMPLETE), reason= 2025-06-03T10:35:04.959513Z node 6 :PERSQUEUE DEBUG: partition.cpp:2185: [PQ: 72075186224037894, Partition: 0, State: StateIdle] === DumpKeyValueRequest === 2025-06-03T10:35:04.959520Z node 6 :PERSQUEUE DEBUG: partition.cpp:2186: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- delete ---------------- 2025-06-03T10:35:04.959522Z node 6 :PERSQUEUE DEBUG: partition.cpp:2194: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- write ----------------- 2025-06-03T10:35:04.959525Z node 6 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] m0000000000ptest-message_group_id 2025-06-03T10:35:04.959527Z node 6 :PERSQUEUE DEBUG: partition.cpp:2197: [PQ: 72075186224037894, Partition: 0, State: StateIdle] i0000000000 2025-06-03T10:35:04.959529Z node 6 :PERSQUEUE DEBUG: partition.cpp:2199: [PQ: 72075186224037894, Partition: 0, State: StateIdle] --- rename ---------------- 2025-06-03T10:35:04.959531Z node 6 :PERSQUEUE DEBUG: partition.cpp:2202: [PQ: 72075186224037894, Partition: 0, 
State: StateIdle] D0000100001_00000000000000000000_00000_0000000001_00013, d0000000000_00000000000000000001_00000_0000000001_00013 2025-06-03T10:35:04.959533Z node 6 :PERSQUEUE DEBUG: partition.cpp:2204: [PQ: 72075186224037894, Partition: 0, State: StateIdle] =========================== 2025-06-03T10:35:04.959540Z node 6 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:35:04.959544Z node 6 :PERSQUEUE DEBUG: read.h:318: CacheProxy. Rename blob from D0000100001_00000000000000000000_00000_0000000001_00013 to d0000000000_00000000000000000001_00000_0000000001_00013 2025-06-03T10:35:04.960263Z node 6 :PERSQUEUE DEBUG: cache_eviction.h:345: Renaming head blob in L1. Old partition 100001 old offset 0 old count 1 new partition 0 new offset 1 new count 1 actorID [6:7511669756073498199:2480] 2025-06-03T10:35:04.960304Z node 6 :PERSQUEUE DEBUG: pq_l2_cache.cpp:179: PQ Cache (L2). Renamed. old Tablet '72075186224037894' partition 100001 offset 0 partno 0 count 1 parts 13, new Tablet '72075186224037894' partition 0 offset 1 partno 0 count 1 parts 13 2025-06-03T10:35:04.960326Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 0 WriteNewSizeFromSupportivePartitions# 7000382 2025-06-03T10:35:04.960361Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5170: [PQ: 72075186224037894] Handle TEvPQ::TEvTransactionCompleted WriteId {6, 281474976715679} 2025-06-03T10:35:04.960366Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5196: [PQ: 72075186224037894] send TEvPQ::TEvDeletePartition to partition {0, {6, 281474976715679}, 100001} 2025-06-03T10:35:04.960374Z node 6 :PERSQUEUE DEBUG: partition.cpp:3773: [PQ: 72075186224037894, Partition: {0, {6, 281474976715679}, 100001}, State: StateIdle] Handle TEvPQ::TEvDeletePartition 2025-06-03T10:35:04.960386Z node 6 :PERSQUEUE DEBUG: read.h:262: CacheProxy. Passthrough write request to KV 2025-06-03T10:35:04.960394Z node 6 :PERSQUEUE DEBUG: read.h:338: CacheProxy. Delete blobs from D0000100001(+) to D0000100002(-) 2025-06-03T10:35:04.960558Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5072: [PQ: 72075186224037894] Handle TEvLongTxService::TEvLockStatus LockId: 281474976715679 LockNode: 6 Status: STATUS_NOT_FOUND 2025-06-03T10:35:04.960569Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5087: [PQ: 72075186224037894] TxWriteInfo: WriteId {6, 281474976715679}, TxId 281474976715680, Status STATUS_SUBSCRIBED 2025-06-03T10:35:04.960571Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5097: [PQ: 72075186224037894] there is already a transaction TxId 281474976715680 for WriteId {6, 281474976715679} 2025-06-03T10:35:04.960817Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:5134: [PQ: 72075186224037894] Handle TEvPQ::TEvDeletePartitionDone {0, {6, 281474976715679}, 100001} 2025-06-03T10:35:04.960833Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3588: [PQ: 72075186224037894] send TEvUnsubscribeLock for WriteId {6, 281474976715679} 2025-06-03T10:35:04.960838Z node 6 :PERSQUEUE WARN: pq_impl.cpp:4207: [PQ: 72075186224037894] Unknown transaction 281474976715680 2025-06-03T10:35:04.960851Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037894] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:35:04.961116Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: close. 
Timeout 0.000000s 2025-06-03T10:35:04.961121Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session will now close 2025-06-03T10:35:04.961126Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: aborting 2025-06-03T10:35:04.961259Z :INFO: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: gracefully shut down, all writes complete 2025-06-03T10:35:04.961267Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0] PartitionId [0] Generation [1] Write session: destroy 2025-06-03T10:35:04.960958Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037894] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:35:04.961488Z node 6 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 3 sessionId: test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0 grpc read done: success: 0 data: 2025-06-03T10:35:04.961496Z node 6 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 3 sessionId: test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0 grpc read failed 2025-06-03T10:35:04.961504Z node 6 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 3 sessionId: test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0 grpc closed 2025-06-03T10:35:04.961507Z node 6 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 3 sessionId: test-message_group_id|f9fc69aa-ad1e407e-db70a101-10ecb60c_0 is DEAD 2025-06-03T10:35:04.961791Z node 6 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:35:04.961799Z node 6 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:35:04.961805Z node 6 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037894 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:35:04.961862Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [6:7511669756073498297:2501] destroyed 2025-06-03T10:35:04.961880Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [6:7511669756073498354:2501] destroyed 2025-06-03T10:35:04.961883Z node 6 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037894] server disconnected, pipe [6:7511669756073498294:2501] destroyed 2025-06-03T10:35:04.961892Z node 6 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037894, Partition: 0, State: StateIdle] TPartition::DropOwner. 
|71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/stress_tool/ydb_stress_tool |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ydb_stress_tool |71.8%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ydb_stress_tool >> test_storage_config.py::TestStorageConfig::test_cases[case_0] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_10] >> test_sql.py::TestCanonicalFolder1::test_case[pure/trivial_9.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-plan] >> test_tenants.py::TestTenants::test_create_remove_database[enable_alter_database_create_hive_first--true] [GOOD] |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_alter_ops.py::TestSchemeShardAlterTest::test_alter_table_add_and_remove_column_many_times_success [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q7.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-plan] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--true] [FAIL] |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--false] |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut |71.8%| [LD] {RESULT} $(B)/ydb/core/external_sources/hive_metastore/ut/ydb-core-external_sources-hive_metastore-ut >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_complete[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--false] |71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/query_replay/ydb_query_replay |71.8%| [LD] {RESULT} $(B)/ydb/tools/query_replay/ydb_query_replay |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay/ydb_query_replay |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_query_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] |71.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-case.test] [GOOD] >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] |71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |71.8%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |71.8%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |71.8%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_group_reconfiguration/ut_group_reconfiguration |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat |71.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_blob_depot_fat/blobstorage-ut_blobstorage-ut_blob_depot_fat >> 
test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--false] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-result_sets] |71.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_tenants.py::TestTenants::test_resolve_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] |71.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots |71.9%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_move_reboots/ydb-core-tx-schemeshard-ut_move_reboots >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-result_sets] >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] >> KqpScheme::MoveTableWithSerialTypes >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_storage_config.py::TestStorageConfig::test_cases[case_9] [GOOD] >> test_storage_config.py::TestStorageConfig::test_create_tablet >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] >> test_storage_config.py::TestStorageConfig::test_create_tablet [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Sinks_Olap_WriteToTopicAndTable_4_Query [GOOD] Test command err: 2025-06-03T10:31:33.536092Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668851175240049:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:33.537166Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d91/r3tmp/tmpfPicz2/pdisk_1.dat 2025-06-03T10:31:33.612973Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:33.657039Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:33.657073Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:33.664611Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:31:33.664998Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 17681, node 1 2025-06-03T10:31:33.686115Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is 
empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d91/r3tmp/yandexkzJfBX.tmp 2025-06-03T10:31:33.686131Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d91/r3tmp/yandexkzJfBX.tmp 2025-06-03T10:31:33.686216Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d91/r3tmp/yandexkzJfBX.tmp 2025-06-03T10:31:33.686268Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:33.696794Z INFO: TTestServer started on Port 6794 GrpcPort 17681 TClient is connected to server localhost:6794 PQClient connected to localhost:17681 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:33.750515Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.757693Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:33.772790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.852578Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710660, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:34.157156Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668855470208021:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.157175Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668855470208029:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.157180Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.158392Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:34.159705Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668855470208066:2343], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.159763Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:34.162592Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668855470208035:2340], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710662 completed, doublechecking } 2025-06-03T10:31:34.202254Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.212256Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.230654Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668855470208219:2505] txid# 281474976710665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:34.245777Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511668855470208235:2359], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:5:17: Error: At function: KiReadTable!
:5:17: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Versions]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:31:34.245934Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZWNhZjVmZDgtYWI0NmViMDQtZDQ5NmRkNGYtNzQ1MzU4YWU=, ActorId: [1:7511668855470208018:2334], ActorState: ExecuteState, TraceId: 01jwtnhc0a4x5hmb5nba6mmdrc, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:31:34.246358Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 5 column: 17 } message: "At function: KiReadTable!" end_position { row: 5 column: 17 } severity: 1 issues { position { row: 5 column: 17 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Versions]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 5 column: 17 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:31:34.283572Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subscribe to ClusterTracker from [1:7511668855470208395:2608] 2025-06-03T10:31:38.536273Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668851175240049:2148];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:38.536314Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:39.455050Z :Sinks_Oltp_WriteToTopic_1_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:39.459830Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:39.468123Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668876945045092:2698] connected; active server actors: 1 2025-06-03T10:31:39.468229Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-03T10:31:39.468569Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:39.468748Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:39.468795Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:39.468943Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:39.469110Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:39.469375Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:39.469445Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:39.469460Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:39.469464Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:39.469468Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31: ... ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatVersion: 0 Codecs { } TopicPath: "/Root/test-topic" YcCloudId: "" YcFolderId: "" YdbDatabaseId: "" YdbDatabasePath: "/Root" Partitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } ReadRuleGenerations: 0 AllPartitions { PartitionId: 0 Status: Active CreateVersion: 1 TabletId: 0 } Consumers { Name: "test-consumer" ReadFromTimestampsMs: 0 FormatVersion: 0 Codec { } ServiceType: "data-streams" Version: 0 Generation: 0 } } BootstrapConfig { } SourceActor { RawX1: 7511669767279704249 RawX2: 64424511578 } Partitions { Partition { PartitionId: 0 } } 2025-06-03T10:35:12.476061Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:35:12.477926Z node 15 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:35:12.479240Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:35:12.479353Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:35:12.479361Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 
72075186224037892] Try execute txs with state EXECUTED 2025-06-03T10:35:12.479365Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037892] TxId 281474976715673, State EXECUTED 2025-06-03T10:35:12.479372Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4283: [PQ: 72075186224037892] TxId 281474976715673 State EXECUTED FrontTxId 281474976715673 2025-06-03T10:35:12.479378Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3987: [PQ: 72075186224037892] TPersQueue::SendEvReadSetAckToSenders 2025-06-03T10:35:12.479382Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037892] TxId 281474976715673, NewState WAIT_RS_ACKS 2025-06-03T10:35:12.479385Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4261: [PQ: 72075186224037892] TxId 281474976715673 moved from EXECUTED to WAIT_RS_ACKS 2025-06-03T10:35:12.479390Z node 15 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715673] PredicateAcks: 0/0 2025-06-03T10:35:12.479392Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4537: [PQ: 72075186224037892] HaveAllRecipientsReceive 1, AllSupportivePartitionsHaveBeenDeleted 1 2025-06-03T10:35:12.479394Z node 15 :PERSQUEUE DEBUG: transaction.cpp:366: [TxId: 281474976715673] PredicateAcks: 0/0 2025-06-03T10:35:12.479397Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4598: [PQ: 72075186224037892] add an TxId 281474976715673 to the list for deletion 2025-06-03T10:35:12.479400Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4226: [PQ: 72075186224037892] TxId 281474976715673, NewState DELETING 2025-06-03T10:35:12.479407Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3832: [PQ: 72075186224037892] delete key for TxId 281474976715673 2025-06-03T10:35:12.479424Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:3633: [PQ: 72075186224037892] Send TEvKeyValue::TEvRequest (WRITE_TX_COOKIE) 2025-06-03T10:35:12.479985Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:1231: [PQ: 72075186224037892] Handle TEvKeyValue::TEvResponse (WRITE_TX_COOKIE) 2025-06-03T10:35:12.479991Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4291: [PQ: 72075186224037892] Try execute txs with state DELETING 2025-06-03T10:35:12.479993Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4336: [PQ: 72075186224037892] TxId 281474976715673, State DELETING 2025-06-03T10:35:12.479999Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:4548: [PQ: 72075186224037892] delete TxId 281474976715673 2025-06-03T10:35:12.483907Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: try to update token 2025-06-03T10:35:12.484229Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session: Do CDS request 2025-06-03T10:35:12.484238Z :INFO: [/Root] MessageGroupId [src] SessionId [] Start write session. 
Will connect to endpoint: localhost:10059 2025-06-03T10:35:12.490208Z :DEBUG: [/Root] MessageGroupId [src] SessionId [] Write session: send init request: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:35:12.490915Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:35:12.490934Z node 15 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1 2025-06-03T10:35:12.491196Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: grpc read done: success: 1 data: init_request { topic: "test-topic" message_group_id: "src" } 2025-06-03T10:35:12.491221Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1 topic: "test-topic" message_group_id: "src" from ipv6:[::1]:39778 2025-06-03T10:35:12.491227Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1 sessionId= userAgent="pqv1 server" ip=ipv6:[::1]:39778 proto=v1 topic=test-topic durationSec=0 2025-06-03T10:35:12.491231Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:35:12.492539Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1 sessionId: describe result for acl check 2025-06-03T10:35:12.492598Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:35:12.492601Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:35:12.492603Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:35:12.492619Z node 15 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [15:7511669793049509175:2468] (SourceId=src, PreferedPartition=(NULL)) ReplyResult: Partition=0, SeqNo=0 2025-06-03T10:35:12.492625Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. 
session cookie: 1 sessionId: partition: 0 expectedGeneration: (NULL) 2025-06-03T10:35:12.492974Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037892 (partition=0) TEvClientConnected Status OK, TabletId: 72075186224037892, NodeId 15, Generation: 1 2025-06-03T10:35:12.492993Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [15:7511669793049509178:2468], now have 1 active actors on pipe 2025-06-03T10:35:12.493069Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-03T10:35:12.493079Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-03T10:35:12.493124Z node 15 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 generated for partition 0 topic 'test-topic' owner src 2025-06-03T10:35:12.493162Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 0 2025-06-03T10:35:12.493187Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:35:12.493311Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'test-topic' requestId: 2025-06-03T10:35:12.493319Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037892] got client message batch for topic 'test-topic' partition 0 2025-06-03T10:35:12.493339Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'test-topic' partition: 0 messageNo: 0 requestId: cookie: 0 2025-06-03T10:35:12.493368Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1 partition: 0 MaxSeqNo: 0 sessionId: src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 2025-06-03T10:35:12.495066Z :INFO: [/Root] MessageGroupId [src] SessionId [] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748946912495 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:35:12.495107Z :INFO: [/Root] MessageGroupId [src] SessionId [] Write session established. Init response: session_id: "src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0" topic: "test-topic" 2025-06-03T10:35:12.495286Z :INFO: [/Root] MessageGroupId [src] SessionId [src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0] Write session: close. 
Timeout = 0 ms 2025-06-03T10:35:12.495292Z :INFO: [/Root] MessageGroupId [src] SessionId [src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0] Write session will now close 2025-06-03T10:35:12.495296Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0] Write session: aborting 2025-06-03T10:35:12.495410Z :INFO: [/Root] MessageGroupId [src] SessionId [src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:35:12.495415Z :DEBUG: [/Root] MessageGroupId [src] SessionId [src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0] Write session: destroy 2025-06-03T10:35:12.505762Z :Sinks_Olap_WriteToTopicAndTable_4_Query INFO: Topic created 2025-06-03T10:35:12.506577Z node 15 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1 sessionId: src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 grpc read done: success: 0 data: 2025-06-03T10:35:12.506589Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 1 sessionId: src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 grpc read failed 2025-06-03T10:35:12.506602Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 1 sessionId: src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 grpc closed 2025-06-03T10:35:12.506606Z node 15 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 1 sessionId: src|e3bdb767-7f5fe290-6d3f38cd-d049a23b_0 is DEAD 2025-06-03T10:35:12.506901Z node 15 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:35:12.507583Z node 15 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [15:7511669793049509178:2468] destroyed 2025-06-03T10:35:12.507660Z node 15 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. 
|71.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large |71.9%| [LD] {RESULT} $(B)/ydb/core/sys_view/ut_large/ydb-core-sys_view-ut_large >> test_sql.py::TestCanonicalFolder1::test_case[range_skip_take.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q8.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-plan] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] >> KqpScheme::MoveTableWithSerialTypes [GOOD] >> KqpScheme::ModifyPermissions >> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--false-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--true-YDB] |71.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test |71.9%| [LD] {RESULT} $(B)/ydb/core/kesus/tablet/quoter_performance_test/quoter_performance_test >> KqpAcl::FailNavigate |71.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--true] >> test_storage_config.py::TestStorageConfig::test_cases[case_10] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_11] |71.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |71.9%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris |71.9%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_osiris/ydb-core-blobstorage-ut_blobstorage-ut_osiris >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--false] [GOOD] >> test_system_views.py::TestPartitionStats::test_case >> KqpAcl::FailNavigate [GOOD] >> KqpAcl::FailResolve >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-result_sets] >> test_tenants.py::TestTenants::test_create_tables[enable_alter_database_create_hive_first--true] [GOOD] >> KqpOlapScheme::InvalidColumnInTieringRule >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] >> KqpScheme::ModifyPermissions [GOOD] >> KqpScheme::ModifyUnknownPermissions >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-result_sets] >> test_user_administration.py::test_database_admin_cant_change_database_admin_user[unblock] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_owner >> 
test_user_administration.py::test_database_admin_can_create_user [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-subgroup] >> test_user_administration.py::test_database_admin_cant_change_database_owner [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[dbadmin] |71.9%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/query_replay_yt/query_replay_yt |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/query_replay_yt/query_replay_yt |72.0%| [LD] {RESULT} $(B)/ydb/tools/query_replay_yt/query_replay_yt >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-subgroup] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-user] >> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--false] >> KqpAcl::FailResolve [GOOD] >> KqpAcl::FailedReadAccessDenied >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteradmin-True] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteruser-False] >> test_user_administration.py::test_user_can_change_password_for_himself[dbadmin] [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[ordinaryuser] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[add-user] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-admin-group] >> KqpScheme::ModifyUnknownPermissions [GOOD] >> KqpScheme::ModifyPermissionsByRelativePath >> test_users_groups_with_acl.py::test_query_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[clusteruser-False] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[dbadmin-True] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--false-YDB] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-admin-group] [GOOD] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] >> test_user_administration.py::test_user_can_change_password_for_himself[ordinaryuser] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[select_result_limit.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/ct.script-script] >> test_auth_system_views.py::test_tenant_auth_groups_access[dbadmin-True] [GOOD] >> test_auth_system_views.py::test_tenant_auth_groups_access[ordinaryuser-False] >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/q9.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/script_params.script-script] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] >> 
KqpOlapScheme::InvalidColumnInTieringRule [GOOD] >> KqpOlapScheme::DropThenAddColumnIndexation >> test_auth_system_views.py::test_tenant_auth_groups_access[ordinaryuser-False] [GOOD] >> KqpAcl::FailedReadAccessDenied [GOOD] >> KqpAcl::FailedWriteAccessDenied >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> KqpScheme::ModifyPermissionsByRelativePath [GOOD] >> KqpScheme::ModifyPermissionsByRelativePathQueryClient |72.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |72.0%| [LD] {RESULT} $(B)/ydb/core/blobstorage/ut_blobstorage/ut_replication/core-blobstorage-ut_blobstorage-ut_replication |72.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots |72.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_pq_reboots/ydb-core-tx-schemeshard-ut_pq_reboots >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_10_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 10] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--true] >> KqpAcl::FailedWriteAccessDenied [GOOD] >> KqpAcl::AclRevoke-UseSink-IsOlap >> test_sql.py::TestCanonicalFolder1::test_case[simple/ct.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[simple/script_params.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[table_types.script-script] >> KqpScheme::ModifyPermissionsByRelativePathQueryClient [GOOD] >> KqpScheme::ModifyPermissionsByIncorrectPaths >> test_storage_config.py::TestStorageConfig::test_cases[case_11] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_12] >> test_system_views.py::TestPartitionStats::test_case [GOOD] |72.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_3_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 3] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> KqpScheme::ModifyPermissionsByIncorrectPaths [GOOD] >> KqpAcl::AclRevoke-UseSink-IsOlap [GOOD] >> KqpAcl::AclRevoke+UseSink-IsOlap >> RetryPolicy::TWriteSession_RetryOnTargetCluster [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster >> TBSVWithReboots::AlterAssignDrop >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-result_sets] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/kqp/ut/scheme/unittest >> KqpScheme::ModifyPermissionsByIncorrectPaths [GOOD] Test command err: Trying to start YDB, gRPC: 18666, MsgBus: 26701 2025-06-03T10:35:12.843858Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669792379846841:2081];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:35:12.844100Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016d0/r3tmp/tmpXBoZmR/pdisk_1.dat 2025-06-03T10:35:13.004572Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18666, node 1 2025-06-03T10:35:13.081589Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:13.081605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:13.081607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:13.081663Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26701 2025-06-03T10:35:13.134266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:13.134305Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:35:13.135351Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:26701 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:35:13.252812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:35:13.261954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:35:13.278856Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:13.330313Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:13.417522Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:13.454234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:13.853874Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669796674815734:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:13.853921Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:13.924234Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:35:13.945138Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:35:13.971346Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:35:13.988646Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:35:14.010503Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:35:14.029839Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:35:14.053182Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:35:14.078859Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669800969783685:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:14.078887Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:14.079061Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669800969783690:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:14.080306Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:35:14.083765Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669800969783692:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:35:14.141993Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669800969783743:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:35:14.383870Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 17941, MsgBus: 24313 2025-06-03T10:35:14.993185Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669798456463371:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:35:14.997908Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016d0/r3tmp/tmpNTu3ZO/pdisk_1.dat 2025-06-03T10:35:15.041392Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:35:15.041746Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669798456463214:2079] 1748946914991737 != 1748946914991740 TServer::EnableGrpc on GrpcPort 17941, node 2 2025-06-03T10:35:15.078307Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:15.078323Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:15.078325Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:15.078386Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24313 2025-06-03T10:35:15.115280Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:15.115313Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:35:15.116392Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24313 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir ... 
hed;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:35:21.465195Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:35:21.465209Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:35:21.465226Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:35:21.465239Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:35:21.465248Z node 5 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:35:21.465263Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:35:21.465270Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:35:21.465511Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:35:21.465525Z node 5 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037922;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:35:21.511190Z node 5 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037922;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715672;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715672; 2025-06-03T10:35:21.534956Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:35:21.543311Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 8626, MsgBus: 12441 2025-06-03T10:35:21.856380Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511669831303051025:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:35:21.856425Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016d0/r3tmp/tmpkjn2T4/pdisk_1.dat 2025-06-03T10:35:21.878365Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 
TServer::EnableGrpc on GrpcPort 8626, node 6 2025-06-03T10:35:21.893633Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:21.893649Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:21.893652Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:21.893737Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12441 TClient is connected to server localhost:12441 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:35:21.960542Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:21.960579Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:35:21.961648Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:35:21.961679Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:21.967957Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:21.991478Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:22.022519Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:35:22.038666Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.334144Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669835598019923:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:22.334171Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:22.347376Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.359928Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.420603Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.434498Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.449388Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.465189Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.482057Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:35:22.506132Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669835598020576:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:22.506161Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:22.506274Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511669835598020581:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:22.507333Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:35:22.511797Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511669835598020583:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:35:22.588688Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669835598020634:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:35:22.847680Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511669835598020915:3571] txid# 281474976715673, issues: { message: "Path does not exist" issue_code: 200200 severity: 1 } 2025-06-03T10:35:22.847931Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715674:0, at schemeshard: 72057594046644480 >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] >> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[table_types.script-script] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-plan] |72.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut |72.0%| [LD] {RESULT} $(B)/ydb/tools/stress_tool/ut/ydb-tools-stress_tool-ut >> KqpAcl::AclRevoke+UseSink-IsOlap [GOOD] >> KqpAcl::AclRevoke-UseSink+IsOlap |72.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |72.0%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots |72.0%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_split_merge_reboots/ydb-core-tx-schemeshard-ut_split_merge_reboots >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--true] >> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--true] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_dict_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-result_sets] >> test_system_views.py::TestQueryMetrics::test_case >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] |72.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_9_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 9] [GOOD] >> 
alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_13_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 13] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] |72.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_5_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 5] [GOOD] >> KqpAcl::AclRevoke-UseSink+IsOlap [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-result_sets] >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--false] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-plan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpAcl::AclRevoke-UseSink+IsOlap [GOOD] Test command err: Trying to start YDB, gRPC: 9103, MsgBus: 5795 2025-06-03T10:35:15.130473Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669802792479944:2264];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:35:15.130923Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016d6/r3tmp/tmpn2QeU7/pdisk_1.dat 2025-06-03T10:35:15.254479Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:15.254512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:35:15.258478Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:35:15.261630Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511669802792479711:2079] 1748946915125510 != 1748946915125513 2025-06-03T10:35:15.264926Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 9103, node 1 2025-06-03T10:35:15.291588Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:15.291603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:15.291606Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:15.291663Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5795 TClient is connected to server localhost:5795 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:35:15.390167Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.409906Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.412894Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:15.462371Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:15.527920Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:15.558812Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:35:15.704792Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669802792481354:2404], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:15.704828Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:15.768016Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.779055Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.789635Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.802170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.817359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.835905Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.850352Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710669:0, at schemeshard: 72057594046644480 2025-06-03T10:35:15.870873Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669802792482005:2467], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:15.870901Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:15.871085Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669802792482010:2470], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:15.872568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710670:3, at schemeshard: 72057594046644480 2025-06-03T10:35:15.877869Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710670, at schemeshard: 72057594046644480 2025-06-03T10:35:15.878025Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511669802792482012:2471], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710670 completed, doublechecking } 2025-06-03T10:35:15.942848Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511669802792482063:3405] txid# 281474976710671, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:35:16.131535Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511669807087449644:3580], for# user0@builtin, access# DescribeSchema 2025-06-03T10:35:16.131565Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511669807087449644:3580], for# user0@builtin, access# DescribeSchema 2025-06-03T10:35:16.134229Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669807087449641:2514], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:2:13: Error: At function: KiReadTable!
:2:13: Error: Cannot find table 'db.[/Root/TwoShard]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:35:16.134959Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=Y2EwODQ5ZTktYzJhZjZkNzAtZGE5YzFlZmYtYTgxZmE1MDc=, ActorId: [1:7511669807087449632:2509], ActorState: ExecuteState, TraceId: 01jwtnr4rvapnjf34bv81hg7yv, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: Trying to start YDB, gRPC: 9578, MsgBus: 20831 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016d6/r3tmp/tmpwNUHi6/pdisk_1.dat 2025-06-03T10:35:16.605847Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:35:16.620228Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:35:16.623255Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511669808746752839:2079] 1748946916584222 != 1748946916584225 TServer::EnableGrpc on GrpcPort 9578, node 2 2025-06-03T10:35:16.641556Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:16.641569Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:16.641571Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:16.641628Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20831 2025-06-03T10:35:16.696722Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:16.696754Z node ... 0:3088], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:1:13: Error: At function: KiWriteTable!
:1:13: Error: Cannot find table 'db.[/Root/test_acl]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:35:26.594764Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=7&id=NzZiM2I2NGEtZDRhZTM2OTktNTY4ZGYzNWMtODc5NzU4ODk=, ActorId: [7:7511669851755569182:3013], ActorState: ExecuteState, TraceId: 01jwtnrezy0p4z844q9sa5e1tp, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:35:26.610150Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715707:0, at schemeshard: 72057594046644480 2025-06-03T10:35:26.634809Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715710. Ctx: { TraceId: 01jwtnrf0vad8xta0nq33ek49t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.638689Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715711. Ctx: { TraceId: 01jwtnrf0vad8xta0nq33ek49t, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.641836Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715711;tx_id=281474976715711;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715711; 2025-06-03T10:35:26.646046Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715712. Ctx: { TraceId: 01jwtnrf1m3ex2qbqkpe039wtf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.650403Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715713. Ctx: { TraceId: 01jwtnrf1m3ex2qbqkpe039wtf, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.653440Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715713;tx_id=281474976715713;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715713; 2025-06-03T10:35:26.656921Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715714. Ctx: { TraceId: 01jwtnrf1z6v911h24ap53204j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.661514Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715715. Ctx: { TraceId: 01jwtnrf1z6v911h24ap53204j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:35:26.667955Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715715;tx_id=281474976715715;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715715; 2025-06-03T10:35:26.674272Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715716. Ctx: { TraceId: 01jwtnrf2ga3azw6yjq6yhyeqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.678449Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715717. Ctx: { TraceId: 01jwtnrf2ga3azw6yjq6yhyeqy, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.682272Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715717;tx_id=281474976715717;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715717; 2025-06-03T10:35:26.686731Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715718. Ctx: { TraceId: 01jwtnrf2xesksf9ftgm4yn11x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.691004Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715719. Ctx: { TraceId: 01jwtnrf2xesksf9ftgm4yn11x, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.693633Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715719;tx_id=281474976715719;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715719; 2025-06-03T10:35:26.697697Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715720. Ctx: { TraceId: 01jwtnrf38f8vfpqffjffas9e2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.700919Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715721. Ctx: { TraceId: 01jwtnrf38f8vfpqffjffas9e2, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.707338Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715721;tx_id=281474976715721;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715721; 2025-06-03T10:35:26.711213Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715722. Ctx: { TraceId: 01jwtnrf3p41nbyk3g7q8aa9g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:35:26.714859Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715723. Ctx: { TraceId: 01jwtnrf3p41nbyk3g7q8aa9g1, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.721075Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715723;tx_id=281474976715723;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715723; 2025-06-03T10:35:26.725102Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715724. Ctx: { TraceId: 01jwtnrf4487sev3ewd8g37fv3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.728721Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715725. Ctx: { TraceId: 01jwtnrf4487sev3ewd8g37fv3, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.735537Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715725;tx_id=281474976715725;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715725; 2025-06-03T10:35:26.739935Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715726. Ctx: { TraceId: 01jwtnrf4jdvx5bc22wbz2ys0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.743536Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715727. Ctx: { TraceId: 01jwtnrf4jdvx5bc22wbz2ys0k, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.749251Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715727;tx_id=281474976715727;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715727; 2025-06-03T10:35:26.753532Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715728. Ctx: { TraceId: 01jwtnrf50ccfe472mv5fzyc4j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root 2025-06-03T10:35:26.757468Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715729. Ctx: { TraceId: 01jwtnrf50ccfe472mv5fzyc4j, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Database not set, use /Root 2025-06-03T10:35:26.763789Z node 7 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037928;tx_state=TTxProgressTx::Execute;tx_current=281474976715729;tx_id=281474976715729;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715729; 2025-06-03T10:35:26.767464Z node 7 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715730:0, at schemeshard: 72057594046644480 2025-06-03T10:35:26.770156Z node 7 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [7:7511669851755569990:5554], for# user0@builtin, access# UpdateRow 2025-06-03T10:35:26.770230Z node 7 :KQP_EXECUTER ERROR: kqp_table_resolver.cpp:275: TxId: 281474976715731. Error resolving keys for entry: { TableId: [OwnerId: 72057594046644480, LocalPathId: 17] Access: 2 SyncVersion: false Status: AccessDenied Kind: KindUnknown PartitionsCount: 0 DomainInfo From: (Uint64 : NULL) IncFrom: 1 To: () IncTo: 0 } 2025-06-03T10:35:26.770353Z node 7 :KQP_SESSION WARN: kqp_session_actor.cpp:2586: SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, ActorId: [7:7511669851755569720:3101], ActorState: ExecuteState, TraceId: 01jwtnrf5g2pg7mcmzbncy6wck, Create QueryResponse for error on request, msg: 2025-06-03T10:35:26.770686Z node 7 :KQP_EXECUTER ERROR: kqp_planner.cpp:119: TxId: 281474976715732. Ctx: { TraceId: 01jwtnrf5g2pg7mcmzbncy6wck, Database: , DatabaseId: /Root, SessionId: ydb://session/3?node_id=7&id=NDIzMjUwMWYtZTQ2ZmI3ZDItYWRjODEyZWEtZWYxMTc4ODI=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Database not set, use /Root >> test_storage_config.py::TestStorageConfig::test_cases[case_12] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_1] >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] |72.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAlterCompression::test_availability_data [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_list_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-plan] >> TxUsage::Sinks_Oltp_WriteToTopics_3_Table [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-result_sets] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query >> test_users_groups_with_acl.py::test_yql_create_user_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--false-YDB] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-5.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-6.test] >> test_dynamic_tenants.py::test_create_tenant_with_cpu[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] |72.0%| [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_table_and_path_with_name_clash_unsuccessful [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-result_sets] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-plan] >> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--false] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_16_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 16] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/empty_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-plan] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-result_sets] >> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--true] |72.0%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |72.0%| [LD] {RESULT} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tablet_flat/benchmark/core_tablet_flat_benchmark |72.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |72.1%| [LD] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_login_large/ydb-core-tx-schemeshard-ut_login_large |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_7_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 7] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_1] [GOOD] >> test_storage_config.py::TestStorageConfig::test_cases[case_2] >> test_tenants.py::TestTenants::test_create_drop_create_table2[enable_alter_database_create_hive_first--true] [GOOD] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-result_sets] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_pk.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-plan] >> 
test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_fixtures[enable_alter_database_create_hive_first--true] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--false] |72.1%| [LD] {default-linux-x86_64, relwithdebinfo} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |72.1%| [LD] {BAZEL_UPLOAD, SKIPPED} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test |72.1%| [LD] {RESULT} $(B)/ydb/core/quoter/quoter_service_bandwidth_test/quoter_service_bandwidth_test >> test_sql.py::TestCanonicalFolder1::test_case[simple/multi_select.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-plan] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-result_sets] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--true-YDB] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_multi_range_skip.sql-result_sets] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] >> test_storage_config.py::TestStorageConfig::test_cases[case_2] [GOOD] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_19_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 19] [GOOD] >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] [GOOD] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_tablet_channel_migration.py::TestChannelsOps::test_when_write_and_change_tablet_channel_then_can_read_from_tablet [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] [GOOD] >> KqpOlapScheme::DropThenAddColumnIndexation [GOOD] >> KqpOlapScheme::DropTtlColumn |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/serverless/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_dynamic_tenants.py::test_drop_tenant_without_nodes_could_continue[enable_alter_database_create_hive_first--true] [GOOD] >> KqpOlapScheme::DropTtlColumn [GOOD] >> KqpOlapScheme::InsertAddInsertDrop >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] |72.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery[enable_alter_database_create_hive_first--true] [GOOD] >> test_tenants.py::TestTenants::test_progress_when_tenant_tablets_run_on_dynamic_nodes[enable_alter_database_create_hive_first--true] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_21_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 21] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_create_remove_database_wait[enable_alter_database_create_hive_first--true] [GOOD] >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--false] >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--false] [XFAIL] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] >> test_tenants.py::TestTenants::test_force_delete_tenant_when_table_has_been_deleted[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] >> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--true] >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Table [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_storage_config.py::TestStorageConfig::test_cases[case_2] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_create_path_second_time_then_it_is_ok [GOOD] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_custom_coordinator_options[enable_alter_database_create_hive_first--false] [GOOD] >> 
test_users_groups_with_acl.py::test_yql_create_group_by_domain_admin[domain_login_only--true-YDB] [GOOD] >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_yql_create_user_by_tenant_admin[domain_login_only--true-YDB] [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> RetryPolicy::TWriteSession_SwitchBackToLocalCluster [GOOD] >> RetryPolicy::TWriteSession_SeqNoShift >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query >> TBSVWithReboots::AlterAssignDrop [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_create_users.py::test_create_user |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_user_administration.py::test_database_admin_cant_change_database_admin_group[remove-himself] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::AlterAssignDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:35:23.687166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:35:23.687203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:35:23.687209Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:35:23.687215Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: 
using default configuration 2025-06-03T10:35:23.687233Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:35:23.687238Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:35:23.687249Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:35:23.687267Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:35:23.687403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:35:23.687497Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:35:23.706515Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:35:23.706550Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:35:23.706666Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:35:23.709998Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:35:23.710164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:35:23.710204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:35:23.713468Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:35:23.713548Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:35:23.713739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:35:23.713847Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:35:23.714593Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:35:23.714673Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:35:23.715038Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:35:23.715057Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:35:23.715079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: 
TTxServerlessStorageBilling.Execute 2025-06-03T10:35:23.715090Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:35:23.715099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:35:23.715152Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:35:23.717276Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:35:23.746191Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:35:23.746314Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.746406Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:35:23.746466Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:35:23.746481Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.747805Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:35:23.747853Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:35:23.747927Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.747942Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:35:23.747950Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:35:23.747957Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:35:23.748751Z node 1 
:FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.748773Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:35:23.748780Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:35:23.749279Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.749314Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:35:23.749323Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:35:23.749332Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:35:23.750212Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:35:23.750929Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:35:23.751026Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:35:23.751278Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:35:23.751316Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:35:23.751325Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:35:23.751404Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
8446744073709551615 2025-06-03T10:35:49.156804Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:35:49.156898Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:35:49.156905Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:35:49.156918Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 3 2025-06-03T10:35:49.156961Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:35:49.156971Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 9 PathOwnerId: 72057594046678944, cookie: 1005 2025-06-03T10:35:49.156976Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1005 2025-06-03T10:35:49.156981Z node 97 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1005, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 9 2025-06-03T10:35:49.156985Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:35:49.156996Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1005, subscribers: 0 2025-06-03T10:35:49.157130Z node 97 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 3 TxId_Deprecated: 3 2025-06-03T10:35:49.157194Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 3 ShardOwnerId: 72057594046678944 ShardLocalIdx: 3, at schemeshard: 72057594046678944 2025-06-03T10:35:49.157240Z node 97 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 2025-06-03T10:35:49.157259Z node 97 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-03T10:35:49.157351Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:35:49.157675Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 
2025-06-03T10:35:49.158190Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:35:49.158312Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:35:49.158330Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1005 2025-06-03T10:35:49.158900Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:3 2025-06-03T10:35:49.158929Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:35:49.158940Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 1005, wait until txId: 1005 TestWaitNotification wait txId: 1005 2025-06-03T10:35:49.159035Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1005: send EvNotifyTxCompletion 2025-06-03T10:35:49.159046Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1005 2025-06-03T10:35:49.159125Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1005, at schemeshard: 72057594046678944 2025-06-03T10:35:49.159146Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1005: got EvNotifyTxCompletionResult 2025-06-03T10:35:49.159152Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1005: satisfy waiter [97:453:2432] TestWaitNotification: OK eventTxId 1005 TestWaitNotification wait txId: 1003 2025-06-03T10:35:49.159194Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1003: send EvNotifyTxCompletion 2025-06-03T10:35:49.159198Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1003 2025-06-03T10:35:49.159238Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1003, at schemeshard: 72057594046678944 2025-06-03T10:35:49.159249Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1003: got EvNotifyTxCompletionResult 2025-06-03T10:35:49.159253Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1003: satisfy waiter [97:456:2435] TestWaitNotification: OK eventTxId 1003 TestModificationResults wait txId: 1006 2025-06-03T10:35:49.160016Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/MyRoot" OperationType: ESchemeOpDropBlockStoreVolume Drop { Name: "BSVolume" } DropBlockStoreVolume { FillGeneration: 0 } } TxId: 1006 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:35:49.160065Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_drop_bsv.cpp:152: TDropBlockStoreVolume Propose, path: /MyRoot/BSVolume, pathId: 0, opId: 1006:0, at schemeshard: 72057594046678944 2025-06-03T10:35:49.160090Z node 97 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1006:1, propose status:StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/BSVolume', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp:161, at schemeshard: 72057594046678944 2025-06-03T10:35:49.160692Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1006, response: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp:161" TxId: 1006 SchemeshardId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:35:49.160730Z node 97 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1006, database: /MyRoot, subject: , status: StatusPathDoesNotExist, reason: Check failed: path: '/MyRoot/BSVolume', error: path hasn't been resolved, nearest resolved path: '/MyRoot' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard__operation_drop_bsv.cpp:161, operation: DROP BLOCK STORE VOLUME, path: /MyRoot/BSVolume TestModificationResult got TxId: 1006, wait until txId: 1006 TestWaitNotification wait txId: 1006 2025-06-03T10:35:49.160800Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1006: send EvNotifyTxCompletion 2025-06-03T10:35:49.160807Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1006 2025-06-03T10:35:49.160870Z node 97 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1006, at schemeshard: 72057594046678944 2025-06-03T10:35:49.160887Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1006: got EvNotifyTxCompletionResult 2025-06-03T10:35:49.160892Z node 97 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1006: satisfy waiter [97:463:2442] TestWaitNotification: OK eventTxId 1006 2025-06-03T10:35:49.160968Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/BSVolume" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:35:49.160998Z node 97 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/BSVolume" took 37us result status StatusPathDoesNotExist 2025-06-03T10:35:49.161035Z node 97 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/BSVolume\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot\' (id: [OwnerId: 72057594046678944, LocalPathId: 1]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/BSVolume" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot" LastExistedPrefixPathId: 1 LastExistedPrefixDescription { Self { Name: "MyRoot" PathId: 1 SchemeshardId: 72057594046678944 PathType: EPathTypeDir 
CreateFinished: true CreateTxId: 1 CreateStep: 5000001 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 wait until 72075186233409546 is deleted wait until 72075186233409547 is deleted wait until 72075186233409548 is deleted wait until 72075186233409549 is deleted wait until 72075186233409550 is deleted 2025-06-03T10:35:49.161107Z node 97 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409546 2025-06-03T10:35:49.161118Z node 97 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409547 2025-06-03T10:35:49.161128Z node 97 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409548 2025-06-03T10:35:49.161138Z node 97 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409549 2025-06-03T10:35:49.161147Z node 97 :HIVE INFO: tablet_helpers.cpp:1476: [72057594037968897] TEvSubscribeToTabletDeletion, 72075186233409550 Deleted tabletId 72075186233409546 Deleted tabletId 72075186233409547 Deleted tabletId 72075186233409548 Deleted tabletId 72075186233409549 Deleted tabletId 72075186233409550 >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[simple/null_select.sql-plan] [GOOD] >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--false] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/canonical/py3test >> test_sql.py::TestCanonicalFolder1::test_case[topsort/topsort_pk.sql-plan] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] |72.2%| [TA] $(B)/ydb/tests/functional/canonical/test-results/py3test/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |72.2%| [TA] {RESULT} $(B)/ydb/tests/functional/canonical/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_11_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 11] [GOOD] >> test_create_users.py::test_create_user [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_publish_into_schemeboard_with_common_ssring.py::TestOn3DC::test_create_dirs [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table3[enable_alter_database_create_hive_first--true] [XFAIL] >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] [FAIL] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false] >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/ut_blobstorage/ut_huge/unittest >> HugeBlobOnlineSizeChange::Compaction 2025-06-03 10:35:59,720 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-03 10:35:59,764 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 147218 46.4M 46.1M 23.3M test_tool run_ut @/home/runner/.ya/build/build_root/u93c/0029f5/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/te 147407 163M 161M 150M └─ ydb-core-blobstorage-ut_blobstorage-ut_huge --trace-path-append /home/runner/.ya/build/build_root/u93c/0029f5/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/un Test command err: fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 1 targetHuge3# 1 RandomSeed# 6092168653308578224 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 1 targetHuge3# 0 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 0 targetHuge3# 1 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 0 targetHuge2# 0 targetHuge3# 0 fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 1 targetHuge3# 1 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 1 targetHuge3# 0 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 0 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 
Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 
Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 1 {Location# {ChunkIdx: 2 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 small blob# 1 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 
BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 small blob# 1 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 2 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 1 to 6 small blob# 0 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 1 compacting levels checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 3 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 3 Level# 17} fresh1# 0 fresh2# 0 huge1# 0 huge2# 0 targetHuge# 1 fresh3# 2 huge3# 0 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 1 to 6 small blob# 0 compacting fresh checking parts in place mask# 2 {Location# {ChunkIdx: 1 Offset: 12288 Size: 140} Database# 0 RecordType# 2 Blo ... 
0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 1 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 1 huge3# 0 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 5 small blob# 1 writing partIdx# 0 to 6 writing partIdx# 1 to 6 checking parts in place mask# 7 {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 5 small blob# 1 writing partIdx# 0 to 6 writing partIdx# 1 to 6 checking parts in place mask# 7 {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 
1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 1 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 1 huge3# 1 targetHuge2# 1 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 5 small blob# 1 writing partIdx# 0 to 6 writing partIdx# 1 to 6 checking parts in place mask# 7 {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 1 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 1 small blob# 0 writing partIdx# 0 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 5 small blob# 1 writing partIdx# 0 to 6 writing partIdx# 1 to 6 checking parts in place mask# 7 {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 
RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 1 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 0 Size: 176} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 24576 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 36864 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 1 Offset: 49152 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 1 huge3# 1 targetHuge2# 0 targetHuge3# 0 small blob# 0 writing partIdx# 0 to 6 writing partIdx# 2 to 6 writing partIdx# 0 to 6 checking parts in place mask# 5 small blob# 1 writing partIdx# 0 to 6 writing partIdx# 1 to 6 checking parts in place mask# 7 {Location# {ChunkIdx: 1 Offset: 0 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} {Location# {ChunkIdx: 1 Offset: 12288 Size: 8197} Database# 0 RecordType# 0 BlobId# [1000:1:1:0:0:32768:0] SstId# 0 Level# 4294967295} small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} small blob# 0 writing partIdx# 0 to 6 small blob# 1 writing partIdx# 0 to 6 small blob# 0 compacting fresh checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 1 Level# 0} {Location# {ChunkIdx: 3 Offset: 12288 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 2 Level# 0} {Location# {ChunkIdx: 3 Offset: 0 Size: 8197} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 2 Level# 0} small blob# 0 compacting levels checking parts in place mask# 7 {Location# {ChunkIdx: 2 Offset: 28672 Size: 140} Database# 0 RecordType# 2 BlobId# [0:0:0:0:0:0:0] SstId# 4 Level# 17} {Location# {ChunkIdx: 2 Offset: 0 Size: 24581} Database# 0 RecordType# 1 BlobId# [1000:1:1:0:0:32768:0] SstId# 4 Level# 17} fresh1# 5 fresh2# 1 huge1# 3 huge2# 0 targetHuge# 0 fresh3# 2 huge3# 0 targetHuge2# 1 targetHuge3# 1 Traceback (most recent call last): File 
"library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/0029f5/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/0029f5/ydb/core/blobstorage/ut_blobstorage/ut_huge/test-results/unittest/testing_out_stuff/chunk0/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_query_create_group_by_domain_admin[domain_login_only--false-YDB] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_discovery_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_users_groups_with_acl.py::test_yql_create_group_by_tenant_admin[domain_login_only--false-YDB] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_register_tenant_and_force_drop_with_table[enable_alter_database_create_hive_first--true] [GOOD] >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_14_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 14] [GOOD] |72.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> TxUsage::Sinks_Oltp_WriteToTopics_3_Query [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table >> test_create_users_strict_acl_checks.py::test_create_user >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--false] [FAIL] >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--true] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_list_database_above[enable_alter_database_create_hive_first--false] [FAIL] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/column_family/compression/py3test >> alter_compression.py::TestAllCompression::test_all_supported_compression[zstd_17_compression-COMPRESSION = "zstd", COMPRESSION_LEVEL = 17] [GOOD] >> test_create_users_strict_acl_checks.py::test_create_user [GOOD] >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> RetryPolicy::TWriteSession_SeqNoShift [GOOD] >> RetryPolicy::RetryWithBatching |72.3%| [TA] $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] |72.3%| [TA] {RESULT} $(B)/ydb/tests/olap/column_family/compression/test-results/py3test/{meta.json ... results_accumulator.log} >> test.py::test_plans[column] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-6.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-7.test] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_create_drop_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Table [GOOD] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query >> test_dynamic_tenants.py::test_create_and_drop_the_same_tenant2[enable_alter_database_create_hive_first--true] [FAIL] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--false] >> test.py::test_plans[column] [GOOD] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_create_users_strict_acl_checks.py::test_create_user [GOOD] >> test.py::test_run_determentistic[column] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_plans[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
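Side note on the three UserWarning lines in the clickbench "Test command err" block above: they come from the bundled ydb Python SDK at import/configuration time and do not fail the run. Below is a minimal standard-library sketch (not SDK or ya code) of how such warnings can be captured or escalated in a harness; emit_sdk_style_warnings() is a hypothetical stand-in for the import that produces them, and the warning texts are copied verbatim from the log.

import warnings

def emit_sdk_style_warnings() -> None:
    # Hypothetical stand-in for "import ydb", which emits these at import time.
    warnings.warn("Used deprecated behavior, for fix ADD PEERDIR "
                  "kikimr/public/sdk/python/ydb_v3_new_behavior", UserWarning)
    warnings.warn("Global allow split transaction is deprecated behaviour.", UserWarning)
    warnings.warn("Global allow truncated response is deprecated behaviour.", UserWarning)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")        # record every warning instead of printing
    emit_sdk_style_warnings()

for w in caught:
    print(f"{w.category.__name__}: {w.message}")

# To turn these warnings into hard test failures rather than log noise:
# warnings.simplefilter("error", UserWarning)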
>> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--true] [GOOD] >> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--false] >> test.py::test_run_benchmark[column] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--true] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--false] [FAIL] >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] >> test_serverless.py::test_create_table[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] >> test_dynamic_tenants.py::test_create_tenant_no_cpu[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] >> RetryPolicy::RetryWithBatching [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/persqueue_public/ut/unittest >> RetryPolicy::RetryWithBatching [GOOD] Test command err: 2025-06-03T10:32:29.483952Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.483964Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.483969Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.000000s 2025-06-03T10:32:29.484113Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:32:29.484128Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484131Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484162Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008464s 2025-06-03T10:32:29.484301Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. Description: 2025-06-03T10:32:29.484307Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484310Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484322Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.008196s 2025-06-03T10:32:29.484414Z :ERROR: [db] [sessionid] [cluster] Got error. Status: INTERNAL_ERROR. 
Description: 2025-06-03T10:32:29.484420Z :DEBUG: [db] [sessionid] [cluster] In Reconnect, ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484422Z :DEBUG: [db] [sessionid] [cluster] New values: ReadSizeBudget = 52428800, ReadSizeServerDelta = 0 2025-06-03T10:32:29.484433Z :DEBUG: [db] [sessionid] [cluster] Reconnecting session to cluster cluster in 0.006269s 2025-06-03T10:32:29.539988Z :TWriteSession_TestPolicy INFO: Random seed for debugging is 1748946749539978 2025-06-03T10:32:29.697398Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669092029849856:2211];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.697465Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002504/r3tmp/tmp3QtRfe/pdisk_1.dat 2025-06-03T10:32:29.699460Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511669091525663716:2137];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:32:29.700298Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:32:29.749592Z node 2 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:29.752893Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:32:29.796493Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:32:29.798369Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.798412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.800553Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 61051, node 1 2025-06-03T10:32:29.846569Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/002504/r3tmp/yandex5dkRgx.tmp 2025-06-03T10:32:29.846584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/002504/r3tmp/yandex5dkRgx.tmp 2025-06-03T10:32:29.846668Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/002504/r3tmp/yandex5dkRgx.tmp 2025-06-03T10:32:29.846755Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:32:29.853864Z INFO: TTestServer started on Port 28281 GrpcPort 61051 2025-06-03T10:32:29.863218Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:32:29.863253Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:32:29.873721Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 
2025-06-03T10:32:29.876889Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28281 PQClient connected to localhost:61051 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:32:29.904632Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976720657:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... waiting... 2025-06-03T10:32:30.250337Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095820631271:2309], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.250367Z node 2 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [2:7511669095820631246:2306], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.250428Z node 2 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:32:30.256241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710657:3, at schemeshard: 72057594046644480 2025-06-03T10:32:30.265464Z node 2 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [2:7511669095820631275:2310], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710657 completed, doublechecking } 2025-06-03T10:32:30.416895Z node 2 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [2:7511669095820631303:2126] txid# 281474976710658, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 8], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:32:30.422101Z node 1 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [1:7511669096324817989:2340], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.422736Z node 1 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=1&id=ZDMwOGMzNTctN2QyNjk4ZTctMzBkMDJhODQtYzFlZWRmMTg=, ActorId: [1:7511669096324817946:2331], ActorState: ExecuteState, TraceId: 01jwtnk2v2869q1eaw84yfwkcd, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.422847Z node 2 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [2:7511669095820631310:2314], status: SCHEME_ERROR, issues:
: Error: Type annotation, code: 1030
:3:16: Error: At function: KiReadTable!
:3:16: Error: Cannot find table 'db.[/Root/PQ/Config/V2/Cluster]' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions., code: 2003 2025-06-03T10:32:30.423150Z node 2 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=2&id=OTNkOGNhMzItNzQyN2JlNmItMjA5Y2MzZWQtOTcwNWQyOWU=, ActorId: [2:7511669095820631244:2305], ActorState: ExecuteState, TraceId: 01jwtnk2s79nbvghw9602v4qxb, ReplyQueryCompileError, status SCHEME_ERROR remove tx with tx_id: 2025-06-03T10:32:30.423434Z node 2 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.423295Z node 1 :PERSQUEUE_CLUSTER_TRACKER ERROR: cluster_tracker.cpp:167: failed to list clusters: { Response { QueryIssues { message: "Type annotation" issue_code: 1030 severity: 1 issues { position { row: 3 column: 16 } message: "At function: KiReadTable!" end_position { row: 3 column: 16 } severity: 1 issues { position { row: 3 column: 16 } message: "Cannot find table \'db.[/Root/PQ/Config/V2/Cluster]\' because it does not exist or you do not have access permissions. Please check correctness of table path and user permissions." end_position { row: 3 column: 16 } issue_code: 2003 severity: 1 } } } TxMeta { } } YdbStatus: SCHEME_ERROR ConsumedRu: 1 } 2025-06-03T10:32:30.423806Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976720661:0, at schemeshard: 72057594046644480 2025-06-03T10:32:30.514567Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, ... offset 0 partNo 0 count 10 size 1208 2025-06-03T10:36:31.339211Z node 17 :PERSQUEUE DEBUG: cache_eviction.h:315: Caching head blob in L1. Partition 0 offset 0 count 10 size 1208 actorID [17:7511670120565541035:2612] 2025-06-03T10:36:31.339248Z node 17 :PERSQUEUE DEBUG: pq_l2_cache.cpp:120: PQ Cache (L2). Adding blob. Tablet '72075186224037892' partition 0 offset 0 partno 0 count 10 parts 0 size 1208 2025-06-03T10:36:31.339269Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:524: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::HandleWriteResponse writeNewSize# 1230 WriteNewSizeFromSupportivePartitions# 0 2025-06-03T10:36:31.339284Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339298Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 1, partNo: 0, Offset: 0 is stored on disk 2025-06-03T10:36:31.339310Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:36:31.339316Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 2, partNo: 0, Offset: 1 is stored on disk 2025-06-03T10:36:31.339319Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339329Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 3, partNo: 0, Offset: 2 is stored on disk 2025-06-03T10:36:31.339336Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339344Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 4, partNo: 0, Offset: 3 is stored on disk 2025-06-03T10:36:31.339353Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339358Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 5, partNo: 0, Offset: 4 is stored on disk 2025-06-03T10:36:31.339361Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339366Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 6, partNo: 0, Offset: 5 is stored on disk 2025-06-03T10:36:31.339374Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339379Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 7, partNo: 0, Offset: 6 is stored on disk 2025-06-03T10:36:31.339382Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339390Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 8, partNo: 0, Offset: 7 is stored on disk 2025-06-03T10:36:31.339397Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. 
Partition: 0 2025-06-03T10:36:31.339402Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 9, partNo: 0, Offset: 8 is stored on disk 2025-06-03T10:36:31.339405Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:58: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::ReplyWrite. Partition: 0 2025-06-03T10:36:31.339411Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Answering for message sourceid: '\0test-message-group-id', Topic: 'rt3.dc1--test-topic', Partition: 0, SeqNo: 10, partNo: 0, Offset: 9 is stored on disk 2025-06-03T10:36:31.339414Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'rt3.dc1--test-topic' partition: 0 messageNo: 1 requestId: cookie: 1 2025-06-03T10:36:31.339435Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:36:31.339448Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:36:31.339455Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:821: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Topic 'rt3.dc1--test-topic' partition 0 user user send read request for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 1 rrg 0 2025-06-03T10:36:31.339500Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:736: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 Topic 'rt3.dc1--test-topic' partition 0 user user offset 0 count 1 size 1024000 endOffset 10 max time lag 0ms effective offset 0 2025-06-03T10:36:31.339513Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:936: [PQ: 72075186224037892, Partition: 0, State: StateIdle] read cookie 0 added 0 blobs, size 0 count 0 last offset 0, current partition end offset: 10 2025-06-03T10:36:31.339556Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:953: [PQ: 72075186224037892, Partition: 0, State: StateIdle] Reading cookie 0. All data is from uncompacted head. 
2025-06-03T10:36:31.339565Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:420: FormAnswer for 0 blobs 2025-06-03T10:36:31.339583Z node 17 :PERSQUEUE DEBUG: partition_read.cpp:861: Topic 'rt3.dc1--test-topic' partition 0 user user readTimeStamp done, result 1748946991337 queuesize 0 startOffset 0 2025-06-03T10:36:31.339811Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session got write response: sequence_numbers: 1 sequence_numbers: 2 sequence_numbers: 3 sequence_numbers: 4 sequence_numbers: 5 sequence_numbers: 6 sequence_numbers: 7 sequence_numbers: 8 sequence_numbers: 9 sequence_numbers: 10 offsets: 0 offsets: 1 offsets: 2 offsets: 3 offsets: 4 offsets: 5 offsets: 6 offsets: 7 offsets: 8 offsets: 9 already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false already_written: false write_statistics { persist_duration_ms: 2 } 2025-06-03T10:36:31.339822Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 1 2025-06-03T10:36:31.339828Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 2 2025-06-03T10:36:31.339832Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 3 2025-06-03T10:36:31.339846Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 4 2025-06-03T10:36:31.339851Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 5 2025-06-03T10:36:31.339856Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 6 2025-06-03T10:36:31.339860Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 7 2025-06-03T10:36:31.339867Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 8 2025-06-03T10:36:31.339871Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 9 2025-06-03T10:36:31.339875Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: acknoledged message 10 2025-06-03T10:36:31.340005Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: close. 
Timeout = 0 ms 2025-06-03T10:36:31.340018Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session will now close 2025-06-03T10:36:31.340025Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: aborting 2025-06-03T10:36:31.340198Z :INFO: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: gracefully shut down, all writes complete 2025-06-03T10:36:31.340204Z :DEBUG: [/Root] MessageGroupId [test-message-group-id] SessionId [test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0] Write session: destroy 2025-06-03T10:36:31.340366Z node 17 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 7 sessionId: test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0 grpc read done: success: 0 data: 2025-06-03T10:36:31.340378Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:256: session v1 cookie: 7 sessionId: test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0 grpc read failed 2025-06-03T10:36:31.340387Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:232: session v1 cookie: 7 sessionId: test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0 grpc closed 2025-06-03T10:36:31.340393Z node 17 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:304: session v1 cookie: 7 sessionId: test-message-group-id|7391e852-8576a3f1-f6d71235-de8fa75c_0 is DEAD 2025-06-03T10:36:31.340736Z node 17 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037892 (partition=0) Received event: NActors::TEvents::TEvPoison 2025-06-03T10:36:31.340830Z node 17 :PERSQUEUE DEBUG: pq_impl.cpp:2905: [PQ: 72075186224037892] server disconnected, pipe [17:7511670129155475881:2649] destroyed 2025-06-03T10:36:31.340849Z node 17 :PERSQUEUE DEBUG: partition_write.cpp:138: [PQ: 72075186224037892, Partition: 0, State: StateIdle] TPartition::DropOwner. |72.3%| [TA] $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} |72.3%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/src/client/persqueue_public/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--false] [FAIL] >> TxUsage::Sinks_Oltp_WriteToTopics_4_Query [GOOD] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--true] >> TxUsage::Transactions_Conflict_On_SeqNo_Table >> TxUsage::Write_Random_Sized_Messages_In_Wide_Transactions_Query [GOOD] >> TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table >> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--false] [GOOD] >> LabeledDbCounters::TwoTabletsKillOneTablet [GOOD] >> ShowCreateView::Basic ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
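The write response in the block above carries parallel columns (sequence_numbers: 1..10, offsets: 0..9, ten already_written flags), which the session then unpacks into one "acknoledged message N" line per element. A sketch of that columnar-to-per-message mapping; WriteResponse here is a hypothetical stand-in for the protobuf message printed in the log:

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class WriteResponse:
    # Parallel arrays, as printed in the log above.
    sequence_numbers: List[int] = field(default_factory=list)
    offsets: List[int] = field(default_factory=list)
    already_written: List[bool] = field(default_factory=list)


def acknowledge(resp: WriteResponse):
    # Each column index describes one message: its seq_no, the partition
    # offset it landed at, and whether it was deduplicated server-side.
    for seq_no, offset, dup in zip(
        resp.sequence_numbers, resp.offsets, resp.already_written
    ):
        status = "skipped (already written)" if dup else f"stored at offset {offset}"
        print(f"acknowledged message {seq_no}: {status}")


acknowledge(WriteResponse(list(range(1, 11)), list(range(10)), [False] * 10))
```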
>> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--true] >> test_system_views.py::TestQueryMetrics::test_case [GOOD] >> ShowCreateView::Basic [GOOD] >> ShowCreateView::FromTable >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> test_system_views.py::TestQueryMetricsUniqueQueries::test_case >> test.py::test_run_determentistic[column] [GOOD] >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] [GOOD] >> test.py::test_plans[row] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_create_tenant_then_exec_yql[enable_alter_database_create_hive_first--false] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_determentistic[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> ShowCreateView::FromTable [GOOD] >> ShowCreateView::WithPairedTablePathPrefix >> TxUsage::Transactions_Conflict_On_SeqNo_Table [GOOD] >> TxUsage::Transactions_Conflict_On_SeqNo_Query >> test.py::test_run_benchmark[column] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-7.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-8.test] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] >> ShowCreateView::WithPairedTablePathPrefix [GOOD] >> test.py::test_plans[row] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_benchmark[column] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/sys_view/ut/unittest >> ShowCreateView::WithPairedTablePathPrefix [GOOD] Test command err: 2025-06-03T10:28:13.480186Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511667992883963428:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:13.480266Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002ae5/r3tmp/tmpJHZoB2/pdisk_1.dat 2025-06-03T10:28:13.565044Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:13.583590Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:13.583626Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:13.584813Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TServer::EnableGrpc on GrpcPort 27195, node 1 2025-06-03T10:28:13.597437Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:28:13.597454Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:28:13.597456Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:28:13.597513Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:26995 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:28:13.648038Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:28:13.654158Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:28:13.655671Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:13.662430Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511667990946662865:2073];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:28:13.662453Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/Database1/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; waiting... 2025-06-03T10:28:13.671121Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:28:13.671152Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:28:13.672343Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:28:13.672677Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:28:13.689048Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:28:13.689184Z node 3 :SYSTEM_VIEWS DEBUG: partition_stats.cpp:32: NSysView::TPartitionStatsCollector bootstrapped 2025-06-03T10:28:13.690062Z node 3 :SYSTEM_VIEWS INFO: processor_impl.cpp:41: [72075186224037893] OnActivateExecutor 2025-06-03T10:28:13.690068Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:15: [72075186224037893] TTxInitSchema::Execute 2025-06-03T10:28:13.693088Z node 3 :SYSTEM_VIEWS DEBUG: tx_init_schema.cpp:42: [72075186224037893] TTxInitSchema::Complete 2025-06-03T10:28:13.693111Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:136: [72075186224037893] TTxInit::Execute 2025-06-03T10:28:13.693262Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:257: [72075186224037893] Loading interval summaries: query count# 0, node ids count# 0, total count# 0 2025-06-03T10:28:13.693274Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:284: [72075186224037893] Loading interval metrics: query count# 0 2025-06-03T10:28:13.693279Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:362: [72075186224037893] Loading interval query tops: total query count# 0 2025-06-03T10:28:13.693284Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:408: [72075186224037893] Loading nodes to request: nodes count# 0, hashes count# 0 2025-06-03T10:28:13.693289Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 6, result count# 0 2025-06-03T10:28:13.693327Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 7, result count# 0 2025-06-03T10:28:13.693331Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 8, result count# 0 2025-06-03T10:28:13.693334Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 9, result count# 0 2025-06-03T10:28:13.693339Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 10, result count# 0 2025-06-03T10:28:13.693342Z node 3 
:SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 11, result count# 0 2025-06-03T10:28:13.693345Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 12, result count# 0 2025-06-03T10:28:13.693349Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 13, result count# 0 2025-06-03T10:28:13.693353Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 14, result count# 0 2025-06-03T10:28:13.693357Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:51: [72075186224037893] Loading results: table# 15, result count# 0 2025-06-03T10:28:13.693362Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 16, partCount count# 0 2025-06-03T10:28:13.693376Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:129: [72075186224037893] Loading results: table# 19, partCount count# 0 2025-06-03T10:28:13.693381Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 17, result count# 0 2025-06-03T10:28:13.693384Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 18, result count# 0 2025-06-03T10:28:13.693394Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 20, result count# 0 2025-06-03T10:28:13.693398Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:82: [72075186224037893] Loading results: table# 21, result count# 0 2025-06-03T10:28:13.693419Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:333: [72075186224037893] Reset: interval end# 2025-06-03T10:28:13.000000Z 2025-06-03T10:28:13.696147Z node 3 :SYSTEM_VIEWS DEBUG: tx_init.cpp:488: [72075186224037893] TTxInit::Complete 2025-06-03T10:28:13.696526Z node 3 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:14: [72075186224037893] TTxAggregate::Execute 2025-06-03T10:28:13.696540Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:136: [72075186224037893] PersistQueryResults: interval end# 2025-06-03T10:28:13.000000Z, query count# 0 2025-06-03T10:28:13.696545Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 8, interval end# 2025-06-03T10:28:13.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696548Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 10, interval end# 2025-06-03T10:28:13.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696552Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 12, interval end# 2025-06-03T10:28:13.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696555Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 14, interval end# 2025-06-03T10:28:13.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696558Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 9, interval end# 2025-06-03T11:00:00.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696561Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 11, interval end# 2025-06-03T11:00:00.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696564Z node 3 :SYSTEM_VIEWS DEBUG: processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 13, interval end# 2025-06-03T11:00:00.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.696567Z node 3 :SYSTEM_VIEWS DEBUG: 
processor_impl.cpp:105: [72075186224037893] PersistQueryTopResults: table id# 15, interval end# 2025-06-03T11:00:00.000000Z, query count# 0, persisted# 0 2025-06-03T10:28:13.698366Z node 3 :SYSTEM_VIEWS DEBUG: tx_aggregate.cpp:110: [72075186224037893] TTxAggregate::Complete 2025-06-03T10:28:13.701674Z node 3 :SYSTEM_VIEWS DEBUG: sysview_service.cpp:778: Handle TEvSysView::TEvRegisterDbCounters: service id# [3:7511667990946662903:2109], path id# [OwnerId: 72057594046644480, LocalPathId: 2], service# 2 2025-06-03T10:28:13.702509Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:860: Navigate by path id succeeded: service id# [3:7511667990946662903:2109], path id# [OwnerId: 72057594046644480, LocalPathId: 2], database# /Root/Database1 2025-06-03T10:28:13.702548Z node 3 :SYSTEM_VIEWS INFO: sysview_service.cpp:886: Navigate by database succeeded: service id# [3:7511667990946662903:2109], database# /Root/Database1, no sysview processor 2025-06-03T10:28:13.723709Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:20: [72075186224037893] TTxConfigure::Execute: database# /Root/Database1 2025-06-03T10:28:13.725849Z node 3 :SYSTEM_VIEWS INFO: partition_stats.cpp:522: NSysView::TPartitionStatsCollector initialized: domain key# [OwnerId: 72057594046644480, LocalPathId: 2], sysview processor id# 72075186224037893 2025-06-03T10:28:13.726379Z node 3 :SYSTEM_VIEWS DEBUG: tx_configure.cpp:30: [72075186224037893] TTxConfigure::Complete waiting... 2025-06-03T10:28:13.734180Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:28:13.756281Z node 1 :HIVE WARN: node_info.cpp:2 ... 
ype: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:36:45.144455Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:36:45.236379Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:36:45.321757Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:36:45.403033Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:36:45.480951Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:36:45.498115Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:36:45.933209Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715695:0, at schemeshard: 72057594046644480 2025-06-03T10:36:46.305213Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715710:0, at schemeshard: 72057594046644480 2025-06-03T10:36:46.319532Z node 24 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:0, at schemeshard: 72057594046644480 2025-06-03T10:36:46.359640Z node 24 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: [24:7511670194962345577:2870], owner: [24:7511670194962345574:2868], scan id: 0, table id: [1:0:0:show_create] 2025-06-03T10:36:46.359674Z node 24 :SYSTEM_VIEWS INFO: show_create.cpp:106: Scan prepared, actor: [24:7511670194962345577:2870] 2025-06-03T10:36:46.363217Z node 24 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [24:7511670194962345577:2870], row count: 1, finished: 1 2025-06-03T10:36:46.363254Z node 24 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [24:7511670194962345577:2870], owner: [24:7511670194962345574:2868], scan id: 0, table id: [1:0:0:show_create] 2025-06-03T10:36:47.590189Z node 29 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[29:7511670199015695092:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:36:47.590211Z node 29 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/002ae5/r3tmp/tmpUK9dDF/pdisk_1.dat 2025-06-03T10:36:47.611088Z node 29 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3126, node 29 2025-06-03T10:36:47.630396Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:36:47.630409Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:36:47.630411Z node 29 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:36:47.630468Z node 29 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:5095 TClient is connected to server localhost:5095 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:36:47.691020Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:36:47.691076Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:36:47.693069Z node 29 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(29, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:36:47.695785Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:36:47.947342Z node 29 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [29:7511670199015696126:2342], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:36:47.947361Z node 29 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [29:7511670199015696134:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:36:47.947367Z node 29 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:36:47.948219Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715658:3, at schemeshard: 72057594046644480 2025-06-03T10:36:47.957390Z node 29 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [29:7511670199015696140:2346], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715658 completed, doublechecking } 2025-06-03T10:36:48.018223Z node 29 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [29:7511670203310663514:2722] txid# 281474976715659, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:36:48.592127Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:1, at schemeshard: 72057594046644480 2025-06-03T10:36:48.660146Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:36:48.720677Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:1, at schemeshard: 72057594046644480 2025-06-03T10:36:48.792178Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715669:0, at schemeshard: 72057594046644480 2025-06-03T10:36:48.864170Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:36:48.928167Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:36:49.000281Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:36:49.016558Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:36:49.472346Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715695:0, at schemeshard: 72057594046644480 2025-06-03T10:36:49.832026Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976715710:0, at schemeshard: 72057594046644480 2025-06-03T10:36:49.845761Z node 29 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715711:3, at schemeshard: 72057594046644480 2025-06-03T10:36:49.883069Z node 29 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:45: Scan started, actor: 
[29:7511670207605633325:2869], owner: [29:7511670207605633321:2867], scan id: 0, table id: [1:0:0:show_create] 2025-06-03T10:36:49.883098Z node 29 :SYSTEM_VIEWS INFO: show_create.cpp:106: Scan prepared, actor: [29:7511670207605633325:2869] 2025-06-03T10:36:49.887800Z node 29 :SYSTEM_VIEWS DEBUG: scan_actor_base_impl.h:65: Sending scan batch, actor: [29:7511670207605633325:2869], row count: 1, finished: 1 2025-06-03T10:36:49.887825Z node 29 :SYSTEM_VIEWS INFO: scan_actor_base_impl.h:120: Scan finished, actor: [29:7511670207605633325:2869], owner: [29:7511670207605633321:2867], scan id: 0, table id: [1:0:0:show_create] >> test.py::test_run_benchmark[row] >> test_tenants.py::TestTenants::test_yql_operations_over_dynamic_nodes[enable_alter_database_create_hive_first--true] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_plans[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--false] |72.3%| [TA] $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] >> test.py::test_run_determentistic[row] |72.3%| [TA] {RESULT} $(B)/ydb/core/sys_view/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
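The WorkloadService warnings repeated through the unittest blocks above ("Resource pool default not found", a scheduled "doublechecking" retry, then a create rejected with "path exist, request accepts it") trace a benign first-use race: the fetch misses, a create is scheduled, and a concurrent creator wins, which the request treats as success. A sketch of that ensure-exists idiom; the statuses and helpers are illustrative, not YDB's internal API:

```python
import enum


class Status(enum.Enum):
    OK = "OK"
    NOT_FOUND = "NOT_FOUND"
    ALREADY_EXISTS = "ALREADY_EXISTS"


def ensure_default_pool(fetch, create):
    """Fetch-then-create, treating a losing race as success."""
    if fetch() is Status.OK:
        return Status.OK
    # NOT_FOUND: try to create the pool ourselves.
    st = create()
    if st is Status.ALREADY_EXISTS:
        # Someone else created it between our fetch and our create:
        # the log's "path exist, request accepts it" case.
        return Status.OK
    return st
```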
>> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--false] [GOOD] >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] |72.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> TxUsage::Transactions_Conflict_On_SeqNo_Query [GOOD] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint64-pk_types3-all_types3-index3] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. >> test.py::test_run_benchmark[row] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types0-all_types0-index0] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/tests/integration/sessions_pool/gtest >> YdbSdkSessionsPool::StressTestAsync/1 [GOOD] >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] |72.4%| [TA] $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} |72.4%| [TA] {RESULT} $(B)/ydb/public/sdk/cpp/tests/integration/sessions_pool/test-results/gtest/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_benchmark[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. 
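The partitioning tests above (test_uniform_partitiona, test_partition_at_keys over Int64/Uint64/String/Utf8 keys) pre-split tables either evenly across the key space or at explicit boundary keys. A sketch of the arithmetic behind the uniform case for an unsigned integer key; the helper is illustrative, not the tests' actual fixture:

```python
def uniform_split_points(partitions: int, key_bits: int = 64):
    """Boundary keys that cut an unsigned key space into equal ranges."""
    span = 1 << key_bits
    # `partitions` ranges need `partitions - 1` interior boundaries.
    return [span * i // partitions for i in range(1, partitions)]


# 4 partitions over a Uint64 key -> boundaries at 2^62, 2^63, 3 * 2^62.
assert uniform_split_points(4) == [2**62, 2**63, 3 * 2**62]
```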
>> test.py::test_run_determentistic[row] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_seamless_migration_to_exclusive_nodes[enable_alter_database_create_hive_first--true] [GOOD] >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/clickbench/py3test >> test.py::test_run_determentistic[row] [GOOD] Test command err: contrib/python/ydb/py3/ydb/__init__.py:43: UserWarning: Used deprecated behavior, for fix ADD PEERDIR kikimr/public/sdk/python/ydb_v3_new_behavior contrib/python/ydb/py3/ydb/global_settings.py:22: UserWarning: Global allow split transaction is deprecated behaviour. contrib/python/ydb/py3/ydb/global_settings.py:12: UserWarning: Global allow truncated response is deprecated behaviour. |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.4%| [TA] $(B)/ydb/tests/functional/clickbench/test-results/py3test/{meta.json ... results_accumulator.log} |72.4%| [TA] {RESULT} $(B)/ydb/tests/functional/clickbench/test-results/py3test/{meta.json ... results_accumulator.log} >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] [GOOD] >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> VectorIndexBuildTestReboots::BaseCase[TabletReboots] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::The_Transaction_Starts_On_One_Version_And_Ends_On_The_Other [GOOD] Test command err: 2025-06-03T10:31:33.143359Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668852681445994:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:33.143374Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:31:33.201945Z node 1 
:PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d9b/r3tmp/tmpR4uXWD/pdisk_1.dat 2025-06-03T10:31:33.254951Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668852681445820:2079] 1748946693142499 != 1748946693142502 2025-06-03T10:31:33.257147Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 22292, node 1 2025-06-03T10:31:33.265159Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d9b/r3tmp/yandext6tZm9.tmp 2025-06-03T10:31:33.265176Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d9b/r3tmp/yandext6tZm9.tmp 2025-06-03T10:31:33.265289Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d9b/r3tmp/yandext6tZm9.tmp 2025-06-03T10:31:33.265362Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:33.275977Z INFO: TTestServer started on Port 21617 GrpcPort 22292 TClient is connected to server localhost:21617 PQClient connected to localhost:22292 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: 2025-06-03T10:31:33.305319Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:33.305364Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:33.306873Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:33.318524Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:31:33.340028Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:31:33.342059Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 
2025-06-03T10:31:33.380420Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715660, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.617940Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668852681446618:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.617985Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668852681446645:2339], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.617999Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.619030Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:33.622273Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668852681446650:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:33.664143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.674184Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.705401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:31:33.723453Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668852681446936:2574] txid# 281474976715666, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668852681447004:2609] 2025-06-03T10:31:38.143644Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668852681445994:2201];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:38.143686Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:39.028054Z :TwoSessionOneConsumer_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:39.031955Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:39.036594Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668878451250987:2690] connected; active server actors: 1 2025-06-03T10:31:39.036799Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-03T10:31:39.036939Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:39.036989Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:39.037185Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:39.038082Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:39.038177Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:39.038407Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:39.038460Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:39.038470Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:39.038473Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:39.038476Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31:39.038482Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:39.038495Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-03T10:31:39.038584Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-03T10:31:39.038616Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668878451251003:2451], now have 1 active actors on pipe 2025-06-03T10:31:39.085133Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668878451250986:2689], now have 1 active actors on pipe 2025-06-03T10:31:39.088436Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3220: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7511668852681446260 RawX2: 4294969484 } TxId: 281474976715674 Config { TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } SourceIdMaxCounts: 6000000 } PartitionIds: 0 TopicName: "test-topic" Version: 0 RequireAuthWrite: true RequireAuthRead: true FormatV ... 
initializing step TInitDataStep 2025-06-03T10:37:11.297558Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:0:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-03T10:37:11.297561Z node 21 :PERSQUEUE INFO: partition_init.cpp:774: [topic_A:0:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:37:11.297562Z node 21 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateInit] SYNC INIT topic topic_A partitition {0, {1, 281474976715674}, 100000} so 0 endOffset 2 Head Offset 0 PartNo 0 PackedSize 500 count 2 nextOffset 2 batches 2 SYNC INIT sourceId test-message_group_id_3 seqNo 1 offset 1 SYNC INIT sourceId test-message_group_id_1 seqNo 1 offset 0 SYNC INIT HEAD KEY: D0000100000_00000000000000000000_00000_0000000001_00000| size 250 SYNC INIT HEAD KEY: D0000100000_00000000000000000001_00000_0000000001_00000| size 250 2025-06-03T10:37:11.297563Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:0:Initializer] Initializing completed. 2025-06-03T10:37:11.297568Z node 21 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-03T10:37:11.297568Z node 21 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037895, Partition: 0, State: StateInit] init complete for topic 'topic_A' partition 0 generation 2 [21:7511670303622572664:2505] 2025-06-03T10:37:11.297574Z node 21 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72075186224037895, Partition: 0, State: StateInit] SYNC INIT topic topic_A partitition 0 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:37:11.297578Z node 21 :PERSQUEUE DEBUG: partition.cpp:594: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_3 SeqNo: 1 offset: 1 MaxOffset: 2 2025-06-03T10:37:11.297580Z node 21 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Process pending events. 
Count 0 2025-06-03T10:37:11.297582Z node 21 :PERSQUEUE DEBUG: partition.cpp:594: [PQ: 72075186224037895, Partition: {0, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {0, {1, 281474976715674}, 100000} SourceId: test-message_group_id_1 SeqNo: 1 offset: 0 MaxOffset: 2 2025-06-03T10:37:11.297593Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-03T10:37:11.297596Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037895, Partition: 0, State: StateIdle] Topic 'topic_A' partition 0 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:37:11.303996Z node 21 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037895, NodeId 21, Generation 2 2025-06-03T10:37:11.304026Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037895] server connected, pipe [21:7511670303622572640:2479], now have 1 active actors on pipe 2025-06-03T10:37:13.294007Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037894] server connected, pipe [21:7511670312212507317:2880], now have 1 active actors on pipe 2025-06-03T10:37:13.294380Z node 21 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:465: [72075186224037896][topic_A] TEvClientDestroyed 72075186224037894 2025-06-03T10:37:13.296405Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037894] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:37:13.296466Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037894] Registered with mediator time cast 2025-06-03T10:37:13.296581Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037894] Transactions request. 
From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:37:13.296616Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:741: [PQ: 72075186224037894] has a tx info 2025-06-03T10:37:13.296631Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037894] PlanStep 1742296505901, PlanTxId 281474976715673, ExecStep 1742296505901, ExecTxId 281474976715673 2025-06-03T10:37:13.296669Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037894] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:37:13.296788Z node 21 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037894] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:37:13.296796Z node 21 :PERSQUEUE INFO: pq_impl.cpp:787: [PQ: 72075186224037894] has a tx writes info 2025-06-03T10:37:13.296883Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:3580: [PQ: 72075186224037894] send TEvSubscribeLock for WriteId {1, 281474976715674} 2025-06-03T10:37:13.296911Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:37:13.296918Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitConfigStep 2025-06-03T10:37:13.296984Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:37:13.297039Z node 21 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] bootstrapping {1, {1, 281474976715674}, 100000} [21:7511670312212507344:2528] 2025-06-03T10:37:13.297188Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInternalFieldsStep 2025-06-03T10:37:13.297200Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDiskStatusStep 2025-06-03T10:37:13.297239Z node 21 :PERSQUEUE INFO: partition_init.cpp:880: [PQ: 72075186224037894, Partition: 1, State: StateInit] bootstrapping 1 [21:7511670312212507343:2527] 2025-06-03T10:37:13.297412Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDiskStatusStep 2025-06-03T10:37:13.297533Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitMetaStep 2025-06-03T10:37:13.297591Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitMetaStep 2025-06-03T10:37:13.297648Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitInfoRangeStep 2025-06-03T10:37:13.297702Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitInfoRangeStep 2025-06-03T10:37:13.297765Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataRangeStep 2025-06-03T10:37:13.297952Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataRangeStep 2025-06-03T10:37:13.297986Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:621: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataRangeStep] Got data offset 0 count 1 size 250 so 0 eo 1 D0000100000_00000000000000000000_00000_0000000001_00000| 2025-06-03T10:37:13.298001Z node 21 :PERSQUEUE 
DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitDataStep 2025-06-03T10:37:13.298069Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitDataStep 2025-06-03T10:37:13.298076Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:1:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-03T10:37:13.298079Z node 21 :PERSQUEUE INFO: partition_init.cpp:774: [topic_A:1:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:37:13.298081Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:1:Initializer] Initializing completed. 2025-06-03T10:37:13.298088Z node 21 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: 1, State: StateInit] init complete for topic 'topic_A' partition 1 generation 2 [21:7511670312212507343:2527] 2025-06-03T10:37:13.298093Z node 21 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72075186224037894, Partition: 1, State: StateInit] SYNC INIT topic topic_A partitition 1 so 0 endOffset 0 Head Offset 0 PartNo 0 PackedSize 0 count 0 nextOffset 0 batches 0 2025-06-03T10:37:13.298100Z node 21 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Process pending events. Count 0 2025-06-03T10:37:13.298112Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 2 2025-06-03T10:37:13.298120Z node 21 :PERSQUEUE DEBUG: partition_read.cpp:779: [PQ: 72075186224037894, Partition: 1, State: StateIdle] Topic 'topic_A' partition 1 user test-consumer readTimeStamp for offset 0 initiated queuesize 0 startOffset 0 ReadingTimestamp 0 rrg 0 2025-06-03T10:37:13.298163Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:726: [topic_A:{1, {1, 281474976715674}, 100000}:TInitDataStep] read res partition offset 0 endOffset 1 key 0,1 valuesize 250 expected 250 2025-06-03T10:37:13.298175Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:75: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Start initializing step TInitEndWriteTimestampStep 2025-06-03T10:37:13.298177Z node 21 :PERSQUEUE INFO: partition_init.cpp:774: [topic_A:{1, {1, 281474976715674}, 100000}:TInitEndWriteTimestampStep] Initializing EndWriteTimestamp skipped because already initialized. 2025-06-03T10:37:13.298180Z node 21 :PERSQUEUE DEBUG: partition_init.cpp:55: [topic_A:{1, {1, 281474976715674}, 100000}:Initializer] Initializing completed. 
2025-06-03T10:37:13.298185Z node 21 :PERSQUEUE INFO: partition.cpp:560: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] init complete for topic 'topic_A' partition {1, {1, 281474976715674}, 100000} generation 2 [21:7511670312212507344:2528] 2025-06-03T10:37:13.298190Z node 21 :PERSQUEUE DEBUG: partition.cpp:574: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateInit] SYNC INIT topic topic_A partitition {1, {1, 281474976715674}, 100000} so 0 endOffset 1 Head Offset 0 PartNo 0 PackedSize 250 count 1 nextOffset 1 batches 1 SYNC INIT sourceId test-message_group_id_2 seqNo 1 offset 0 SYNC INIT HEAD KEY: D0000100000_00000000000000000000_00000_0000000001_00000| size 250 2025-06-03T10:37:13.298199Z node 21 :PERSQUEUE DEBUG: partition.cpp:3850: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Process pending events. Count 0 2025-06-03T10:37:13.298207Z node 21 :PERSQUEUE DEBUG: partition.cpp:594: [PQ: 72075186224037894, Partition: {1, {1, 281474976715674}, 100000}, State: StateIdle] Init complete for topic 'topic_A' Partition: {1, {1, 281474976715674}, 100000} SourceId: test-message_group_id_2 SeqNo: 1 offset: 0 MaxOffset: 1 2025-06-03T10:37:13.304945Z node 21 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037896][topic_A] TEvClientConnected TabletId 72075186224037894, NodeId 21, Generation 2 2025-06-03T10:37:13.304994Z node 21 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037894] server connected, pipe [21:7511670312212507319:2479], now have 1 active actors on pipe >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_using_exclusive_nodes[enable_alter_database_create_hive_first--false] [GOOD] |72.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Utf8-pk_types7-all_types7-index7] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::test_operation_with_locks[enable_alter_database_create_hive_first--true] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_uniform_partitiona[table_ttl_Date-pk_types1-all_types1-index1] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] >> 
test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int64-pk_types0-all_types0-index0] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] >> test_db_counters.py::TestKqpCounters::test_case [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_String-pk_types6-all_types6-index6] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint8-pk_types5-all_types5-index5] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-8.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] >> test_db_counters.py::TestStorageCounters::test_storage_counters[disable_separate_quotas] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_vector_index_build_reboots/unittest >> VectorIndexBuildTestReboots::BaseCase[TabletReboots] [GOOD] Test command err: =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:29:19.238117Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:29:19.238146Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:19.238153Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:29:19.238159Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:29:19.238166Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:29:19.238171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:29:19.238182Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:29:19.238204Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:29:19.238356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:29:19.238467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:29:19.255812Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:29:19.255836Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:29:19.255935Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:29:19.259467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:29:19.259570Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:29:19.259601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:29:19.264972Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:29:19.265022Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:29:19.265147Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:19.265216Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at 
schemeshard: 72057594046678944 2025-06-03T10:29:19.265706Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:19.265750Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:29:19.266036Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:29:19.266048Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:29:19.266063Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:29:19.266071Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:29:19.266076Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:29:19.266123Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:29:19.267847Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:29:19.291964Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:29:19.292054Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.292123Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:29:19.292171Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:29:19.292185Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.293031Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:19.293070Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 
1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:29:19.293146Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.293158Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:29:19.293164Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:29:19.293171Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:29:19.293801Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.293820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:29:19.293827Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:29:19.294352Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.294371Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:29:19.294378Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:19.294387Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:29:19.295234Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:29:19.296004Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:29:19.296062Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:29:19.296319Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:29:19.296360Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 
MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:29:19.296370Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:29:19.296456Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Ch ... oCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: "compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } 
PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490313Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490350Z node 684 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable0build" took 38us result status StatusPathDoesNotExist 2025-06-03T10:37:20.490371Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable0build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable0build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490440Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: true ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490456Z node 684 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable1build" took 18us result status StatusPathDoesNotExist 2025-06-03T10:37:20.490472Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/dir/Table/index1/indexImplPostingTable1build\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/dir/Table/index1\' (id: [OwnerId: 72057594046678944, LocalPathId: 5]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/dir/Table/index1/indexImplPostingTable1build" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/dir/Table/index1" LastExistedPrefixPathId: 5 LastExistedPrefixDescription { Self { Name: "index1" PathId: 5 SchemeshardId: 72057594046678944 PathType: EPathTypeTableIndex CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 4 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: true } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490537Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: 
Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" Options { ReturnPartitioningInfo: true ReturnPartitionConfig: true BackupInfo: false ReturnBoundaries: false ShowPrivateTable: true }, at schemeshard: 72057594046678944 2025-06-03T10:37:20.490560Z node 684 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/dir/Table/index1/indexImplPostingTable" took 24us result status StatusSuccess 2025-06-03T10:37:20.490675Z node 684 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/dir/Table/index1/indexImplPostingTable" PathDescription { Self { Name: "indexImplPostingTable" PathId: 7 SchemeshardId: 72057594046678944 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976710758 CreateStep: 5000005 ParentPathId: 5 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeVectorKmeansTreeIndexImplTable Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "indexImplPostingTable" Columns { Name: "__ydb_parent" Type: "Uint64" TypeId: 4 Id: 1 NotNull: true IsBuildInProgress: false } Columns { Name: "key" Type: "Uint32" TypeId: 2 Id: 2 NotNull: false IsBuildInProgress: false } Columns { Name: "value" Type: "String" TypeId: 4097 Id: 3 NotNull: false IsBuildInProgress: false } KeyColumnNames: "__ydb_parent" KeyColumnNames: "key" KeyColumnIds: 1 KeyColumnIds: 2 PartitionConfig { CompactionPolicy { InMemSizeToSnapshot: 4194304 InMemStepsToSnapshot: 300 InMemForceStepsToSnapshot: 500 InMemForceSizeToSnapshot: 16777216 InMemCompactionBrokerQueue: 0 ReadAheadHiThreshold: 67108864 ReadAheadLoThreshold: 16777216 MinDataPageSize: 7168 SnapBrokerQueue: 0 Generation { GenerationId: 0 SizeToCompact: 0 CountToCompact: 8 ForceCountToCompact: 8 ForceSizeToCompact: 134217728 CompactionBrokerQueue: 4294967295 KeepInCache: true BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen1" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 0 UpliftPartSize: 0 } Generation { GenerationId: 1 SizeToCompact: 41943040 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 536870912 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen2" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 8388608 UpliftPartSize: 8388608 } Generation { GenerationId: 2 SizeToCompact: 419430400 CountToCompact: 5 ForceCountToCompact: 16 ForceSizeToCompact: 17179869184 CompactionBrokerQueue: 4294967295 KeepInCache: false BackgroundCompactionPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } ResourceBrokerTask: "compaction_gen3" ExtraCompactionPercent: 10 ExtraCompactionMinSize: 16384 ExtraCompactionExpPercent: 110 ExtraCompactionExpMaxSize: 83886080 UpliftPartSize: 83886080 } BackupBrokerQueue: 1 DefaultTaskPriority: 5 BackgroundSnapshotPolicy { Threshold: 101 PriorityBase: 100 TimeFactor: 1 ResourceBrokerTask: "background_compaction" } InMemResourceBrokerTask: 
"compaction_gen0" SnapshotResourceBrokerTask: "compaction_gen0" BackupResourceBrokerTask: "scan" LogOverheadSizeToSnapshot: 16777216 LogOverheadCountToSnapshot: 500 DroppedRowsPercentToCompact: 50 KeepEraseMarkers: false MinBTreeIndexNodeSize: 7168 MinBTreeIndexNodeKeys: 6 } PartitioningPolicy { SizeToSplit: 2147483648 MinPartitionsCount: 1 } } TableSchemaVersion: 2 IsBackup: false IsRestore: false } TablePartitions { EndOfRangeKeyPrefix: "" IsPoint: false IsInclusive: false DatashardId: 72075186233409551 } TableStats { DataSize: 0 RowCount: 0 IndexSize: 0 LastAccessTime: 0 LastUpdateTime: 0 ImmediateTxCompleted: 0 PlannedTxCompleted: 0 TxRejectedByOverload: 0 TxRejectedBySpace: 0 TxCompleteLagMsec: 0 InFlightTxCount: 0 RowUpdates: 0 RowDeletes: 0 RowReads: 0 RangeReads: 0 PartCount: 1 RangeReadRows: 0 StoragePools { } ByKeyFilterSize: 0 HasSchemaChanges: false LocksAcquired: 0 LocksWholeShard: 0 LocksBroken: 0 } TabletMetrics { CPU: 0 Memory: 0 Network: 0 Storage: 0 ReadThroughput: 0 WriteThroughput: 0 ReadIops: 0 WriteIops: 0 } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 6 PathsLimit: 10000 ShardsInside: 7 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } } PathId: 7 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 ... posting table contains 400 rows >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/oom/py3test >> overlapping_portions.py::TestOverlappingPortions::test [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int8-pk_types2-all_types2-index2] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Int32-pk_types1-all_types1-index1] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/partitioning/py3test >> test_partitioning.py::TestPartitionong::test_partition_at_keys[table_Uint32-pk_types4-all_types4-index4] [GOOD] >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] |72.5%| [TA] $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} |72.5%| [TA] {RESULT} $(B)/ydb/tests/datashard/partitioning/test-results/py3test/{meta.json ... results_accumulator.log} |72.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_create_table_with_alter_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] >> test_tenants.py::TestTenants::test_stop_start[enable_alter_database_create_hive_first--true] [FAIL] >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint32-pk_types23-all_types23-index23---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_system_views.py::TestQueryMetricsUniqueQueries::test_case [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal229-pk_types26-all_types26-index26---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int32-pk_types20-all_types20-index20---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> 
test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] |72.6%| [TA] $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/test-results/unittest/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime-pk_types33-all_types33-index33---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int8-pk_types21-all_types21-index21---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal150-pk_types25-all_types25-index25---] [GOOD] >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp-pk_types34-all_types34-index34---] [GOOD] |72.6%| [TA] {RESULT} $(B)/ydb/core/tx/schemeshard/ut_vector_index_build_reboots/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Utf8-pk_types30-all_types30-index30---] [GOOD] |72.6%| [TA] $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log} >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--false] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint8-pk_types24-all_types24-index24---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date32-pk_types36-all_types36-index36---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_UUID-pk_types31-all_types31-index31---] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_discovery.py::TestDiscoveryExtEndpoint::test_scenario [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_only_specified_ranges [GOOD] 
>> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range |72.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |72.7%| [TA] {RESULT} $(B)/ydb/tests/olap/oom/test-results/py3test/{meta.json ... results_accumulator.log} |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_create_create_table[enable_alter_database_create_hive_first--false] [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_constructed_key_range [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_reads_only_specified_columns [GOOD] >> test_read_table.py::TestReadTableSuccessStories::test_read_table_without_data_has_snapshot [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--false] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_String-pk_types29-all_types29-index29---] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_success [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_new_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_change_column_type [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_column [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_add_to_key [GOOD] >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] |72.7%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Int64-pk_types19-all_types19-index19---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join0.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join1.test] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_stream_query.py::TestStreamQuery::test_sql_suite[results-window.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join1.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] >> test_session_pool.py::TestSessionPool::test_session_pool_simple_acquire [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_2 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_keep_alive [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_3 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_no_race_after_future_cancel_case_4 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_release_logic [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_1 [GOOD] >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval64-pk_types39-all_types39-index39---] [GOOD] >> test_db_counters.py::TestStorageCounters::test_storage_counters[disable_separate_quotas] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_indexes.py::TestSecondaryIndexes::test_create_table_with_global_index [GOOD] >> test_db_counters.py::TestStorageCounters::test_storage_counters[enable_separate_quotas] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] >> 
test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-9.test] [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_select_with_repetitions[10-64] [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join2.test] [GOOD] >> test_crud.py::TestCreateAndUpsertWithRepetitions::test_create_and_upsert_data_with_repetitions[10-64] [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[async_read_table] [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_execute_scheme.py::TestExecuteSchemeOperations::test_create_table_if_it_is_created_fail_remove_from_key [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] >> test_read_table.py::TestReadTableTruncatedResults::test_truncated_results[read_table] [GOOD] >> test_session_grace_shutdown.py::Test::test_grace_shutdown_of_session [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1_UNIQUE_SYNC-pk_types17-all_types17-index17-DyNumber-UNIQUE-SYNC] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__ASYNC-pk_types28-all_types28-index28-Uint64--ASYNC] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] >> test_session_pool.py::TestSessionPool::test_no_cluster_endpoints_no_failure [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 >> test_session_pool.py::TestSessionPool::test_session_pool_close_basic_logic_case_2 [GOOD] >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> 
test_ttl.py::TestTTL::test_ttl[table_Date_0__ASYNC-pk_types31-all_types31-index31-Date--ASYNC] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] |72.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__SYNC-pk_types33-all_types33-index33-Date--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_disk_quotas[enable_alter_database_create_hive_first--true] [FAIL] Test command err: contrib/python/tornado/tornado-4/tornado/gen.py:1064: DeprecationWarning: the (type, exc, tb) signature of throw() is deprecated, use the single-arg signature instead. contrib/python/tornado/tornado-4/tornado/gen.py:1064: DeprecationWarning: the (type, exc, tb) signature of throw() is deprecated, use the single-arg signature instead. yielded = self.gen.throw(*exc_info) >> test_isolation.py::TestTransactionIsolation::test_prevents_write_cycles_g0 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a >> test_isolation.py::TestTransactionIsolation::test_prevents_aborted_reads_g1a [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0_UNIQUE_SYNC-pk_types2-all_types2-index2-Datetime-UNIQUE-SYNC] [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_intermediate_reads_g1b [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_and_drop_table_success [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed1 [GOOD] >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed2 >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c >> test_crud.py::TestCRUDOperations::test_create_table_wrong_primary_key_failed2 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_circular_information_flow_g1c [GOOD] >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example >> test_isolation.py::TestTransactionIsolation::test_isolation_mailing_list_example [GOOD] >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv >> test_isolation.py::TestTransactionIsolation::test_prevents_observed_transaction_vanishes_otv [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp [GOOD] >> test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates >> 
test_isolation.py::TestTransactionIsolation::test_does_not_prevent_predicate_many_preceders_pmp_for_write_predicates [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] >> test_isolation.py::TestTransactionIsolation::test_lost_update_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 >> test_isolation.py::TestTransactionIsolation::test_lost_update_on_value_with_upsert_p4 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_predicate_deps [GOOD] >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate >> test_isolation.py::TestTransactionIsolation::test_read_skew_g_single_write_predicate [GOOD] >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item >> test_isolation.py::TestTransactionIsolation::test_write_skew_g2_item [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2 [GOOD] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__ASYNC-pk_types19-all_types19-index19-Uint32--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_session_pool.py::TestSessionPool::test_session_pool_min_size_feature [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/olap/unittest >> KqpOlapJson::CompactionVariants
2025-06-03 10:39:04,486 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out
2025-06-03 10:39:04,537 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout.
Process tree before termination:
pid    rss    ref    pdirt
258846 46.7M  43.7M  23.7M  test_tool run_ut @/home/runner/.ya/build/build_root/u93c/000d73/ydb/core/kqp/ut/olap/test-results/unittest/testing_out_stuff/chunk133/testing_out_stuff/test_tool.args
259273 259M   256M   196M   └─ ydb-core-kqp-ut-olap --trace-path-append /home/runner/.ya/build/build_root/u93c/000d73/ydb/core/kqp/ut/olap/test-results/unittest/testing_out_stuff/chunk133/ytest.report
Test command err: Trying to start YDB, gRPC: 10629, MsgBus: 27331
2025-06-03T10:29:05.306650Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668215252134017:2203];send_to=[0:7307199536658146131:7762515];
2025-06-03T10:29:05.306704Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message;
test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d73/r3tmp/tmpwmsmjb/pdisk_1.dat
2025-06-03T10:29:05.409378Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668215252133843:2079] 1748946545305653 != 1748946545305656
2025-06-03T10:29:05.410453Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded
2025-06-03T10:29:05.411019Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected
2025-06-03T10:29:05.411037Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting
2025-06-03T10:29:05.414547Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected
TServer::EnableGrpc on GrpcPort 10629, node 1
2025-06-03T10:29:05.430607Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe)
2025-06-03T10:29:05.430621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe)
2025-06-03T10:29:05.430623Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe)
2025-06-03T10:29:05.430675Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration
TClient is connected to server localhost:27331
TClient is connected to server localhost:27331
WaitRootIsUp 'Root'...
TClient::Ls request: Root
TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED)
WaitRootIsUp 'Root' success.
2025-06-03T10:29:05.504230Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... EXECUTE: CREATE TABLE `/Root/ColumnTable` ( Col1 Uint64 NOT NULL, Col2 JsonDocument, PRIMARY KEY (Col1) ) PARTITION BY HASH(Col1) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1); 2025-06-03T10:29:05.707184Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668215252134505:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.707218Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:29:05.760373Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:29:05.770855Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:29:05.770911Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:29:05.770960Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:29:05.770976Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:29:05.770991Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:29:05.771005Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:29:05.771022Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:29:05.771039Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:29:05.771053Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:29:05.771066Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:29:05.771079Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:29:05.771092Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511668215252134580:2332];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:29:05.771973Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:29:05.771997Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:29:05.772015Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:29:05.772021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:29:05.772046Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:29:05.772060Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:29:05.772075Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:29:05.772088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:29:05.772107Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:29:05.772118Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:29:05.772132Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:29:05.772143Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:29:05.772176Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:29:05.772189Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:29:05.772218Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:29:05.772229Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:29:05.772250Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:29:05.772263Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_N ... 2025-06-03T10:39:04.266498Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037896;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:39:04.266631Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:39:04.266787Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:39:04.266807Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037891;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:39:04.266931Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(1u, JsonDocument('{"a" : "a1"}')), (2u, JsonDocument('{"a" : "a2"}')), (3u, JsonDocument('{"b" : "b3"}')), (4u, JsonDocument('{"b" : "b4", "a" : "a4"}')) 2025-06-03T10:39:04.272006Z node 315 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [315:7511670786972526990:2415], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:04.272042Z node 315 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:04.272099Z node 315 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [315:7511670786972526995:2418], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:04.272880Z node 315 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:39:04.280700Z node 315 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [315:7511670786972526997:2419], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:39:04.347002Z node 315 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [315:7511670786972527048:2727] txid# 281474976715663, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 6], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:39:04.361973Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037889;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037889;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.361973Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037894;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.362021Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037893;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037893;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.362026Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037897;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037897;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.367111Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037889;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:39:04.367112Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037893;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:39:04.367199Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[315:7511670786972526453:2337];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037889; 2025-06-03T10:39:04.367208Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037897;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; 2025-06-03T10:39:04.367217Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[315:7511670786972526453:2337];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-03T10:39:04.367223Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;self_id=[315:7511670786972526453:2337];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=22;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037889; 2025-06-03T10:39:04.367230Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037894;self_id=[315:7511670786972526453:2337];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037894;local_tx_no=23;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037897;receive=72075186224037893; 2025-06-03T10:39:04.367320Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715665;tx_id=281474976715665;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715665; EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1, Col2) VALUES(11u, JsonDocument('{"a" : "1a1"}')), (12u, JsonDocument('{"a" : "1a2"}')), (13u, JsonDocument('{"b" : "1b3"}')), (14u, JsonDocument('{"b" : "1b4", "a" : "a4"}')) 2025-06-03T10:39:04.383986Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.383986Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037894;local_tx_no=28;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037894;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.384036Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037892;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037892;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.384046Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037890;local_tx_no=10;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037890;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled; 2025-06-03T10:39:04.389246Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037892;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:39:04.389248Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037894;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:39:04.389350Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[315:7511670786972526451:2335];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037888;local_tx_no=20;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037890;receive=72075186224037894; 2025-06-03T10:39:04.389361Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[315:7511670786972526451:2335];ev=NKikimr::TEvTxProcessing::TEvReadSetAck;tablet_id=72075186224037888;local_tx_no=21;method=execute;tx_info=;fline=primary.h:138;event=ack_tablet_duplication;wait=72075186224037890;receive=72075186224037892; 2025-06-03T10:39:04.389377Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037890;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667; 2025-06-03T10:39:04.389453Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: 
tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715667;tx_id=281474976715667;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715667;
EXECUTE: REPLACE INTO `/Root/ColumnTable` (Col1) VALUES(10u)
2025-06-03T10:39:04.407051Z node 315 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;local_tx_no=25;method=complete;tx_info=TTxBlobsWritingFinished;tablet_id=72075186224037888;tx_state=complete;fline=columnshard_impl.cpp:830;event=skip_compaction;reason=disabled;
2025-06-03T10:39:04.409673Z node 315 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=281474976715669;tx_id=281474976715669;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715669;
WAIT_COMPACTION: 0
Traceback (most recent call last):
  File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait
    wait_for(
  File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for
    raise TimeoutError(truncate(message, MAX_MESSAGE_LEN))
yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/000d73/ydb/core/kqp/ut/olap/test-results/unittest/testing_out_stuff/chunk133/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main
    res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback)
  File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait
    raise ExecutionTimeoutError(self, str(e))
yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/000d73/ydb/core/kqp/ut/olap/test-results/unittest/testing_out_stuff/chunk133/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {})
>> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD] >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] >> test_insert.py::TestInsertOperations::test_several_inserts_per_transaction_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success >> test_insert.py::TestInsertOperations::test_insert_plus_update_per_transaction_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case >> test_insert.py::TestInsertOperations::test_update_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case >> test_insert.py::TestInsertOperations::test_upsert_plus_insert_per_transaction_are_success_prepared_case [GOOD] >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success >> test_insert.py::TestInsertOperations::test_insert_plus_upsert_are_success [GOOD] >> test_insert.py::TestInsertOperations::test_insert_revert_basis >>
test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_insert.py::TestInsertOperations::test_insert_revert_basis [GOOD] >> test_insert.py::TestInsertOperations::test_query_pairs |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__SYNC-pk_types12-all_types12-index12-DyNumber--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_session_grace_shutdown.py::TestIdle::test_idle_shutdown_of_session [GOOD] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_table-True] [GOOD] >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_simple_table-False] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [GOOD] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__ASYNC-pk_types1-all_types1-index1-Datetime--ASYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] >> test_public_api.py::TestExplain::test_explain_data_query >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[async_read_table] [GOOD] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__ASYNC-pk_types10-all_types10-index10-Timestamp--ASYNC] [GOOD] >> test_public_api.py::TestExplain::test_explain_data_query [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select 
distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1_UNIQUE_SYNC-pk_types5-all_types5-index5-Datetime-UNIQUE-SYNC] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_simple_table-False] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] >> test_discovery.py::TestDiscoveryFaultInjectionSlotStop::test_scenario [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Timestamp64-pk_types38-all_types38-index38---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1_UNIQUE_SYNC-pk_types23-all_types23-index23-Uint32-UNIQUE-SYNC] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select distinct b, a from (select a, b from t1 union all select b, a from t1 order by b) order by B-Column B is not in source column set.*] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select count(a, b) from t1-Aggregation function Count requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(a, b) from t1-Aggregation function Min requires exactly 1 argument] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(a, b) from t1-Aggregation function Min requires exactly 1 argument] [GOOD] >> test_crud.py::TestSelect::test_advanced_select_failed[select min(*) from t1-.*is not allowed here] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_tenants.py::TestTenants::test_when_deactivate_fat_tenant_creation_another_tenant_is_ok[enable_alter_database_create_hive_first--false] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0__SYNC-pk_types18-all_types18-index18-Uint32--SYNC] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__SYNC-pk_types3-all_types3-index3-Datetime--SYNC] [GOOD] |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] >> 
test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can |72.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_0_UNIQUE_SYNC-pk_types20-all_types20-index20-Uint32-UNIQUE-SYNC] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_isolation.py::TestTransactionIsolation::test_anti_dependency_cycles_g2_two_edges [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_insert.py::TestInsertOperations::test_query_pairs [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__SYNC-pk_types6-all_types6-index6-Timestamp--SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1__SYNC-pk_types27-all_types27-index27-Uint64--SYNC] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_table-True] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0_UNIQUE_SYNC-pk_types26-all_types26-index26-Uint64-UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[replace_table-create_indexed_async_table-True] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_1_UNIQUE_SYNC-pk_types29-all_types29-index29-Uint64-UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/rename/py3test >> test_rename.py::test_client_gets_retriable_errors_when_rename[substitute_table-create_indexed_async_table-True] [GOOD] >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |72.9%| [TA] $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} |72.9%| [TA] {RESULT} $(B)/ydb/tests/functional/rename/test-results/py3test/{meta.json ... results_accumulator.log} >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can >> test_db_counters.py::TestStorageCounters::test_storage_counters[enable_separate_quotas] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can |72.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1__ASYNC-pk_types34-all_types34-index34-Date--ASYNC] [GOOD] >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__SYNC-pk_types24-all_types24-index24-Uint64--SYNC] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown >> KqpOlapScheme::InsertAddInsertDrop [GOOD] >> KqpOlapScheme::InsertDropAddColumn >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--false] >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] >> 
test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_public_api.py::TestCRUDOperations::test_prepared_query_pipeline [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_public_api.py::TestCRUDOperations::test_scheme_client_ops [GOOD] >> test_public_api.py::TestCRUDOperations::test_scheme_operation_errors_handle [GOOD] >> test_public_api.py::TestCRUDOperations::test_none_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_list_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_parse_tuple >> test_public_api.py::TestCRUDOperations::test_parse_tuple [GOOD] >> test_public_api.py::TestCRUDOperations::test_dict_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type [GOOD] >> test_public_api.py::TestCRUDOperations::test_data_types >> KqpOlapScheme::InsertDropAddColumn [GOOD] >> KqpOlapScheme::InitTtlSettingsOnShardStart >> test_public_api.py::TestCRUDOperations::test_data_types [GOOD] >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter >> test_public_api.py::TestCRUDOperations::test_struct_type_parameter [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values >> test_public_api.py::TestCRUDOperations::test_bulk_prepared_insert_many_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_bulk_upsert >> test_public_api.py::TestCRUDOperations::test_bulk_upsert [GOOD] >> test_public_api.py::TestCRUDOperations::test_all_enums_are_presented_as_exceptions [GOOD] >> test_public_api.py::TestCRUDOperations::test_type_builders_str_methods [GOOD] >> test_public_api.py::TestCRUDOperations::test_create_and_delete_session_then_use_it_again [GOOD] >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query >> test_public_api.py::TestCRUDOperations::test_locks_invalidated_error [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_tcl_3 [GOOD] >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak >> KqpOlapScheme::InitTtlSettingsOnShardStart [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_read_table.py::TestReadTableWithTabletKills::test_read_table_async_simple[read_table] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpOlapScheme::InitTtlSettingsOnShardStart [GOOD] Test command err: Trying to start YDB, gRPC: 23750, MsgBus: 16644 2025-06-03T10:35:16.990063Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511669808323423729:2205];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:35:16.990891Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016e3/r3tmp/tmpU06c1B/pdisk_1.dat 2025-06-03T10:35:17.083228Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:35:17.093636Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:35:17.093680Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting TServer::EnableGrpc on GrpcPort 23750, node 1 2025-06-03T10:35:17.098109Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:35:17.119408Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:35:17.119425Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:35:17.119427Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:35:17.119479Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:16644 TClient is connected to server localhost:16644 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:35:17.254987Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... CREATE TABLE `/Root/ColumnTableTest` (id Int32 NOT NULL, id_second Int32 NOT NULL, level Int32, created_at Timestamp NOT NULL, PRIMARY KEY (created_at, id_second)) PARTITION BY HASH(created_at) WITH (STORE = COLUMN, AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = 1, TTL = Interval("PT1H") ON created_at); 2025-06-03T10:35:17.725447Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511669812618391513:2331], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:17.725482Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:35:17.825816Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateColumnTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 2025-06-03T10:35:17.846777Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=0; 2025-06-03T10:35:17.846837Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Granules; 2025-06-03T10:35:17.846905Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=Chunks; 2025-06-03T10:35:17.846937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=TablesCleaner; 2025-06-03T10:35:17.846965Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanGranuleId; 2025-06-03T10:35:17.847009Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanInsertionDedup; 2025-06-03T10:35:17.847033Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:35:17.847062Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:35:17.847088Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:35:17.847115Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:35:17.847142Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:35:17.847167Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;self_id=[1:7511669812618391589:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:35:17.847816Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:35:17.847831Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:35:17.847845Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:35:17.847850Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:35:17.847868Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 2025-06-03T10:35:17.847879Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:35:17.847891Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:35:17.847896Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:35:17.847909Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:35:17.847919Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:35:17.847928Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:35:17.847937Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:35:17.847959Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:35:17.847969Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:35:17.847990Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: 
tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:35:17.847999Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:35:17.848011Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:35:17.848021Z node 1 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:35:17.848029Z node 1 :TX_COLUMN ... r_register;description=CLASS_NAME=GCCountersNormalizer; 2025-06-03T10:39:45.832536Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=SyncPortionFromChunks; 2025-06-03T10:39:45.832565Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV1Chunks_V2; 2025-06-03T10:39:45.832590Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV2Chunks; 2025-06-03T10:39:45.832616Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=CleanDeprecatedSnapshot; 2025-06-03T10:39:45.832643Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:11;event=normalizer_register;description=CLASS_NAME=RestoreV0ChunksMeta; 2025-06-03T10:39:45.833197Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Granules;id=1; 2025-06-03T10:39:45.833211Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=Chunks;id=Chunks; 2025-06-03T10:39:45.833226Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=Chunks;id=2; 2025-06-03T10:39:45.833231Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=TablesCleaner;id=TablesCleaner; 2025-06-03T10:39:45.833250Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=TablesCleaner;id=4; 
2025-06-03T10:39:45.833259Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanGranuleId;id=CleanGranuleId; 2025-06-03T10:39:45.833272Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanGranuleId;id=6; 2025-06-03T10:39:45.833281Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanInsertionDedup;id=CleanInsertionDedup; 2025-06-03T10:39:45.833311Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanInsertionDedup;id=8; 2025-06-03T10:39:45.833321Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=GCCountersNormalizer;id=GCCountersNormalizer; 2025-06-03T10:39:45.833328Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=GCCountersNormalizer;id=9; 2025-06-03T10:39:45.833335Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=SyncPortionFromChunks;id=SyncPortionFromChunks; 2025-06-03T10:39:45.833360Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=SyncPortionFromChunks;id=11; 2025-06-03T10:39:45.833370Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV1Chunks_V2;id=RestoreV1Chunks_V2; 2025-06-03T10:39:45.833391Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV1Chunks_V2;id=15; 2025-06-03T10:39:45.833400Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV2Chunks;id=RestoreV2Chunks; 2025-06-03T10:39:45.833413Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV2Chunks;id=16; 2025-06-03T10:39:45.833425Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=CleanDeprecatedSnapshot;id=CleanDeprecatedSnapshot; 2025-06-03T10:39:45.833433Z node 14 :TX_COLUMNSHARD CRIT: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=clean_deprecated_snapshot.cpp:30;tasks_for_rewrite=0; 2025-06-03T10:39:45.833438Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=CleanDeprecatedSnapshot;id=17; 2025-06-03T10:39:45.833442Z node 14 
:TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:42;event=normalizer_switched;description=CLASS_NAME=RestoreV0ChunksMeta;id=RestoreV0ChunksMeta; 2025-06-03T10:39:45.833552Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:62;event=normalizer_finished;description=CLASS_NAME=RestoreV0ChunksMeta;id=18; 2025-06-03T10:39:45.833562Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;process=TTxUpdateSchema::Execute;fline=abstract.cpp:45;event=normalization_finished; 2025-06-03T10:39:45.878534Z node 14 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715658;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715658; 2025-06-03T10:39:45.881266Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511670964036117154:2345], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.881287Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.884319Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:39:45.887652Z node 14 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715659;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715659; 2025-06-03T10:39:45.890722Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511670964036117181:2347], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.890746Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.894322Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:39:45.902100Z node 14 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715660;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715660; 2025-06-03T10:39:45.905409Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511670964036117213:2353], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.905437Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.909627Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 2025-06-03T10:39:45.915789Z node 14 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715661;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715661; 2025-06-03T10:39:45.918476Z node 14 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [14:7511670964036117244:2359], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
<main>: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.918501Z node 14 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: { <main>
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:39:45.921392Z node 14 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterColumnTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:39:45.929975Z node 14 :TX_COLUMNSHARD_TX WARN: log.cpp:784: tablet_id=72075186224037888;tx_state=TTxProgressTx::Execute;tx_current=0;tx_id=281474976715662;fline=tx_controller.cpp:214;event=finished_tx;tx_id=281474976715662; 2025-06-03T10:39:45.934920Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117083:2335];ev=NActors::TEvents::TEvPoison;fline=columnshard_impl.cpp:1152;event=tablet_die; 2025-06-03T10:39:45.942798Z node 14 :TX_COLUMNSHARD WARN: log.cpp:784: tablet_id=72075186224037888;self_id=[14:7511670964036117285:2365];tablet_id=72075186224037888;process=TTxInitSchema::Execute;fline=abstract.cpp:90;event=normalization_start;last_saved_id=18; |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] >> test_public_api.py::TestCRUDOperations::test_reuse_session_to_tx_leak [GOOD] >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts >> test_public_api.py::TestCRUDOperations::test_direct_leak_tx_but_no_actual_leak_by_best_efforts [GOOD] >> test_public_api.py::TestCRUDOperations::test_presented_in_cache >> test_public_api.py::TestCRUDOperations::test_presented_in_cache [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values_negative_stories [GOOD] >> test_public_api.py::TestCRUDOperations::test_decimal_values >> test_public_api.py::TestCRUDOperations::test_decimal_values [GOOD] >> test_public_api.py::TestCRUDOperations::test_list_directory_with_children [GOOD] >> test_public_api.py::TestCRUDOperations::test_validate_describe_path_result >> test_public_api.py::TestCRUDOperations::test_validate_describe_path_result [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modifications_1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_acl_modification_2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_can_execute_valid_statement_after_invalid_success [GOOD] >> test_public_api.py::TestCRUDOperations::test_modify_permissions_3 [GOOD] >> test_public_api.py::TestCRUDOperations::test_directory_that_doesnt_exists [GOOD] >> test_public_api.py::TestCRUDOperations::test_crud_acl_actions >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0__ASYNC-pk_types13-all_types13-index13-DyNumber--ASYNC] [GOOD] >> test_public_api.py::TestCRUDOperations::test_crud_acl_actions [GOOD] >> test_public_api.py::TestCRUDOperations::test_too_many_pending_transactions [GOOD] >> test_public_api.py::TestCRUDOperations::test_query_set1 |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint64_0__ASYNC-pk_types25-all_types25-index25-Uint64--ASYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_public_api.py::TestCRUDOperations::test_query_set1 [GOOD] >> test_public_api.py::TestCRUDOperations::test_queries_set2 >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_public_api.py::TestCRUDOperations::test_queries_set2 [GOOD] >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure >> test_crud.py::TestClientTimeouts::test_can_set_timeouts_on_query [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Uint64-pk_types22-all_types22-index22---] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--false] [GOOD] >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--true] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_public_api.py::TestCRUDOperations::test_when_result_set_is_large_then_issue_occure [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
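The DeprecationWarning above is emitted by the vendored ydb Python SDK (contrib/python/ydb/py3/ydb/types.py:59). A minimal sketch of the migration it suggests, assuming Python 3.11+ where datetime.UTC is available; the timestamp value is hypothetical:

import datetime

ts = 1717410916  # hypothetical epoch seconds

# Deprecated: returns a naive datetime (tzinfo is None)
naive = datetime.datetime.utcfromtimestamp(ts)

# Recommended by the warning: returns a timezone-aware datetime in UTC
aware = datetime.datetime.fromtimestamp(ts, datetime.UTC)

assert naive.tzinfo is None
assert aware.tzinfo is datetime.UTC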
|73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_ydb_create_and_remove_directory_success [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_1__ASYNC-pk_types4-all_types4-index4-Datetime--ASYNC] [GOOD] >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] |73.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_discovery.py::TestMirror3DCDiscovery::test_mirror3dc_discovery_logic [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
|73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Decimal3510-pk_types27-all_types27-index27---] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Datetime64-pk_types37-all_types37-index37---] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Date-pk_types32-all_types32-index32---] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0__SYNC-pk_types30-all_types30-index30-Date--SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__ASYNC-pk_types22-all_types22-index22-Uint32--ASYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> 
test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok >> test_dynamic_tenants.py::test_check_access[enable_alter_database_create_hive_first--true] [GOOD] >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_Interval-pk_types35-all_types35-index35---] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_DyNumber-pk_types28-all_types28-index28---] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_public_api.py::TestSessionNotFound::test_session_not_found |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |73.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1__SYNC-pk_types9-all_types9-index9-Timestamp--SYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test ------- [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v0-fifo] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v1] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v0-std] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v1-std] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v0-std] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v1-fifo] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__SYNC-pk_types15-all_types15-index15-DyNumber--SYNC] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_1_UNIQUE_SYNC-pk_types35-all_types35-index35-Date-UNIQUE-SYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPathTestQueuesManagingWithPath::test_ya_count_queues[tables_format_v0] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v1-fifo] >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Uint32_1__SYNC-pk_types21-all_types21-index21-Uint32--SYNC] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v1] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_apply_permissions[tables_format_v1] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v0-fifo] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue[std] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_1__ASYNC-pk_types16-all_types16-index16-DyNumber--ASYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_DyNumber_0_UNIQUE_SYNC-pk_types14-all_types14-index14-DyNumber-UNIQUE-SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v1-fifo] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v1-std] >> test_counters.py::TestSqsCountersFeatures::test_updates_status_code_counters_when_parsing_errors_occur[tables_format_v1] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v1-std] [GOOD] >> 
test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v0-fifo] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_1_UNIQUE_SYNC-pk_types11-all_types11-index11-Timestamp-UNIQUE-SYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v0-fifo] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Date_0_UNIQUE_SYNC-pk_types32-all_types32-index32-Date-UNIQUE-SYNC] [GOOD] >> test_self_heal.py::TestEnableSelfHeal::test_replication >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue[fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue[std] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_empty_tables_format >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue_batch[tables_format_v0] >> test_counters.py::TestSqsCountersFeatures::test_updates_status_code_counters_when_parsing_errors_occur[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v0] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v0-std] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_ya_count_queues[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_ya_count_queues[tables_format_v1] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_set_queue_attributes[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text 
'{}'".format( |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/split_merge/py3test >> test_split_merge.py::TestSplitMerge::test_merge_split[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_empty_tables_format [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_incorrect_tables_format >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-std] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v0] >> test_queue_counters.py::TestSqsGettingCounters::test_action_duration_being_not_immediate ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
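The other warning repeated throughout the sqs/common test blocks, raised at ydb/tests/library/sqs/requests_client.py:140, is simpler: logging.Logger.warn() has long been a deprecated alias of Logger.warning(). A minimal sketch of the drop-in fix, with illustrative logger name and values (not taken from the test library):

    import logging

    logger = logging.getLogger("sqs.requests_client")
    code, reason, text = 400, "Bad Request", "..."  # hypothetical values

    # Deprecated alias that triggers the DeprecationWarning seen above:
    # logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(code, reason, text))

    # Equivalent call; %-style lazy formatting also defers building the
    # message until the record is actually emitted:
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                   code, reason, text)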
>> test_public_api.py::TestSessionNotFound::test_session_not_found [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_ya_count_queues[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Datetime_0__SYNC-pk_types0-all_types0-index0-Datetime--SYNC] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v0-fifo] >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v1-fifo] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_incorrect_tables_format [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_unsupported_tables_format ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_unsupported_tables_format >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v0] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue_batch[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue_batch[tables_format_v1] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[with_queues-tables_format_v1] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v0] |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_self_heal.py::TestEnableSelfHeal::test_replication [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue_batch[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v0-fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue[fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-fifo] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v0] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue[std] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue[std] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_empty_tables_format >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v0-fifo] |73.3%| [TA] $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_duplicates >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v0-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v0-std] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v1-std] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v0-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v1-fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_unsupported_tables_format [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsACLWithPath::test_modify_permissions[tables_format_v1] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] |73.3%| [TA] {RESULT} $(B)/ydb/tests/datashard/split_merge/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v1-std] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_empty_tables_format [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_incorrect_tables_format >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v0-std] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v1-fifo] >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v0-fifo] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_public_api.py::TestSessionNotFoundOperations::test_session_pool [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_ok_keep_alive_example >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_duplicates [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_reading_deleting >> test_public_api.py::TestSessionNotFoundOperations::test_ok_keep_alive_example [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_can_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_cannot_commit_bad_tx >> test_public_api.py::TestSessionNotFoundOperations::test_cannot_commit_bad_tx [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_commit_successfully_after_success_commit [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_invalid_keep_alive_example [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v0-std] [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_describe_table_with_bounds [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_datetime_types [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_native_date_types [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v1-fifo] >> test_public_api.py::TestSessionNotFoundOperations::test_keep_in_cache_disabled [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explicit_partitions_case_1 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_explict_partitions_case_2 >> 
test_queue_counters.py::TestSqsGettingCounters::test_counters_when_sending_reading_deleting [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters >> test_public_api.py::TestSessionNotFoundOperations::test_explict_partitions_case_2 [GOOD] >> test_public_api.py::TestSessionNotFoundOperations::test_simple_table_profile_settings [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v1-fifo] [GOOD] |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_counters.py::TestSqsCountersExportDelay::test_export_delay[tables_format_v0] >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v1-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v0-fifo] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v0-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v0-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_without_version.py::TestQueueWithoutVersionWithPath::test_common[fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] >> test_queue_counters.py::TestSqsGettingCounters::test_action_duration_being_not_immediate [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_reading_from_empty_queue >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_remove_queue_generates_event[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_remove_queue_generates_event[tables_format_v0] [SKIPPED] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_incorrect_tables_format [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_remove_queue_generates_event[tables_format_v1] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_reading_from_empty_queue [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_remove_queue_generates_event[tables_format_v1] [SKIPPED] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_unsupported_tables_format [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[fifo] [GOOD] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_counters.py::TestSqsGettingCounters::test_purge_queue_counters [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] |73.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] [GOOD] >> test_format_without_version.py::TestQueueWithoutVersionWithPath::test_common[fifo] [GOOD] >> test_format_without_version.py::TestQueueWithoutVersionWithPath::test_common[std] >> test_counters.py::TestSqsCountersExportDelay::test_export_delay[tables_format_v0] [GOOD] >> test_counters.py::TestSqsCountersExportDelay::test_export_delay[tables_format_v1] >> test_format_without_version.py::TestQueueWithoutVersionWithPath::test_common[std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_counters.py::TestSqsGettingCounters::test_counters_when_reading_from_empty_queue [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, 
reason '{}' and text '{}'".format( >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_queues_count_over_limit[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_counters.py::TestSqsCountersExportDelay::test_export_delay[tables_format_v1] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v1-fifo] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue_with_unsupported_tables_format [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v0-fifo] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] 
|73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v1-std] [GOOD] >> test_public_api.py::TestBadSession::test_simple >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-fifo] |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_account_actions.py::TestAccountActionsWithPath::test_manage_account[without_queues-tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test >> test_crud.py::TestManySelectsInRow::test_selects_in_row_success[500-500-50] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/secondary_index/py3test >> test_secondary_index.py::TestSecondaryIndex::test_secondary_index_cover[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_invalid_tag_queue[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v0-fifo] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-30] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_create_queue_with_incorrect_tables_format [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v1-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_remove_queue_generates_event[tables_format_v1] [SKIPPED] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.4%| [TA] $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... results_accumulator.log} >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-fifo] >> test_acl.py::TestSqsACLWithTenant::test_apply_permissions[tables_format_v0] >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] [GOOD] |73.4%| [TA] {RESULT} $(B)/ydb/tests/datashard/secondary_index/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v0-with_delete_message] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_and_create_queue[std] >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create[std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_list_queue_tags[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v0-std] >> test_queues_managing.py::TestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v1-std] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v0-with_change_visibility] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] [GOOD] >> test_ping.py::TestPing::test_error_on_cgi_parameters >> 
test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v0-fifo] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v1-std] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-std] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v0] [SKIPPED] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0__ASYNC-pk_types7-all_types7-index7-Timestamp--ASYNC] [GOOD] >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[fifo] >> test_queues_managing.py::TestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_remove_queue_generates_event[tables_format_v1] [SKIPPED] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] >> test_acl.py::TestSqsACLWithTenant::test_apply_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithTenant::test_apply_permissions[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_turn_on_serverless_storage_billing[enable_alter_database_create_hive_first--true] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> test_acl.py::TestSqsACLWithTenant::test_apply_permissions[tables_format_v1] [GOOD] >> test_acl.py::TestSqsACLWithTenant::test_modify_permissions[tables_format_v0] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v1-with_change_visibility] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_ping.py::TestPing::test_error_on_cgi_parameters [GOOD] >> test_ping.py::TestPing::test_error_on_non_ping_path >> test_ping.py::TestPing::test_error_on_non_ping_path [GOOD] >> test_ping.py::TestPing::test_ping >> test_acl.py::TestSqsACLWithTenant::test_modify_permissions[tables_format_v0] [GOOD] >> test_acl.py::TestSqsACLWithTenant::test_modify_permissions[tables_format_v1] >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_ping.py::TestPing::test_ping [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v0-fifo] [GOOD] >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v0-std] >> test_counters.py::TestSqsCountersFeatures::test_aggregates_transaction_counters[queue] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue_batch[tables_format_v0] [GOOD] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/tenants/py3test >> test_dynamic_tenants.py::test_create_and_drop_tenants[enable_alter_database_create_hive_first--false] [GOOD] >> test_acl.py::TestSqsACLWithTenant::test_modify_permissions[tables_format_v1] [GOOD] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/ttl/py3test >> test_ttl.py::TestTTL::test_ttl[table_Timestamp_0_UNIQUE_SYNC-pk_types8-all_types8-index8-Timestamp-UNIQUE-SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v0-std] [GOOD] >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v1-fifo] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v0-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v1-fifo] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v1-with_delete_message] |73.4%| [TA] $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_and_create_queue[std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-fifo] >> test_public_api.py::TestBadSession::test_simple [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |73.4%| [TA] {RESULT} $(B)/ydb/tests/datashard/ttl/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] |73.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_public_api.py::TestDriverCanRecover::test_driver_recovery >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v0] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v0-std] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v1-fifo] >> test_counters.py::TestSqsCountersFeatures::test_aggregates_transaction_counters[queue] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_aggregates_transaction_counters[user] >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[fifo] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v1-fifo] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v1-std] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v1-std] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-30] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v1-200] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create_old[fifo] |73.4%| [TA] $(B)/ydb/tests/functional/tenants/test-results/py3test/{meta.json ... results_accumulator.log} |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_ping.py::TestPing::test_ping [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v0] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v1] >> test_counters.py::TestSqsCountersFeatures::test_creates_counter[tables_format_v1] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[queue] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_tag_queue[tables_format_v0-fifo] [GOOD] >> test_public_api.py::TestDriverCanRecover::test_driver_recovery [GOOD] |73.5%| [TA] {RESULT} $(B)/ydb/tests/functional/tenants/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v1] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[queue] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsACLWithTenant::test_modify_permissions[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-fifo] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_fifo_queue_wo_postfix[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v0-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_garbage_collection.py::TestSqsGarbageCollection::test_visibility_change_cleanups_proper_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=807487) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=807487) is multi-threaded, use of fork() may lead to deadlocks in the child. 
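The fork() DeprecationWarning above and the unclosed-pool ResourceWarning that follows it are related: since Python 3.12, fork()ing a multi-threaded process (the default multiprocessing start method on Linux) is deprecated because other threads may hold locks at fork time, and the ResourceWarning flags a Pool that was never closed. A minimal sketch of the usual remedies, not taken from the test source, using an explicit spawn context and a context-managed pool:

    import multiprocessing

    def work(item):
        return item * 2

    if __name__ == "__main__":
        # "spawn" starts a fresh interpreter instead of fork()ing a process
        # whose other threads may hold locks, avoiding the deadlock risk the
        # DeprecationWarning describes.
        ctx = multiprocessing.get_context("spawn")

        # Pool.__exit__ calls terminate(), so the pool is never left running
        # and the "unclosed running multiprocessing pool" warning goes away.
        with ctx.Pool(processes=4) as pool:
            results = pool.map(work, range(8))
        print(results)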
contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=807487) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_with_invalid_name[tables_format_v0] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue[fifo] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create_old[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create_old[std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_double_create_old[std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_fifo_queue_wo_postfix[tables_format_v0] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[fifo] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_fifo_queue_wo_postfix[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_fifo_queue_wo_postfix[tables_format_v1] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_generates_event[tables_format_v1] [SKIPPED] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_tags.py::TestQueueTags::test_untag_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_with_invalid_name[tables_format_v0] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[std] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_with_invalid_name[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_with_invalid_name[tables_format_v1] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v1-200] [GOOD] >> 
test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v1-30] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_with_invalid_name[tables_format_v1] [GOOD] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_aggregates_transaction_counters[user] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_with_invalid_name[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_with_invalid_name[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_with_invalid_name[tables_format_v1] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue[fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_and_create_queue[fifo] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-std] >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_fifo_queue_wo_postfix[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v0-fifo] >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[std] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v0-std] >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v0-std] [GOOD] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_default_attributes[tables_format_v1-std] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-invalid] >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/unittest >> TxUsage::Write_Only_Big_Messages_In_Wide_Transactions_Table 2025-06-03 10:41:32,282 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper execution timed out 2025-06-03 10:41:33,630 WARNING devtools.ya.test.programs.test_tool.run_test.run_test: Wrapper has overrun 600 secs timeout. 
Process tree before termination: pid rss ref pdirt 391416 47.9M 47.3M 24.8M test_tool run_ut @/home/runner/.ya/build/build_root/u93c/000d93/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testi 392201 9.2G 9.2G 9.0G └─ src-client-topic-ut-with_direct_read_ut --trace-path-append /home/runner/.ya/build/build_root/u93c/000d93/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test Test command err: 2025-06-03T10:31:33.437077Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511668849018246651:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:33.437130Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/000d93/r3tmp/tmpMmDJdf/pdisk_1.dat 2025-06-03T10:31:33.489953Z node 1 :PQ_READ_PROXY DEBUG: caching_service.cpp:44: Direct read cache: : Created 2025-06-03T10:31:33.547829Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:31:33.547866Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:31:33.549210Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:31:33.555822Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511668849018246550:2079] 1748946693436561 != 1748946693436564 2025-06-03T10:31:33.558911Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 26458, node 1 2025-06-03T10:31:33.585586Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: /home/runner/.ya/build/build_root/u93c/000d93/r3tmp/yandexguUK14.tmp 2025-06-03T10:31:33.585603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: /home/runner/.ya/build/build_root/u93c/000d93/r3tmp/yandexguUK14.tmp 2025-06-03T10:31:33.585687Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:202: successfully initialized from file: /home/runner/.ya/build/build_root/u93c/000d93/r3tmp/yandexguUK14.tmp 2025-06-03T10:31:33.585748Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:31:33.598575Z INFO: TTestServer started on Port 9007 GrpcPort 26458 TClient is connected to server localhost:9007 PQClient connected to localhost:26458 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:31:33.681214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.691740Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:31:33.700828Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715658, at schemeshard: 72057594046644480 2025-06-03T10:31:33.702545Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... waiting... 2025-06-03T10:31:33.956347Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668849018247346:2336], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.956394Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.956563Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511668849018247375:2340], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:31:33.957577Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715662:3, at schemeshard: 72057594046644480 2025-06-03T10:31:33.960244Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715662, at schemeshard: 72057594046644480 2025-06-03T10:31:33.960328Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511668849018247377:2341], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715662 completed, doublechecking } 2025-06-03T10:31:34.009134Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.034497Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:31:34.044756Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511668853313214870:2521] txid# 281474976715665, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 9], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:31:34.054581Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 === Init DC: UPSERT INTO `/Root/PQ/Config/V2/Cluster` (name, balancer, local, enabled, weight) VALUES ("dc1", "localhost", true, true, 1000), ("dc2", "dc2.logbroker.yandex.net", false, true, 1000); === CheckClustersList. Subcribe to ClusterTracker from [1:7511668853313215032:2610] 2025-06-03T10:31:38.437485Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=table_exists.cpp:59;actor=TTableExistsActor;event=timeout;self_id=[1:7511668849018246651:2133];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:31:38.437525Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=timeout; === CheckClustersList. Ok 2025-06-03T10:31:39.361768Z :WriteToTopic_Invalid_Session_Table INFO: TTopicSdkTestSetup started 2025-06-03T10:31:39.365098Z node 1 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:132: new create topic request 2025-06-03T10:31:39.370144Z node 1 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037893][] pipe [1:7511668874788051713:2689] connected; active server actors: 1 2025-06-03T10:31:39.370353Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer__balancing.cpp:1516: [72075186224037893][test-topic] updating configuration. Deleted partitions []. 
Added partitions [0] 2025-06-03T10:31:39.370937Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:1040: [72075186224037893][test-topic] Discovered subdomain [OwnerId: 72057594046644480, LocalPathId: 1] state, outOfSpace = 0 at RB 72075186224037893 2025-06-03T10:31:39.370991Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:138: [72075186224037893][test-topic] BALANCER INIT DONE for test-topic: (0, 72075186224037892) 2025-06-03T10:31:39.371189Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72057594046644480, NodeId 1, Generation 2 2025-06-03T10:31:39.371913Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3096: [PQ: 72075186224037892] Handle TEvInterconnect::TEvNodeInfo 2025-06-03T10:31:39.371990Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3089: [PQ: 72075186224037892] Registered with mediator time cast 2025-06-03T10:31:39.372101Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3128: [PQ: 72075186224037892] Transactions request. From tx_00000000000000000000, To tx_18446744073709551615 2025-06-03T10:31:39.372133Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:751: [PQ: 72075186224037892] doesn't have tx info 2025-06-03T10:31:39.372137Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:763: [PQ: 72075186224037892] PlanStep 0, PlanTxId 0, ExecStep 0, ExecTxId 0 2025-06-03T10:31:39.372140Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:975: [PQ: 72075186224037892] no config, start with empty partitions and default config 2025-06-03T10:31:39.372143Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:4887: [PQ: 72075186224037892] Txs.size=0, PlannedTxs.size=0 2025-06-03T10:31:39.372149Z node 1 :PERSQUEUE NOTICE: pq_impl.cpp:1099: [PQ: 72075186224037892] disable metering: reason# billing is not enabled in BillingMeteringConfig 2025-06-03T10:31:39.372160Z node 1 :PERSQUEUE INFO: pq_impl.cpp:800: [PQ: 72075186224037892] doesn't have tx writes info 2025-06-03T10:31:39.372279Z node 1 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:493: [72075186224037893][test-topic] TEvClientConnected TabletId 72075186224037892, NodeId 1, Generation 1 2025-06-03T10:31:39.372314Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668874788051736:2451], now have 1 active actors on pipe 2025-06-03T10:31:39.419525Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037892] server connected, pipe [1:7511668874788051711:2687], now have 1 active actors on pipe 2025-06-03T10:31:39.423723Z node 1 :PERSQUEUE DEBUG: pq_impl.cpp:3220: [PQ: 72075186224037892] Handle TEvPersQueue::TEvProposeTransaction SourceActor { RawX1: 7511668849018247019 RawX2: 4294969502 } TxId: 281474976715674 Config { TabletConfig { PartitionConfig { MaxCountInPartition: 2147483647 LifetimeSeconds: 86400 SourceIdLifetimeSeconds: 1382400 WriteSpeedInBytesPerSecond: 1048576 BurstSize: 1048576 TotalPartitions: 1 ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { PoolKind: "test" } ExplicitChannelProfiles { Po ... 
25-06-03T10:41:33.363684Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 3 messageNo: 1 requestId: cookie: 1 2025-06-03T10:41:33.363685Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:324: [PQ: 72075186224037904, Partition: {3, {20, 281474976716023}, 100069}, State: StateIdle] Answering for message sourceid: '\0test-message_group_id_69_3', Topic: 'topic_A', Partition: {3, {20, 281474976716023}, 100069}, SeqNo: 1, partNo: 12, Offset: 0 is stored on disk 2025-06-03T10:41:33.363723Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:538: TPartitionWriter 72075186224037904 (partition=3) Received event: NKikimr::TEvPersQueue::TEvResponse 2025-06-03T10:41:33.435704Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:41:33.436165Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1384 sessionId: test-message_group_id_69_1|2253d378-737b2066-f01839fd-83d5552f_0 describe result for acl check 2025-06-03T10:41:33.442036Z node 20 :PQ_READ_PROXY DEBUG: grpc_pq_schema.cpp:168: new Describe partition request 2025-06-03T10:41:33.442098Z node 20 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1209: TDescribePartitionActor for request path: "/Root/topic_A" partition_id: 4 include_location: true 2025-06-03T10:41:33.442114Z node 20 :PQ_READ_PROXY DEBUG: schema_actors.cpp:1219: TDescribePartitionActor[20:7511671429016439420:5104]: Bootstrap 2025-06-03T10:41:33.442407Z node 20 :PQ_READ_PROXY DEBUG: schema_actors.cpp:657: DescribeTopicImpl [20:7511671429016439420:5104]: Request location 2025-06-03T10:41:33.442519Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1652: [72075186224037914][topic_A] pipe [20:7511671429016439422:5097] connected; active server actors: 1 2025-06-03T10:41:33.442548Z node 20 :PERSQUEUE_READ_BALANCER DEBUG: read_balancer.cpp:904: [72075186224037914][topic_A] addPartitionToResponse tabletId 72075186224037898, partitionId 4, NodeId 20, Generation 1 2025-06-03T10:41:33.442577Z node 20 :PQ_READ_PROXY DEBUG: schema_actors.cpp:750: DescribeTopicImpl [20:7511671429016439420:5104]: Got location 2025-06-03T10:41:33.442619Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1664: [72075186224037914][topic_A] pipe [20:7511671429016439422:5097] disconnected; active server actors: 1 2025-06-03T10:41:33.442630Z node 20 :PERSQUEUE_READ_BALANCER INFO: read_balancer__balancing.cpp:1688: [72075186224037914][topic_A] pipe [20:7511671429016439422:5097] disconnected no session 2025-06-03T10:41:33.442991Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [0] Got PartitionLocation response. Status SUCCESS, proto: partition { partition_id: 4 active: true partition_location { node_id: 20 generation: 1 } } 2025-06-03T10:41:33.443004Z :TRACE: [/Root] TRACE_EVENT DescribePartitionResponse partition_id=4 active=1 pl_node_id=20 pl_generation=1 2025-06-03T10:41:33.443011Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [0] GetPreferredEndpoint: partitionId 4, partitionNodeId 20 exists in the endpoint pool. 2025-06-03T10:41:33.443018Z :TRACE: [/Root] TRACE_EVENT PreferredPartitionLocation Endpoint= NodeId=20 Generation=1 2025-06-03T10:41:33.443023Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Start write session. 
Will connect to nodeId: 20 2025-06-03T10:41:33.443254Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Write session: direct write to partition: 4, generation 1 2025-06-03T10:41:33.443297Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Write session: send init request: init_request { path: "topic_A" producer_id: "test-message_group_id_69_4" partition_with_generation { partition_id: 4 generation: 1 } } 2025-06-03T10:41:33.443303Z :TRACE: [/Root] TRACE_EVENT InitRequest pwg_partition_id=4 pwg_generation=1 2025-06-03T10:41:33.443444Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Write session: OnWriteDone gRpcStatusCode: 0 2025-06-03T10:41:33.443478Z node 20 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:107: new grpc connection 2025-06-03T10:41:33.443490Z node 20 :PQ_WRITE_PROXY DEBUG: grpc_pq_write.h:141: new session created cookie 1387 2025-06-03T10:41:33.443653Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:254: session v1 cookie: 1387 sessionId: grpc read done: success: 1 data: init_request { path: "topic_A" producer_id: "test-message_group_id_69_4" partition_with_generation { partition_id: 4 generation: 1 } } 2025-06-03T10:41:33.443690Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:442: session request cookie: 1387 path: "topic_A" producer_id: "test-message_group_id_69_4" partition_with_generation { partition_id: 4 generation: 1 } from ipv6:[::1]:44992 2025-06-03T10:41:33.443702Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:1532: write session: cookie=1387 sessionId= userAgent="topic server" ip=ipv6:[::1]:44992 proto=topic topic=topic_A durationSec=0 2025-06-03T10:41:33.443706Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:566: init check schema 2025-06-03T10:41:33.443719Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:475: session to partition: 4, generation: 1 2025-06-03T10:41:33.444010Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:627: session v1 cookie: 1387 sessionId: describe result for acl check 2025-06-03T10:41:33.444083Z node 20 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:62: TTableHelper SelectQuery: --!syntax_v1 DECLARE $Hash AS Uint64; DECLARE $Topic AS Utf8; DECLARE $SourceId AS Utf8; SELECT Partition, CreateTime, AccessTime, SeqNo FROM `//Root/.metadata/TopicPartitionsMapping` WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId; 2025-06-03T10:41:33.444090Z node 20 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:63: TTableHelper UpdateQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; DECLARE $SeqNo AS Uint64; UPSERT INTO `//Root/.metadata/TopicPartitionsMapping` (Hash, Topic, ProducerId, CreateTime, AccessTime, Partition, SeqNo) VALUES ($Hash, $Topic, $SourceId, $CreateTime, $AccessTime, $Partition, $SeqNo); 2025-06-03T10:41:33.444092Z node 20 :PQ_PARTITION_CHOOSER DEBUG: partition_chooser_impl__table_helper.h:64: TTableHelper UpdateAccessTimeQuery: --!syntax_v1 DECLARE $SourceId AS Utf8; DECLARE $Topic AS Utf8; DECLARE $Hash AS Uint64; DECLARE $Partition AS Uint32; DECLARE $CreateTime AS Uint64; DECLARE $AccessTime AS Uint64; UPDATE `//Root/.metadata/TopicPartitionsMapping` SET AccessTime = $AccessTime WHERE Hash = $Hash AND Topic = $Topic AND ProducerId = $SourceId AND Partition = $Partition; 2025-06-03T10:41:33.444100Z node 20 :PQ_PARTITION_CHOOSER DEBUG: 
partition_chooser_impl__abstract_chooser_actor.h:305: TPartitionChooser [20:7511671429016439427:5087] (SourceId=test-message_group_id_69_4, PreferedPartition=4) ReplyResult: Partition=4, SeqNo=0 2025-06-03T10:41:33.444106Z node 20 :PQ_WRITE_PROXY DEBUG: write_session_actor.cpp:689: ProceedPartition. session cookie: 1387 sessionId: partition: 4 expectedGeneration: 1 2025-06-03T10:41:33.444313Z node 20 :PQ_WRITE_PROXY DEBUG: writer.cpp:798: TPartitionWriter 72075186224037898 (partition=4) TEvClientConnected Status OK, TabletId: 72075186224037898, NodeId 20, Generation: 1 2025-06-03T10:41:33.444338Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2880: [PQ: 72075186224037898] server connected, pipe [20:7511671429016439430:5087], now have 1 active actors on pipe 2025-06-03T10:41:33.444355Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-03T10:41:33.444363Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037898] got client message batch for topic 'topic_A' partition 4 2025-06-03T10:41:33.444416Z node 20 :PERSQUEUE INFO: ownerinfo.cpp:30: new Cookie test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0 generated for partition 4 topic 'topic_A' owner test-message_group_id_69_4 2025-06-03T10:41:33.444460Z node 20 :PERSQUEUE DEBUG: partition_write.cpp:35: [PQ: 72075186224037898, Partition: 4, State: StateIdle] TPartition::ReplyOwnerOk. Partition: 4 2025-06-03T10:41:33.444499Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 4 messageNo: 0 requestId: cookie: 0 2025-06-03T10:41:33.444574Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:347: Handle TEvRequest topic: 'topic_A' requestId: 2025-06-03T10:41:33.444582Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:2794: [PQ: 72075186224037898] got client message batch for topic 'topic_A' partition 4 2025-06-03T10:41:33.444613Z node 20 :PERSQUEUE DEBUG: pq_impl.cpp:382: Answer ok topic: 'topic_A' partition: 4 messageNo: 0 requestId: cookie: 0 2025-06-03T10:41:33.444635Z node 20 :PQ_WRITE_PROXY INFO: write_session_actor.cpp:865: session inited cookie: 1387 partition: 4 MaxSeqNo: 0 sessionId: test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0 2025-06-03T10:41:33.444866Z :DEBUG: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Write session: OnReadDone gRpcStatusCode: 0 2025-06-03T10:41:33.444883Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Counters: { Errors: 0 CurrentSessionLifetimeMs: 1748947293444 BytesWritten: 0 MessagesWritten: 0 BytesWrittenCompressed: 0 BytesInflightUncompressed: 0 BytesInflightCompressed: 0 BytesInflightTotal: 0 MessagesInflight: 0 } 2025-06-03T10:41:33.542536Z :INFO: [/Root] TraceId [] SessionId [] PartitionId [4] Generation [1] Write session established. 
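[Editor's note] The TTableHelper statements logged a little earlier are ordinary parameterized YQL (--!syntax_v1 plus DECLARE). As an illustration only, assuming a reachable local endpoint and database (neither is part of this test run) and a hypothetical helper name select_mapping, the SELECT could be issued from the ydb Python SDK roughly like this:

    import ydb

    QUERY = """--!syntax_v1
    DECLARE $Hash AS Uint64;
    DECLARE $Topic AS Utf8;
    DECLARE $SourceId AS Utf8;
    SELECT Partition, CreateTime, AccessTime, SeqNo
    FROM `//Root/.metadata/TopicPartitionsMapping`
    WHERE Hash == $Hash AND Topic == $Topic AND ProducerId == $SourceId;
    """

    def select_mapping(pool, hash_value, topic, source_id):
        # Prepare the statement, bind the declared parameters, and run it in a
        # serializable transaction with automatic retries of transient errors.
        def callee(session):
            prepared = session.prepare(QUERY)
            return session.transaction(ydb.SerializableReadWrite()).execute(
                prepared,
                {"$Hash": hash_value, "$Topic": topic, "$SourceId": source_id},
                commit_tx=True,
            )
        return pool.retry_operation_sync(callee)

    driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")  # assumed
    driver.wait(timeout=5)
    pool = ydb.SessionPool(driver)
    result_sets = select_mapping(pool, 0, "topic_A", "test-message_group_id_69_4")
    pool.stop()
    driver.stop()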
Init response: session_id: "test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0" partition_id: 4 2025-06-03T10:41:33.542552Z :TRACE: [/Root] TRACE_EVENT InitResponse partition_id=4 session_id=test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0 2025-06-03T10:41:33.542674Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0] PartitionId [4] Generation [1] OnWrite: seqNo=1, txId={ydb://session/3?node_id=20&id=ODRkNzc4NmYtYTY3Nzg4NzMtYjEzNmI3ZDItYmNmMWE4OTE=, 01jwtp3m1x82x70j0f5ppq774p}, WriteCount=1, AckCount=0 2025-06-03T10:41:33.543716Z :DEBUG: [/Root] TraceId [] SessionId [test-message_group_id_69_4|7a38109-72d98d3f-92e8f33-f5a2360f_0] PartitionId [4] Generation [1] Write 1 messages with Id from 1 to 1 Traceback (most recent call last): File "library/python/testing/yatest_common/yatest/common/process.py", line 384, in wait wait_for( File "library/python/testing/yatest_common/yatest/common/process.py", line 764, in wait_for raise TimeoutError(truncate(message, MAX_MESSAGE_LEN)) yatest.common.process.TimeoutError: 600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/000d93/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout During handling of the above exception, another exception occurred: Traceback (most recent call last): File "devtools/ya/test/programs/test_tool/run_test/run_test.py", line 1738, in main res.wait(check_exit_code=False, timeout=run_timeout, on_timeout=timeout_callback) File "library/python/testing/yatest_common/yatest/common/process.py", line 398, in wait raise ExecutionTimeoutError(self, str(e)) yatest.common.process.ExecutionTimeoutError: (("600 second(s) wait timeout has expired: Command '['/home/runner/.ya/tools/v4/8865992733/test_tool', 'run_ut', '@/home/runner/.ya/build/build_root/u93c/000d93/ydb/public/sdk/cpp/src/client/topic/ut/with_direct_read_ut/test-results/unittest/testing_out_stuff/chunk9/testing_out_stuff/test_tool.args']' stopped by 600 seconds timeout",), {}) |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcKeep::test_no_degraded_groups_after_shutdown [GOOD] >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] >> test_public_api.py::TestSelectAfterDropWithRepetitions::test_select_on_dropped_table_unsuccessful[10] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_read_message[fifo] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_receive_attempts_are_counted_separately_for_messages_in_one_batch >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-invalid] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-no] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-no] [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v0] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleKeep::test_check_shutdown_state_storage_nodes [GOOD] >> 
test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_and_create_queue[fifo] [GOOD] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v1] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[std] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_read_message[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_read_message[std] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v1-30] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v0-fifo] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v1-fifo] >> test_queue_counters.py::TestSqsGettingCounters::test_receive_attempts_are_counted_separately_for_messages_in_one_batch [GOOD] >> test_queue_counters.py::TestSqsGettingCounters::test_receive_message_immediate_duration_counter |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockMax::test_restart_as_much_as_can [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_read_message[std] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_send_message[fifo] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_send_message[fifo] [GOOD] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Max::test_no_degraded_groups_after_shutdown [GOOD] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_counters.py::TestSqsCountersFeatures::test_detailed_counters[user] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_send_message[std] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_send_message[std] [GOOD] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_read_message[fifo] [GOOD]
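[Editor's note] The unittest chunk above ends with the wrapper killing src-client-topic-ut-with_direct_read_ut after its 600-second budget: yatest.common.process.wait_for raises TimeoutError, which run_test converts into ExecutionTimeoutError. A minimal sketch of the same poll-until-deadline pattern; this is an illustration under stated assumptions, not the actual yatest implementation:

    import subprocess
    import time

    class WaitTimeoutError(RuntimeError):
        """Raised when the condition does not become true before the deadline."""

    def wait_for(check, timeout, message, sleep_time=0.1):
        # Poll `check` until it returns truthy or `timeout` seconds elapse.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if check():
                return
            time.sleep(sleep_time)
        raise WaitTimeoutError(message)

    # Usage: give up on a child process after 600 seconds, as the wrapper did above
    # ("sleep 1" stands in for the real test_tool command line).
    proc = subprocess.Popen(["sleep", "1"])
    wait_for(lambda: proc.poll() is not None, timeout=600,
             message="600 second(s) wait timeout has expired")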
>> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v0-std] |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue_with_invalid_name[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_request_to_deleted_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v0] >> test_format_without_version.py::TestQueueWithoutVersionWithTenant::test_common[std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-std] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleBlock42::test_restart_single_node_is_ok [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v1-std] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue_batch[tables_format_v1] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_and_create_queue[fifo] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[with_queues-tables_format_v1] [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v0] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v0] [GOOD] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] [GOOD] >> 
test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-empty] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_action_which_does_not_requere_existing_queue >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create[fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_fifo_queue_wo_postfix[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-no] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v1-std] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-std] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorKeep::test_restart_as_much_as_can [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v0] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v0] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v0] [SKIPPED] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue_batch[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success >> test_queue_attributes_validation.py::TestQueueAttributesInCompatibilityMode::test_set_queue_attributes_no_validation[tables_format_v1-std] [GOOD] >> 
test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] [SKIPPED] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_counters.py::TestSqsCountersFeatures::test_aggregates_transaction_counters[user] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue_batch[tables_format_v1] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v0-fifo] >> test_counters.py::TestSqsCountersFeatures::test_disables_user_counters |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_create_queue[fifo] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-empty] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-invalid] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-invalid] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-no] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create[std] >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v0-fifo] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v0-no] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-empty] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_action_which_does_not_requere_existing_queue [GOOD] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_that_queue_can_be_created_despite_lack_of_throttling_budget >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-empty] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create[std] [GOOD] >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create_old[fifo] >> test_queue_counters.py::TestSqsGettingCounters::test_receive_message_immediate_duration_counter [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_queue_counters.py::TestSqsGettingCounters::test_sqs_action_counters >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create_old[fifo] [GOOD] >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create_old[std] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_that_queue_can_be_created_despite_lack_of_throttling_budget [GOOD] >> 
test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue >> test_queue_counters.py::TestSqsGettingCounters::test_sqs_action_counters [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_and_create_queue[fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create_old[std] [GOOD] >> test_public_api.py::TestMetaDataInvalidation::test_invalidation_success [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v0-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v0-std] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithTenant::test_send_message[std] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-no] [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsMirrorMax::test_restart_as_much_as_can [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v0-std] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-empty] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v1-fifo] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-empty] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-invalid] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-invalid] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-no] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-no] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_disables_user_counters [GOOD] >> 
test_counters.py::TestSqsCountersFeatures::test_removes_user_counters_after_user_deletion[tables_format_v0] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-empty] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_throttling.py::TestSqsThrottlingOnNonexistentQueue::test_throttling_on_nonexistent_queue [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v0] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v1-std] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v1-fifo] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_removes_user_counters_after_user_deletion[tables_format_v0] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_removes_user_counters_after_user_deletion[tables_format_v1] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartSingleMirror3DC::test_restart_single_node_is_ok [GOOD] >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> 
test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_account_actions.py::TestAccountActionsWithTenant::test_manage_account[without_queues-tables_format_v1] [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleBlock42::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_restart.py::TestCmsStateStorageRestartsBlockKeep::test_restart_as_much_as_can [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_and_create_queue[fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_and_create_queue[std] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v1-std] |73.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_removes_user_counters_after_user_deletion[tables_format_v1] [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_updates_status_code_counters_when_parsing_errors_occur[tables_format_v0] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_counters.py::TestSqsGettingCounters::test_sqs_action_counters [GOOD] >> test_counters.py::TestSqsCountersFeatures::test_updates_status_code_counters_when_parsing_errors_occur[tables_format_v0] [GOOD] >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_send_message[std] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v1-std] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-empty] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v1-std] [GOOD] >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-invalid] >> 
test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-invalid] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v0-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v1-fifo] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_simple.py::TestSimple::test[alter_table] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test[many_tables] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] >> test_public_api.py::TestJsonExample::test_json_unexpected_failure >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterBlock42::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v1-fifo] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v1-std] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/async_replication/py3test >> test_async_replication.py::TestAsyncReplication::test_async_replication[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith10Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith2Cpu::test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_and_create_queue[std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v0-fifo] >> test_actorsystem.py::TestWithHybridNodeWith19Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith13Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith5Cpu::test >> test_alter_compression.py::TestAlterCompression::test[alter_compression] |73.7%| [TA] $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_ya_count_queues[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.7%| [TA] {RESULT} $(B)/ydb/tests/datashard/async_replication/test-results/py3test/{meta.json ... results_accumulator.log} >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_public_api.py::TestJsonExample::test_json_unexpected_failure [GOOD] >> test_public_api.py::TestJsonExample::test_json_success >> test_simple.py::TestSimple::test[alter_table] [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] >> test_public_api.py::TestJsonExample::test_json_success [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_create_queue_generates_event[tables_format_v1] [SKIPPED] >> test_actorsystem.py::TestWithComputeNodeWith16Cpu::test |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror34::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v1-no] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith7Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith2Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith10Cpu::test [GOOD] >> test_simple.py::TestSimple::test[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test[table] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith13Cpu::test [GOOD] >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] 
>> test_actorsystem.py::TestWithStorageNodeWith5Cpu::test [GOOD] |73.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] >> docker_wrapper_test.py::test_pg_generated[Test64BitErrorChecking] >> test_actorsystem.py::TestWithHybridNodeWith19Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith35Cpu::test >> test_simple.py::TestSimple::test[table] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_simple.py::TestSimple::test[tablestores] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_garbage_collection.py::TestSqsGarbageCollection::test_removes_messages_by_retention_time[tables_format_v0-fifo] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=822485) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=822485) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=822485) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=822485) is multi-threaded, use of fork() may lead to deadlocks in the child. 
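[Editor's note] The warnings in the chunk above come in pairs: popen_fork.py warns because the test process is already multi-threaded when a Pool fork()s its workers, and pool.py then reports the pool was never closed. Both disappear with the spawn start method and a context-managed pool. A minimal sketch under the assumption that the workload is an ordinary picklable function (the real workload is not visible in the log):

    import multiprocessing as mp

    def work(x):
        # Placeholder task; the actual workload is not shown in the log.
        return x * x

    if __name__ == "__main__":
        # "spawn" starts workers in a fresh interpreter instead of fork(),
        # which avoids the "multi-threaded, use of fork() may lead to
        # deadlocks in the child" DeprecationWarning.
        ctx = mp.get_context("spawn")
        # The context manager terminates the pool on exit, so no
        # "unclosed running multiprocessing pool" ResourceWarning is emitted.
        with ctx.Pool(processes=4) as pool:
            print(pool.map(work, range(8)))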
contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsWithForceAuthorizationWithPath::test_invalid_token[tables_format_v1-empty] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v0] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v1] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupMirror3dcMax::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith10Cpu::test >> test_simple.py::TestSimple::test[tablestores] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_table] [GOOD] >> test_simple.py::TestSimple::test_multi[alter_tablestore] [GOOD] >> test_simple.py::TestSimple::test_multi[table] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_multiplexing_tables_format.py::TestMultiplexingTablesFormatWithPath::test_double_create_old[std] [GOOD] >> test_simple.py::TestSimple::test_multi[table] [GOOD] >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_erasure.py::TestDegradedGroupBlock42Keep::test_no_degraded_groups_after_shutdown [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith16Cpu::test [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v0-fifo] [GOOD] >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v0-std] >> test_actorsystem.py::TestWithStorageNodeWith21Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith37Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith7Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith32Cpu::test >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v0-std] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> test_actorsystem.py::TestWithComputeNodeWith27Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith4Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith21Cpu::test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith10Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_queues_count_over_limit[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text 
'{}'".format( |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/py3test >> test_actorsystem.py::TestWithComputeNodeWith32Cpu::test |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/cms/py3test >> test_cms_state_storage.py::TestCmsStateStorageSimpleMax::test_check_shutdown_state_storage_nodes [GOOD] >> test_scheme_load.py::TestSchemeLoad::test[create_and_drop_tables] [GOOD] >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_purge_queue[tables_format_v1-std] [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_purge_queue[tables_format_v0-std] [GOOD] |73.8%| [TA] $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... results_accumulator.log} >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror34::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] |73.8%| [TA] {RESULT} $(B)/ydb/tests/functional/cms/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_actorsystem.py::TestWithHybridNodeWith4Cpu::test [GOOD] >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith21Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_acl.py::TestSqsWithForceAuthorizationWithTenant::test_invalid_token[tables_format_v0-invalid] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithHybridNodeWith30Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith11Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith14Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith6Cpu::test |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith1Cpu::test >> test_ydb_sql.py::TestExecuteSqlWithParams::test_uint32 >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_uint32 >> test_actorsystem.py::TestWithComputeNodeWith21Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith38Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith16Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith27Cpu::test [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith17Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith8Cpu::test |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_dispatch.py::TestMapping::test_mapping ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_counters.py::TestSqsCountersFeatures::test_updates_status_code_counters_when_parsing_errors_occur[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartMultipleMirror3DC::test_tablets_are_successfully_started_after_few_killed_nodes [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith11Cpu::test [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith14Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith6Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith24Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith32Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith35Cpu::test [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v1-std] [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue_batch[tables_format_v0] >> test_actorsystem.py::TestWithStorageNodeWith16Cpu::test [GOOD] >> 
test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_deduplication_table[tables_format_v1] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] >> test_ydb_sql.py::TestExecuteSqlWithParams::test_uint32 [GOOD] >> test_ydb_sql.py::TestExecuteSqlWithParams::test_uint64_and_string >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_uint32 [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_uint64_and_string >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup >> test_actorsystem.py::TestWithStorageNodeWith7Cpu::test >> test_ydb_sql.py::TestExecuteSqlWithParams::test_uint64_and_string [GOOD] >> test_ydb_sql.py::TestExecuteSqlWithParams::test_list >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_uint64_and_string [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_list >> test_cp_ic.py::TestCpIc::test_discovery >> test_retry.py::TestRetry::test_fail_first[kikimr0] >> test_actorsystem.py::TestWithComputeNodeWith11Cpu::test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] >> test_ydb_sql.py::TestExecuteSqlWithParams::test_list [GOOD] >> test_ydb_sql.py::TestExecuteSqlWithParams::test_struct ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_request_to_deleted_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_list [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_struct >> test_ydb_sql.py::TestExecuteSqlWithParams::test_struct [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParams::test_struct [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith8Cpu::test [GOOD] >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue_batch[tables_format_v0] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith17Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith27Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith32Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith1Cpu::test [GOOD] >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith5Cpu::test |73.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_actorsystem.py::TestWithStorageNodeWith37Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith9Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith22Cpu::test |73.9%| 
[TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/restarts/py3test >> test_restarts.py::TestRestartClusterMirror3DC::test_when_create_many_tablets_and_restart_cluster_then_every_thing_is_ok [GOOD] >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v0-fifo] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith7Cpu::test [GOOD] >> test_public_api.py::TestForPotentialDeadlock::test_deadlocked_threads_on_cleanup [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith11Cpu::test [GOOD] |73.9%| [TA] $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] |73.9%| [TA] {RESULT} $(B)/ydb/tests/functional/restarts/test-results/py3test/{meta.json ... results_accumulator.log} >> test_actorsystem.py::TestWithHybridNodeWith30Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args0-row] >> test_public_api.py::TestRecursiveCreation::test_mkdir >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith22Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith24Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith28Cpu::test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith5Cpu::test [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_full_stats >> test_actorsystem.py::TestWithComputeNodeWith9Cpu::test [GOOD] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/py3test >> test_cp_ic.py::TestCpIc::test_discovery [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=872710) is multi-threaded, use of fork() may lead to deadlocks in the child. 
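
The "unclosed ..." ResourceWarnings in these blocks (pool.py:268, process.py:125) do not say where the leaked object was created, and the log's own hint is the fix: enable tracemalloc so each warning carries an allocation traceback. A small repro sketch under that assumption (standalone code, not from the harness; run as python -X tracemalloc=10 repro.py or with PYTHONTRACEMALLOC=10 set):

    import gc
    import multiprocessing as mp
    import warnings

    warnings.simplefilter("always", ResourceWarning)

    def leaky():
        pool = mp.Pool(2)         # never closed; pool.py:268 warns when it is destroyed
        return pool.map(str, [1, 2])

    def tidy():
        with mp.Pool(2) as pool:  # shut down deterministically on scope exit
            return pool.map(str, [1, 2])

    if __name__ == "__main__":
        leaky()
        gc.collect()              # collect the abandoned pool so the warning fires here
        tidy()

With tracemalloc enabled, the ResourceWarning is followed by an "Object allocated at" traceback pointing at the line that created the pool, which is exactly the detail these log blocks keep saying is missing.
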
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_actorsystem.py::TestWithHybridNodeWith6Cpu::test |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithStorageNodeWith8Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith12Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith15Cpu::test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith33Cpu::test >> test_public_api.py::TestRecursiveCreation::test_mkdir [GOOD] >> test_public_api.py::TestRecursiveCreation::test_create_table >> test_public_api.py::TestRecursiveCreation::test_create_table [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith27Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith22Cpu::test [GOOD] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith17Cpu::test >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args0-column] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue[tables_format_v0-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_view >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args1-row] >> test_actorsystem.py::TestWithHybridNodeWith10Cpu::test |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_full_stats [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_profile_stats |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith18Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith6Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args1-column] >> test_actorsystem.py::TestWithComputeNodeWith33Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith38Cpu::test [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_profile_stats [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_basic_stats |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithStorageNodeWith12Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith20Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith8Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith22Cpu::test [GOOD] >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_with_basic_stats [GOOD] >> 
test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_to_dir >> test_actorsystem.py::TestWithHybridNodeWith15Cpu::test [GOOD] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_to_dir [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParams::test_uint32 >> test_ydb_impex.py::TestImpex::test_simple[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args2-row] >> test_actorsystem.py::TestWithStorageNodeWith17Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queue_attributes_validation.py::TestQueueAttributesValidation::test_create_queue_with_custom_attributes[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args2-column] >> test_actorsystem.py::TestWithComputeNodeWith12Cpu::test |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args3-row] >> test_actorsystem.py::TestWithHybridNodeWith10Cpu::test [GOOD] >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith31Cpu::test >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args3-column] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith28Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith18Cpu::test [GOOD] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] >> test_actorsystem.py::TestWithHybridNodeWith25Cpu::test |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith7Cpu::test >> test_ydb_impex.py::TestImpex::test_simple[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[json-additional_args4-row] >> test_actorsystem.py::TestWithHybridNodeWith36Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith20Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test >> 
test_ydb_impex.py::TestImpex::test_simple[json-additional_args4-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[json-additional_args4-column] >> test_ydb_table.py::TestExecuteQueryWithParams::test_uint32 [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParams::test_uint64_and_string >> test_ydb_table.py::TestExecuteQueryWithParams::test_uint64_and_string [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParams::test_list >> test_ydb_table.py::TestExecuteQueryWithParams::test_list [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParams::test_struct |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith12Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_simple[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args0-row] >> test_ydb_table.py::TestExecuteQueryWithParams::test_struct [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParams::test_scan_query_with_parameters >> test_ydb_table.py::TestExecuteQueryWithParams::test_scan_query_with_parameters [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args0-column] |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_recursive_remove.py::TestRecursiveRemove::test_various_scheme_objects >> test_actorsystem.py::TestWithHybridNodeWith11Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith23Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith28Cpu::test >> test_ydb_backup.py::TestBackupSingle::test_single_table_backup |73.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith7Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args1-row] >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith38Cpu::test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_simple.py::TestSimple::test_multi[tablestores] [GOOD] >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args1-column] >> test_actorsystem.py::TestWithStorageNodeWith33Cpu::test [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v1-std] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args2-row] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args2-column] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test ------- [TM] 
{default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith13Cpu::test >> test_retry.py::TestRetry::test_fail_first[kikimr0] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith23Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith13Cpu::test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith9Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith25Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith16Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith11Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith33Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args3-row] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_scheme_load.py::TestSchemeLoad::test_multi[create_and_drop_tables] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith18Cpu::test >> test_retry.py::TestRetry::test_low_rate[kikimr0] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args3-column] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/serverless/py3test >> test_serverless.py::test_database_with_column_disk_quotas[enable_alter_database_create_hive_first--true] [GOOD] >> test_ydb_backup.py::TestBackupSingle::test_single_table_backup [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith8Cpu::test >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_view [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_impex.py::TestImpex::test_delimeter_at_end_of_lines[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args0-row] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith31Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args0-column] >> test_actorsystem.py::TestWithStorageNodeWith13Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith19Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith13Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith23Cpu::test [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/copy_table/py3test >> test_copy_table.py::TestCopyTable::test_copy_table[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith16Cpu::test [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/with_quotas/py3test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith21Cpu::test >> test_public_api.py::TestAttributes::test_create_table >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_view_json >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args1-row] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithPath::test_delete_queue_batch[tables_format_v0] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test >> test_dispatch.py::TestMapping::test_mapping [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args1-column] >> test_actorsystem.py::TestWithHybridNodeWith8Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith18Cpu::test [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] >> test_actorsystem.py::TestWithStorageNodeWith28Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args2-row] >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith23Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith39Cpu::test >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args2-column] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithStorageNodeWith14Cpu::test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter >> test_actorsystem.py::TestWithComputeNodeWith19Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith24Cpu::test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args3-row] >> test_actorsystem.py::TestWithStorageNodeWith34Cpu::test >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test [GOOD] |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith17Cpu::test >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args3-column] >> test_actorsystem.py::TestWithHybridNodeWith36Cpu::test [GOOD] >> 
test_ydb_scheme.py::TestSchemeDescribe::test_describe_view_json [GOOD] >> test_dispatch.py::TestMapping::test_idle >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test >> test_ydb_impex.py::TestImpex::test_excess_columns[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[json-additional_args4-row] >> test_ydb_impex.py::TestImpex::test_excess_columns[json-additional_args4-row] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith26Cpu::test >> test_ydb_impex.py::TestImpex::test_excess_columns[json-additional_args4-column] >> test_actorsystem.py::TestWithComputeNodeWith29Cpu::test >> test_ydb_sql.py::TestExecuteSqlWithParamsFromJson::test_script_from_file >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_pretty >> test_actorsystem.py::TestWithComputeNodeWith34Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith24Cpu::test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith21Cpu::test [GOOD] >> test_ydb_recursive_remove.py::TestRecursiveRemove::test_various_scheme_objects [GOOD] >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_external_table_references_json >> test_actorsystem.py::TestWithStorageNodeWith14Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_excess_columns[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args0-row] >> test_public_api.py::TestAttributes::test_create_table [GOOD] >> test_public_api.py::TestAttributes::test_copy_table >> test_public_api.py::TestAttributes::test_copy_table [GOOD] >> test_public_api.py::TestAttributes::test_create_indexed_table >> test_public_api.py::TestAttributes::test_create_indexed_table [GOOD] >> test_public_api.py::TestAttributes::test_alter_table |74.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v1] [GOOD] >> test_public_api.py::TestAttributes::test_alter_table [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes0] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes1] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes2] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes3] [GOOD] >> test_public_api.py::TestAttributes::test_limits[attributes4] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args0-column] >> test_actorsystem.py::TestWithHybridNodeWith17Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith32Cpu::test |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-fifo] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test [GOOD] >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args1-row] >> test_actorsystem.py::TestWithComputeNodeWith14Cpu::test >> test_actorsystem.py::TestWithStorageNodeWith38Cpu::test [GOOD] >> 
test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args1-column] >> test_ydb_sql.py::TestExecuteSqlWithParamsFromJson::test_script_from_file [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_pretty [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_base64 >> test_actorsystem.py::TestWithStorageNodeWith24Cpu::test [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_base64 [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_base64_array >> test_actorsystem.py::TestWithHybridNodeWith22Cpu::test >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_base64_array [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_unicode >> test_ydb_impex.py::TestImpex::test_stdin[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args2-row] >> test_actorsystem.py::TestWithStorageNodeWith19Cpu::test >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_unicode [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_unicode_array >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args2-column] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_yql_script_json_unicode_array [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_pretty >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_pretty [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_base64 |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_queues_managing.py::TestQueuesManagingWithTenant::test_delete_queue[tables_format_v1-std] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_base64 [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_base64_array >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params0] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args3-row] >> test_actorsystem.py::TestWithComputeNodeWith14Cpu::test [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_base64_array [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_unicode >> test_actorsystem.py::TestWithComputeNodeWith24Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith26Cpu::test [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client0-year Int32-False] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args3-column] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_unicode [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_unicode_array >> test_actorsystem.py::TestWithComputeNodeWith1Cpu::test >> test_s3_1.py::TestS3::test_write_result[v1-kikimr_params0-client0] >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] >> 
test_ydb_scripting.py::TestExecuteScriptWithFormats::test_stream_yql_script_json_unicode_array [GOOD] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_recursive_remove.py::TestRecursiveRemove::test_various_scheme_objects [GOOD] >> test_validation.py::TestS3::test_empty[v1-client0] |74.1%| [TA] $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... results_accumulator.log} >> test_s3_0.py::TestS3::test_csv[v1-false-client0] >> test_ydb_impex.py::TestImpex::test_stdin[tsv-additional_args3-column] [GOOD] >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataset] >> test_ydb_impex.py::TestImpex::test_stdin[json-additional_args4-row] >> test_ydb_impex.py::TestImpex::test_stdin[json-additional_args4-row] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith19Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[json-additional_args4-column] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith12Cpu::test [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith29Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith34Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_stdin[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args0-row] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args0-column] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_actorsystem.py::TestWithHybridNodeWith22Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test >> test_insert.py::TestInsert::test[read_data_during_bulk_upsert] [FAIL] >> test_actorsystem.py::TestWithComputeNodeWith15Cpu::test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args0-column] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith34Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args1-row] >> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.gz-gzip] >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_external_table_references_json [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith39Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args1-column] >> test_actorsystem.py::TestWithHybridNodeWith32Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith29Cpu::test |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-false] >> test_actorsystem.py::TestWithComputeNodeWith1Cpu::test [GOOD] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_flame_graph.py::TestExecuteWithFlameGraph::test_fg_to_dir [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args2-row] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/fq/multi_plane/py3test >> test_retry_high_rate.py::TestRetry::test_high_rate[kikimr0] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=872771) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith9Cpu::test [GOOD] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_creates_quoter [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/common/py3test >> test_garbage_collection.py::TestSqsGarbageCollection::test_cleanups_reads_table[tables_format_v0-200] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=839533) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=839533) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=839533) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/pool.py:268: ResourceWarning: unclosed running multiprocessing pool ResourceWarning: Enable tracemalloc to get the object allocation traceback |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v1-std] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args2-row] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith25Cpu::test >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args2-column] >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith37Cpu::test |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args3-row] >> test_actorsystem.py::TestWithComputeNodeWith15Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args3-column] >> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test [GOOD] |74.1%| [TA] {RESULT} $(B)/ydb/tests/functional/serverless/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_actorsystem.py::TestWithComputeNodeWith25Cpu::test >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_pretty >> test_ydb_impex.py::TestImpex::test_multiple_files[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[json-additional_args4-row] >> test_ydb_impex.py::TestImpex::test_multiple_files[json-additional_args4-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[json-additional_args4-column] >> test_actorsystem.py::TestWithStorageNodeWith1Cpu::test >> test_s3_0.py::TestS3::test_csv[v1-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv[v1-true-client0] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_scheme.py::TestSchemeDescribe::test_describe_external_table_references_json [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith35Cpu::test >> test_public_api.py::TestDocApiTables::test_create_table >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client0-year Int32-False] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args0-row] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client1-year Int32 NOT NULL-False] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith15Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args0-column] >> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test |74.1%| [TA] $(B)/ydb/tests/functional/sqs/common/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_actorsystem.py::TestWithComputeNodeWith35Cpu::test >> test_s3_0.py::TestS3::test_csv[v1-true-client0] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith25Cpu::test [GOOD] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-fifo] [GOOD] >> test_s3_0.py::TestS3::test_csv[v2-false-client0] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_pretty [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_base64 >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_base64 [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_base64_array >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args1-row] >> test_ydb_backup.py::TestBackupSingleNotNull::test_single_table_backup >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_base64_array [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_unicode >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client1-year Int32 NOT NULL-False] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_unicode [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_unicode_array >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args1-column] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client2-year Uint32-False] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataset] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataにちは% set] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_json_unicode_array [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_csv |74.1%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/common/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_actorsystem.py::TestWithStorageNodeWith29Cpu::test [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_csv [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_tsv >> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test >> test_ydb_table.py::TestExecuteQueryWithFormats::test_data_query_tsv [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_pretty >> test_actorsystem.py::TestWithHybridNodeWith27Cpu::test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_pretty [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_base64 >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[csv-additional_args1-column] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith25Cpu::test [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args2-row] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings0-client0] >> test_formats.py::TestS3Formats::test_format[v1-test.csv-csv_with_names-kikimr_settings0] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_base64 [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_base64_array >> test_statistics.py::TestS3::test_egress[v1-client0-json_list] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v2[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-false-client0] >> test_bindings_1.py::TestBindings::test_s3_insert[v1-kikimr_settings0-client0] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args2-column] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_base64_array [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_unicode >> test_actorsystem.py::TestWithStorageNodeWith1Cpu::test [GOOD] |74.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-std] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client2-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client3-year Uint32 NOT NULL-True] >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_unicode [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_unicode_array |74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-std] [GOOD] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_list-dataにちは% set] [GOOD] >> test_test_connection.py::TestConnection::test_test_s3_connection[v2-client0] >> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataset] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_json_unicode_array [GOOD] >> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_csv >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-false] [GOOD] >> 
test_actorsystem.py::TestWithComputeNodeWith2Cpu::test
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-true]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.gz-gzip] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithComputeNodeWith15Cpu::test [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.lz4-lz4]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args2-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args3-row]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_csv [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_tsv
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_scan_query_tsv [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_pretty
>> test_ydb_backup.py::TestBackupSingleNotNull::test_single_table_backup [GOOD]
>> test_actorsystem.py::TestWithStorageNodeWith39Cpu::test
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args3-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args3-column]
>> test_format_setting.py::TestS3::test_interval_unit[v1-client0]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithHybridNodeWith18Cpu::test [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_pretty [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_base64
>> test_public_api.py::TestDocApiTables::test_create_table [GOOD]
>> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest]
>> test_public_api.py::TestDocApiTables::test_alter_table[None-BadRequest] [GOOD]
>> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None]
>> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test [GOOD]
>> test_public_api.py::TestDocApiTables::test_alter_table[settings1-None] [GOOD]
>> test_public_api.py::TestDocApiTables::test_drop_table[None-None]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params0-true] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-false]
>> test_s3_0.py::TestS3::test_csv[v2-false-client0] [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_base64 [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_base64_array
>> test_public_api.py::TestDocApiTables::test_drop_table[None-None] [GOOD]
>> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None]
>> test_s3_0.py::TestS3::test_csv[v2-true-client0]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client3-year Uint32 NOT NULL-True] [GOOD]
>> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client4-year Int64-False]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_base64_array [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_unicode
>> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.lz4-lz4] [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_unicode [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_unicode_array
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.br-brotli]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[tsv-additional_args3-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[json-additional_args4-row]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_json_unicode_array [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_csv
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_csv [GOOD]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_tsv
>> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataset] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataにちは% set]
>> test_actorsystem.py::TestWithHybridNodeWith33Cpu::test
>> test_actorsystem.py::TestWithComputeNodeWith2Cpu::test [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[json-additional_args4-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[json-additional_args4-column]
>> test_ydb_table.py::TestExecuteQueryWithFormats::test_read_table_tsv [GOOD]
>> test_s3_0.py::TestS3::test_csv[v2-true-client0] [GOOD]
>> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v2-client0]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_s3_0.py::TestS3::test_inference[v2-client0]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-false] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-true]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client4-year Int64-False] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client5-year Int64 NOT NULL-False]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.br-brotli] [GOOD]
>> test_dispatch.py::TestMapping::test_idle [GOOD]
>> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.bz2-bzip2]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_stdin[json-additional_args4-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args0-row]
>> test_actorsystem.py::TestWithHybridNodeWith27Cpu::test [GOOD]
>> test_actorsystem.py::TestWithHybridNodeWith37Cpu::test [GOOD]
>> test_insert.py::TestS3::test_insert[v1-false-client0-json_each_row-dataにちは% set] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataset]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args0-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args0-column]
>> test_s3_0.py::TestS3::test_inference[v2-client0] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params0-true] [GOOD]
>> test_s3_0.py::TestS3::test_inference_null_column[v2-client0]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-false]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_send_message_rate[tables_format_v0] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.bz2-bzip2] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client5-year Int64 NOT NULL-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client6-year Uint64-False]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.zst-zstd]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args0-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args1-row]
>> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test
>> test_actorsystem.py::TestWithStorageNodeWith35Cpu::test [GOOD]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1]
>> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataset] [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith3Cpu::test
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args1-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args1-column]
>> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataにちは% set]
>> test_s3_0.py::TestS3::test_inference_null_column[v2-client0] [GOOD]
>> test_s3_0.py::TestS3::test_inference_optional_types[v2-client0]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-false] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-true]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client6-year Uint64-False] [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith35Cpu::test [GOOD]
>> test_test_connection.py::TestConnection::test_test_s3_connection[v2-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client7-year Uint64 NOT NULL-False]
>> test_test_connection.py::TestConnection::test_test_s3_connection[v1-client0]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.zst-zstd] [GOOD]
>> test_test_connection.py::TestConnection::test_test_s3_connection[v1-client0] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.xz-xz]
>> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v2-client0]
>> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v2-client0] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v1-client0]
>> test_s3_1.py::TestS3::test_write_result[v1-kikimr_params0-client0] [GOOD]
>> test_test_connection.py::TestConnection::test_test_s3_connection_uri[v1-client0] [GOOD]
>> test_test_connection.py::TestConnection::test_test_s3_connection_error[v2-client0]
>> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-false-client0]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[csv-additional_args1-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args2-row]
>> test_test_connection.py::TestConnection::test_test_s3_connection_error[v2-client0] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params0-true] [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_uint32
>> test_test_connection.py::TestConnection::test_test_s3_connection_error[v1-client0]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-false]
>> test_test_connection.py::TestConnection::test_test_s3_connection_error[v1-client0] [GOOD]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD]
>> test_statistics.py::TestS3::test_egress[v1-client0-json_list] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/py3test
>> test_dispatch.py::TestMapping::test_idle [GOOD]
Test command err:
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=869918) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> test_s3_0.py::TestS3::test_inference_optional_types[v2-client0] [GOOD]
>> test_statistics.py::TestS3::test_egress[v1-client0-json_each_row]
>> test_s3_0.py::TestS3::test_inference_multiple_files[v2-client0]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args2-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args2-column]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test [GOOD]
>> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0]
>> test_insert.py::TestS3::test_insert[v1-false-client0-csv_with_names-dataにちは% set] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client7-year Uint64 NOT NULL-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client8-year String NOT NULL-True]
>> test_compressions.py::TestS3Compressions::test_compression[v2-test.json.xz-xz] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataset]
>> test_formats.py::TestS3Formats::test_format[v1-test.csv-csv_with_names-kikimr_settings0] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithComputeNodeWith20Cpu::test [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith30Cpu::test
>> test_formats.py::TestS3Formats::test_format[v1-test.tsv-tsv_with_names-kikimr_settings0]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.gz-gzip]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-false-client0] [GOOD]
>> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-true-client0]
>> test_actorsystem.py::TestWithStorageNodeWith2Cpu::test
>> test_actorsystem.py::TestWithComputeNodeWith3Cpu::test [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args2-column] [GOOD]
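The DeprecationWarning captured in the test_dispatch.py report above comes from recent CPython: calling os.fork() (which multiprocessing's default "fork" start method does on Linux) in a multi-threaded parent can deadlock the child. A minimal sketch of the usual remedy, switching to the "spawn" start method; this illustrates the warning's advice and is not taken from the YDB test harness itself:

```python
# Minimal sketch, assuming the warning above: avoid fork() in a
# multi-threaded parent by using the "spawn" start method, which
# launches a fresh interpreter for each worker instead of forking.
import multiprocessing as mp

def work(n: int) -> int:
    # Placeholder workload; the real tests run far more than this.
    return n * n

if __name__ == "__main__":
    ctx = mp.get_context("spawn")  # explicit context; no os.fork() under threads
    with ctx.Pool(processes=2) as pool:
        print(pool.map(work, range(4)))
```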
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args3-row]
>> test_statistics.py::TestS3::test_egress[v1-client0-json_each_row] [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test
>> test_statistics.py::TestS3::test_egress[v1-client0-csv_with_names]
>> test_bindings_1.py::TestBindings::test_s3_insert[v1-kikimr_settings0-client0] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args3-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args3-column]
>> test_bindings_1.py::TestBindings::test_s3_insert[v2-kikimr_settings0-client0]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client8-year String NOT NULL-True] [GOOD]
>> test_push_down.py::TestS3PushDown::test_simple_case[v1-client0]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client9-year String-False]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_json[sql]
>> test_s3_0.py::TestS3::test_inference_multiple_files[v2-client0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v1-true-client0] [GOOD]
>> test_s3_0.py::TestS3::test_inference_file_error[v2-client0]
>> test_actorsystem.py::TestWithHybridNodeWith28Cpu::test
>> test_format_setting.py::TestS3::test_interval_unit[v1-client0] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataset] [GOOD]
>> test_statistics.py::TestS3::test_egress[v1-client0-csv_with_names] [GOOD]
>> test_actorsystem.py::TestWithHybridNodeWith33Cpu::test [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-false-client0]
>> test_format_setting.py::TestS3::test_interval_unit[v2-client0]
>> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataにちは% set]
>> test_statistics.py::TestS3::test_egress[v1-client0-parquet]
>> test_formats.py::TestS3Formats::test_format[v1-test.tsv-tsv_with_names-kikimr_settings0] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v1-test_each_row.json-json_each_row-kikimr_settings0]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_uint32 [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_uint64_and_string
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus
>> test_ydb_impex.py::TestImpex::test_multiple_files_and_columns_opt[tsv-additional_args3-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args0-row]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_uint64_and_string [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_list
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.gz-gzip] [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_list [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_struct
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.lz4-lz4]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_struct [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_multiple_files
>> test_statistics.py::TestS3::test_egress[v1-client0-parquet] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-false] [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_multiple_files [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_ignore_excess_parameters
>> test_statistics.py::TestS3::test_egress[v2-client0-json_list]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-true]
>> test_actorsystem.py::TestWithComputeNodeWith4Cpu::test
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_ignore_excess_parameters [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_script_from_file
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client9-year String-False] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_script_from_file [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_uint32
>> test_insert.py::TestS3::test_insert[v1-false-client0-parquet-dataにちは% set] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client10-year Utf8-False]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataset]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD]
>> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings0-client0] [GOOD]
>> test_s3_0.py::TestS3::test_inference_file_error[v2-client0] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataset] [SKIPPED]
>> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings0-client0]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithHybridNodeWith23Cpu::test [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.lz4-lz4] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v1-test_each_row.json-json_each_row-kikimr_settings0] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v1-test_list.json-json_list-kikimr_settings0]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataにちは% set]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_uint32 [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_uint64_and_string
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.br-brotli]
>> test_s3_0.py::TestS3::test_inference_parameters[v2-client0]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_list-dataにちは% set] [SKIPPED]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataset]
>> test_actorsystem.py::TestWithStorageNodeWith39Cpu::test [GOOD]
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataset] [SKIPPED]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_uint64_and_string [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_list
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataにちは% set]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_json[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_csv[sql]
>> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std]
>> test_actorsystem.py::TestWithStorageNodeWith2Cpu::test [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_list [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_struct
>> test_insert.py::TestS3::test_insert[v1-true-client0-json_each_row-dataにちは% set] [SKIPPED]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_tsv[sql]
>> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataset] [SKIPPED]
>> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataにちは% set]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_struct [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_multiple_files
>> test_insert.py::TestS3::test_insert[v1-true-client0-csv_with_names-dataにちは% set] [SKIPPED]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_simple_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_raw[sql]
>> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataset]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/api/py3test
>> test_public_api.py::TestDocApiTables::test_drop_table[settings1-None] [GOOD]
Test command err:
contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params0-true] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_raw[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_json[sql]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_multiple_files [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_ignore_excess_parameters
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-false]
>> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v2-client0] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_json[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_csv[sql]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_ignore_excess_parameters [GOOD]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_script_from_file
>> test_actorsystem.py::TestWithStorageNodeWith30Cpu::test
>> test_formats.py::TestS3Formats::test_format[v1-test_list.json-json_list-kikimr_settings0] [GOOD]
>> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0]
>> test_formats.py::TestS3Formats::test_format[v1-test.parquet-parquet-kikimr_settings0]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client10-year Utf8-False] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_tsv[sql]
>> test_ydb_scripting.py::TestExecuteScriptWithParamsFromJson::test_stream_script_from_file [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client11-year Utf8 NOT NULL-True]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args0-row] [GOOD]
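The DeprecationWarning in the test_public_api.py report above spells out its own fix: the naive datetime.datetime.utcfromtimestamp() should become a timezone-aware datetime.datetime.fromtimestamp(ts, datetime.UTC). A minimal sketch of that migration (datetime.UTC is the Python 3.11+ alias for datetime.timezone.utc, which works on older versions too):

```python
# Minimal sketch of the migration the warning above recommends;
# illustrative only, not the actual contrib/python/ydb code.
from datetime import datetime, timezone

ts = 1_700_000_000  # example Unix timestamp

# Deprecated: returns a naive datetime with tzinfo=None.
naive = datetime.utcfromtimestamp(ts)

# Replacement: timezone-aware datetime in UTC.
aware = datetime.fromtimestamp(ts, timezone.utc)

# Both represent the same wall-clock instant in UTC.
assert naive == aware.replace(tzinfo=None)
```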
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args0-column]
>> test_actorsystem.py::TestWithComputeNodeWith4Cpu::test [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_stdin_par_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_json[sql]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.br-brotli] [GOOD]
>> test_s3_0.py::TestS3::test_inference_parameters[v2-client0] [GOOD]
>> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-False-client0]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.bz2-bzip2]
>> test_s3_0.py::TestS3::test_inference_timestamp[v2-client0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_json[sql] [GOOD]
>> test_format_setting.py::TestS3::test_interval_unit[v2-client0] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_csv[sql]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD]
|74.2%| [TA] $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_actorsystem.py::TestWithHybridNodeWith38Cpu::test
>> test_format_setting.py::TestS3::test_bad_format_setting[v1-client0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_tsv[sql]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client11-year Utf8 NOT NULL-True] [GOOD]
|74.2%| [TA] {RESULT} $(B)/ydb/tests/functional/api/test-results/py3test/{meta.json ... results_accumulator.log}
>> test_format_setting.py::TestS3::test_bad_format_setting[v1-client0] [GOOD]
>> test_bindings_1.py::TestBindings::test_s3_insert[v2-kikimr_settings0-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client12-year Date-False]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0]
>> test_format_setting.py::TestS3::test_bad_format_setting[v2-client0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_raw[sql]
>> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v1-client0]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-false-client0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-true-client0]
>> test_format_setting.py::TestS3::test_bad_format_setting[v2-client0] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.csv-csv_with_names]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_framing_newline_delimited_raw[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_raw[sql]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD]
|74.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithStorageNodeWith26Cpu::test [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_raw[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_json[sql]
>> test_statistics.py::TestS3::test_egress[v2-client0-json_list] [GOOD]
>> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataset] [GOOD]
>> test_statistics.py::TestS3::test_egress[v2-client0-json_each_row]
>> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataにちは% set]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_json[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_csv[sql]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithStorageNodeWith20Cpu::test [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.bz2-bzip2] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-false] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v1-test.parquet-parquet-kikimr_settings0] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.zst-zstd]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-true]
>> test_formats.py::TestS3Formats::test_format[v2-test.csv-csv_with_names-kikimr_settings0]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_tsv[sql]
>> test_actorsystem.py::TestWithComputeNodeWith30Cpu::test [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_full_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_raw[sql]
>> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.csv-csv_with_names] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_raw[sql] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client12-year Date-False] [GOOD]
>> test_actorsystem.py::TestWithHybridNodeWith28Cpu::test [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.tsv-tsv_with_names]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_json[sql]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client13-year Date NOT NULL-True]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.zst-zstd] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_json[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_csv[sql]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.xz-xz]
>> test_statistics.py::TestS3::test_egress[v2-client0-json_each_row] [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith5Cpu::test
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args0-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args1-row]
>> test_explicit_partitioning_0.py::TestS3::test_partitioned_by[v2-true-client0] [GOOD]
>> test_statistics.py::TestS3::test_egress[v2-client0-csv_with_names]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v1-false-client0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_tsv[sql]
>> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD]
>> test_actorsystem.py::TestWithComputeNodeWith36Cpu::test
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_batching_adaptive_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_json[sql]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client13-year Date NOT NULL-True] [GOOD]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo]
>> test_insert.py::TestS3::test_insert[v1-true-client0-parquet-dataにちは% set] [GOOD]
>> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataset]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client14-year Datetime-False]
>> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_json[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_csv[sql]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_tsv[sql]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params0-true] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_ignore_excess_parameters_tsv[sql] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-false]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_bad_header_csv[sql]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.tsv-tsv_with_names] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.json-json_each_row]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_bad_header_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_bad_header_tsv[sql]
>> test_compressions.py::TestS3Compressions::test_compression[v1-test.json.xz-xz] [GOOD]
>> test_statistics.py::TestS3::test_egress[v2-client0-csv_with_names] [GOOD]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.gz-gzip]
>> test_statistics.py::TestS3::test_egress[v2-client0-parquet]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_bad_header_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_no_header_csv[sql]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v1-false-client0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v1-true-client0]
>> test_push_down.py::TestS3PushDown::test_simple_case[v1-client0] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_no_header_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_no_header_tsv[sql]
>> test_s3_0.py::TestS3::test_inference_timestamp[v2-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client14-year Datetime-False] [GOOD]
>> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0]
>> test_s3_0.py::TestS3::test_inference_projection[v2-client0]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client15-year Datetime NOT NULL-True]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_actorsystem.py::TestWithComputeNodeWith5Cpu::test [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_columns_no_header_tsv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_skip_rows_csv[sql]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_skip_rows_csv[sql] [GOOD]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_skip_rows_tsv[sql]
>> test_formats.py::TestS3Formats::test_format[v2-test.csv-csv_with_names-kikimr_settings0] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v2-test.tsv-tsv_with_names-kikimr_settings0]
>> test_ydb_sql.py::TestExecuteSqlWithParamsFromStdin::test_skip_rows_tsv[sql] [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args1-row] [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args1-column]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-false-client15-year Datetime NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client0-year Int32-False]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-false] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.gz-gzip] [GOOD]
>> test_statistics.py::TestS3::test_egress[v2-client0-parquet] [GOOD]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-true]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.lz4-lz4]
>> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_list]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.json-json_each_row] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.parquet-parquet]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v1-true-client0] [GOOD]
>> test_actorsystem.py::TestWithStorageNodeWith30Cpu::test [GOOD]
>> test_s3_0.py::TestS3::test_inference_projection[v2-client0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v2-false-client0]
>> test_s3_0.py::TestS3::test_inference_null_column_name[v2-client0]
>> test_formats.py::TestS3Formats::test_format[v2-test.tsv-tsv_with_names-kikimr_settings0] [GOOD]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v2-test_each_row.json-json_each_row-kikimr_settings0]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_not_create_kesus [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client0-year Int32-False] [GOOD]
>> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataset] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client1-year Int32 NOT NULL-False]
>> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params0-true] [GOOD]
>> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataにちは% set]
>> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-false]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.lz4-lz4] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v2-false-client0] [GOOD]
>> test_s3_0.py::TestS3::test_inference_null_column_name[v2-client0] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v2-test_each_row.json-json_each_row-kikimr_settings0] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.br-brotli]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v2-true-client0]
>> test_s3_0.py::TestS3::test_inference_unsupported_types[v2-client0]
>> test_format_setting.py::TestS3::test_date_simple[v1-date/simple/test.parquet-parquet] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.csv-csv_with_names]
>> test_formats.py::TestS3Formats::test_format[v2-test_list.json-json_list-kikimr_settings0]
>> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0] [GOOD]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test
>> test_actorsystem.py::TestWithComputeNodeWith26Cpu::test [GOOD]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test
>> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[csv-additional_args1-column] [GOOD]
>> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args2-row]
>> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-false-client0] [GOOD]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test
>> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-true-client0]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD]
>> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test
>> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.csv-csv_with_names] [GOOD]
>> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.tsv-tsv_with_names]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.br-brotli] [GOOD]
>> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_list] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client1-year Int32 NOT NULL-False] [GOOD]
>> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.bz2-bzip2]
>> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_each_row]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client2-year Uint32-False]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v1] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings0-client0] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test
>> test_test_connection.py::TestConnection::test_test_s3_connection_error[v1-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002577/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_test_connection/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002577/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_test_connection/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=920449) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 924283 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v1] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings0-client0]
>> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-False-client0] [GOOD]
>> test_yq_v2.py::TestS3::test_yqv2_enabled[v2-True-client0]
>> test_explicit_partitioning_0.py::TestS3::test_projection[v2-true-client0] [GOOD]
>> test_s3_0.py::TestS3::test_inference_unsupported_types[v2-client0] [GOOD]
>> test_formats.py::TestS3Formats::test_format[v2-test_list.json-json_list-kikimr_settings0] [GOOD]
>> test_s3_0.py::TestS3::test_json_list_formats[v2-client0]
>> test_formats.py::TestS3Formats::test_format[v2-test.parquet-parquet-kikimr_settings0]
>> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-false-client0]
|74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test
>> test_ydb_backup.py::TestBaseSingleFromDifPlaces::test_single_table_backup_from_different_places
>> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test [GOOD]
>> test_insert.py::TestS3::test_insert[v2-false-client0-json_list-dataにちは% set] [GOOD]
>> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataset]
>> test_quota_exhaustion.py::TestYdbWorkload::test
>> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.tsv-tsv_with_names] [GOOD]
>> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.json-json_each_row]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client2-year Uint32-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client3-year Uint32 NOT NULL-True]
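Two warnings recur in the sqs and fq/s3 reports above: the deprecated Logger.warn alias at requests_client.py:140, and unclosed file objects left behind after subprocess.Popen (the moto_server.out.log / moto_server.err.log ResourceWarnings). A minimal sketch addressing both; the names are illustrative, not the actual YDB library or recipe code:

```python
# Minimal sketch, assuming the warnings above; hypothetical helper names.
import logging
import subprocess

logger = logging.getLogger("sqs.requests")

def log_failure(code: int, reason: str, text: str) -> None:
    # Logger.warn is a deprecated alias; Logger.warning is the fix
    # the DeprecationWarning at requests_client.py:140 asks for.
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                   code, reason, text)

def start_logged(cmd: list[str], out_path: str, err_path: str) -> subprocess.Popen:
    # The child process inherits duplicated descriptors, so the parent
    # may close its own file objects right after Popen returns; doing so
    # avoids the "unclosed file" ResourceWarnings seen above.
    with open(out_path, "w", encoding="utf-8") as out, \
         open(err_path, "w", encoding="utf-8") as err:
        return subprocess.Popen(cmd, stdout=out, stderr=err)
```

Running the tests with PYTHONTRACEMALLOC=1 (as the "Enable tracemalloc" hint suggests) would additionally report where each leaked object was allocated.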
test_yq_v2.py::TestS3::test_yqv2_enabled[v2-True-client0] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith38Cpu::test [GOOD] >> test_yq_v2.py::TestS3::test_removed_database_path[v2-client0] >> test_s3_1.py::TestS3::test_top_level_listing_2[v1-kikimr_params0-true-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.bz2-bzip2] [GOOD] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-false-client0] >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.zst-zstd] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args2-column] >> test_formats.py::TestS3Formats::test_format[v2-test.parquet-parquet-kikimr_settings0] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.csv-csv_with_names] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test_duplicates >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-true-client0] >> test_quota_exhaustion.py::TestYdbWorkload::test_delete >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-csv_with_names] >> test_s3_0.py::TestS3::test_json_list_formats[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client3-year Uint32 NOT NULL-True] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint32[data] >> test_actorsystem.py::TestWithComputeNodeWith36Cpu::test [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client4-year Int64-False] >> test_s3_0.py::TestS3::test_csv_with_hopping[v1-client0] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.parquet-parquet] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataset] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.zst-zstd] [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataにちは% set] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.xz-xz] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v1-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-false-client0] >> test_actorsystem.py::TestWithStorageNodeWith3Cpu::test >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test [GOOD] >> test_s3_0.py::TestS3::test_csv_with_hopping[v1-client0] [GOOD] >> test_s3_0.py::TestS3::test_csv_with_hopping[v2-client0] >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] >> test_format_setting.py::TestS3::test_date_simple[v2-date/simple/test.parquet-parquet] [GOOD] >> docker_wrapper_test.py::test_pg_generated[Test64BitErrorChecking] [SKIPPED] >> 
docker_wrapper_test.py::test_pg_generated[TestAppendEncodedText] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestAppendEscapedText] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestAppendEscapedTextExistingBuffer] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestArrayScanBackend] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestArrayScanner] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestArrayValueBackend] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestArrayValuer] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBadConn] >> test_ydb_backup.py::TestBaseSingleFromDifPlaces::test_single_table_backup_from_different_places [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.csv-csv_with_names] >> docker_wrapper_test.py::test_pg_generated[TestBadConn] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBinaryByteSliceToInt] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestBinaryByteSlicetoUUID] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestBindError] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestBoolArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteSliceToText] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanUnsupported] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v1-client0] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args3-row] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaOutputFormatEncoding] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestByteaOutputFormats] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCloseBadConn] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCommit] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCommitInFailedTransaction] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCommitInFailedTransactionWithCancelContext] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnClose] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnExecDeadlock] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnListen] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestConnPing] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestConnPrepareContext] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnPrepareContext/context.Background] [GOOD] >> 
docker_wrapper_test.py::test_pg_generated[TestConnPrepareContext/context.WithTimeout] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnPrepareContext/context.WithTimeout_exceeded] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestConnUnlisten] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestConnUnlistenAll] [SKIPPED] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v2-client0] >> docker_wrapper_test.py::test_pg_generated[TestConnectorWithNoticeHandler_Simple] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestConnectorWithNotificationHandler_Simple] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestContextCancelBegin] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestContextCancelExec] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestContextCancelQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyFromError] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInBinaryError] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInMultipleValues] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInRaiseStmtTrigger] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInSchemaStmt] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCopyInStmt] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCopyInStmtAffectedRows] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInTypes] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyInWrongType] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopyOutsideOfTxnError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestCopyRespLoopConnectionError] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestCopySyntaxError] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestDataType] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client4-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client5-year Int64 NOT NULL-False] >> docker_wrapper_test.py::test_pg_generated[TestDataType] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDataTypeLength] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDataTypeName] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDataTypePrecisionScale] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDecodeBool] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDecodeUUIDBackend] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestDecodeUUIDBinaryError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestEmptyQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestEmptyResultSetColumns] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestEncodeAndParseTs] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestEncodeDecode] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorClass] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorDuringStartup] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorDuringStartupClosesConn] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestErrorOnExec] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorOnQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorOnQueryRowSimpleQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestErrorSQLState] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-false-client0] [GOOD] >> 
docker_wrapper_test.py::test_pg_generated[TestExec] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat32ArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFloat64ArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFormatAndParseTimestamp] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestFormatTs] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-true-client0] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-parquet] >> docker_wrapper_test.py::test_pg_generated[TestFormatTsBackend] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestFullParseURL] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanDelimiter] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanErrors] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerArrayBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerArrayString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerSliceBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerSliceEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerSliceNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanScannerSliceString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayValueErrors] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestGenericArrayValueUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestHasCorrectRootGroupPermissions] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestIPv6LoopbackParseURL] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInfinityTimestamp] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanBytes] [GOOD] >> test_compressions.py::TestS3Compressions::test_compression_inference[v2-test.csv.xz-xz] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint32[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint32[scan] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanNil] [GOOD] >> 
docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt32ArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInt64ArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestInvalidProtocolParseURL] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestIsUTF8] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestIssue1046] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestIssue1062] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestIssue186] [SKIPPED] >> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test [GOOD] >> test_s3_0.py::TestS3::test_csv_with_hopping[v2-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.gz-gzip] >> test_yq_v2.py::TestS3::test_removed_database_path[v2-client0] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestIssue196] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestIssue282] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestIssue494] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestIssue617] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestListenerClose] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestListenerConnCloseWhileQueryIsExecuting] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestListenerFailedQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestListenerListen] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestListenerPing] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestListenerReconnect] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestListenerUnlisten] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestListenerUnlistenAll] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestMinimalURL] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestMultipleEmptyResult] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestMultipleResult] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestMultipleSimpleQuery] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestNewConnector_Connect] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestNewConnector_Driver] [SKIPPED] >> test_s3_0.py::TestS3::test_raw[v1-false-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint32[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint64_and_string[data] >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] >> docker_wrapper_test.py::test_pg_generated[TestNewConnector_WorksWithOpenDB] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestNewListenerConn] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestNoData] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestNotifyExtra] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestNullAfterNonNull] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestOpenURL] [GOOD] >> 
docker_wrapper_test.py::test_pg_generated[TestParameterCountMismatch] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseArray] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseArrayError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseComplete] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseEnviron] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseErrorInExtendedQuery] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestParseOpts] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseTs] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestParseTsErrors] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestPgpass] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestPing] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestQueryCancelRace] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestQueryCancelledReused] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestQueryRowBugWorkaround] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestQuickClose] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestQuoteIdentifier] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestQuoteLiteral] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestReadFloatPrecision] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestReconnect] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestReturning] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestRowsCloseBeforeDone] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestRowsColumnTypes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestRowsResultTag] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestRuntimeParameters] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport/SNI_is_not_passed_when_disabled] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport/SNI_is_not_set_for_IPv4] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport/SNI_is_passed_when_asked_for] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport/SNI_is_set_by_default] >> test_insert.py::TestS3::test_insert[v2-false-client0-json_each_row-dataにちは% set] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSNISupport/SNI_is_set_by_default] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSSLClientCertificates] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestSSLConnection] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestSSLRequireWithRootCert] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestSSLVerifyCA] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestSSLVerifyFull] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestScanNilTimestamp] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestScanTimestamp] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSimpleParseURL] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestSimpleQuery] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStatment] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStmtExecContext] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtExecContext/context.Background] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtExecContext/context.WithTimeout] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtExecContext/context.WithTimeout_exceeded] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataset] >> 
test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint64_and_string[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint64_and_string[scan] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-false-client0] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStmtExecContext/context.WithTimeout_exceeded] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtQueryContext] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtQueryContext/context.Background] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtQueryContext/context.WithTimeout] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStmtQueryContext/context.WithTimeout_exceeded] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanBytes] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanEmpty] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanError] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanNil] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayScanUnsupported] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringArrayValue] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringToBytea] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringToUUID] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestStringWithNul] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestTextByteSliceToInt] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTextByteSliceToUUID] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTextDecodeIntoString] >> docker_wrapper_test.py::test_pg_generated[TestTextDecodeIntoString] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/11:59:59+00:00_=>_0000-01-01T11:59:59Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/11:59:59+04:00_=>_0000-01-01T11:59:59+04:00] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/11:59:59+04:01:02_=>_0000-01-01T11:59:59+04:01] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/11:59:59-04:01:02_=>_0000-01-01T11:59:59-04:01] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00+00_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00-04:00_=>_0000-01-02T00:00:00-04:00] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00:00+00_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00:00.0+00_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00:00.000000+00_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithTimezone/24:00Z_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone/11:59:59_=>_0000-01-01T11:59:59Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone/24:00:00.000000_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone/24:00:00.0_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone/24:00:00_=>_0000-01-02T00:00:00Z] [GOOD] >> 
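
The TestTimeWithTimezone / TestTimeWithoutTimezone cases above encode an easy-to-miss rule: PostgreSQL accepts 24:00 (and 24:00:00, 24:00:00.0, ...) as "end of day", and the driver normalizes it to midnight of the following day, which is why every 24:00 variant maps to 0000-01-02T00:00:00. A minimal sketch of that normalization (hypothetical helper, not the driver's code; timezone-offset handling is deliberately omitted):

```python
from datetime import datetime, timedelta

def normalize_pg_time(text: str) -> datetime:
    # Year 1 stands in for the year-0000 anchor used by the tests;
    # offsets such as +04:01:02 are ignored in this sketch.
    base = datetime(1, 1, 1)
    hours, minutes, *rest = (int(float(part)) for part in text.split(":"))
    seconds = rest[0] if rest else 0
    # hour 24 simply overflows into the next day via timedelta
    return base + timedelta(hours=hours, minutes=minutes, seconds=seconds)

assert normalize_pg_time("24:00") == datetime(1, 1, 2, 0, 0)            # next day
assert normalize_pg_time("11:59:59") == datetime(1, 1, 1, 11, 59, 59)   # unchanged
```

The offset variants additionally show seconds being truncated from the reported zone (11:59:59+04:01:02 => ...+04:01).
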
docker_wrapper_test.py::test_pg_generated[TestTimeWithoutTimezone/24:00_=>_0000-01-02T00:00:00Z] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimestampWithOutTimezone] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_uint64_and_string[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_list[data] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-true-client0] >> docker_wrapper_test.py::test_pg_generated[TestTimestampWithOutTimezone] [GOOD] >> docker_wrapper_test.py::test_pg_generated[TestTimestampWithTimeZone] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestTxOptions] [SKIPPED] >> docker_wrapper_test.py::test_pg_generated[TestXactMultiStmt] [SKIPPED] >> test_explicit_partitioning_0.py::TestS3::test_pruning[v2-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_validation[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_list[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_list[scan] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_list[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_struct[data] >> test_explicit_partitioning_0.py::TestS3::test_validation[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_validation[v2-client0] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_struct[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_struct[scan] >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_s3_0.py::TestS3::test_raw[v1-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_raw[v1-true-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client6-year Uint64-False] >> test_inflight.py::TestS3::test_data_inflight[v1-client0-kikimr_params0] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.gz-gzip] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_struct[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_multiple_files[data] >> test_explicit_partitioning_0.py::TestS3::test_validation[v2-client0] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.lz4-lz4] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-false-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.tsv-tsv_with_names] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_multiple_files[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_multiple_files[scan] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_public_metrics.py::TestPublicMetrics::test_public_metrics[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002575/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_public_metrics/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object 
allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002575/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_public_metrics/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=924779) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 928426 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v0-fifo] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args3-column] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_multiple_files[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_ignore_excess_parameters[data] >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_ignore_excess_parameters[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_ignore_excess_parameters[scan] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith34Cpu::test [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing_2[v2-kikimr_params0-true-client0] [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith3Cpu::test [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_ignore_excess_parameters[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_script_from_file[data] >> test_s3_1.py::TestS3::test_precompute[v1-false-client0] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataset] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_script_from_file[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_script_from_file[scan] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataにちは% set] >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact >> test_ydb_table.py::TestExecuteQueryWithParamsFromJson::test_script_from_file[scan] [GOOD] >> test_s3_0.py::TestS3::test_raw[v1-true-client0] [GOOD] >> test_s3_0.py::TestS3::test_raw[v2-false-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client6-year Uint64-False] [GOOD] >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client7-year Uint64 NOT NULL-False] >> 
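
The ResourceWarning block above (from the test_public_metrics run) is the classic unclosed-log-file pattern: the line quoted by the warning, `process = subprocess.Popen(` in library/recipes/common/__init__.py:29, receives moto_server.out.log / moto_server.err.log file objects that the parent never closes. A minimal sketch of the usual fix, assuming nothing about the recipe code beyond the quoted line; closing the parent's handles right after Popen returns silences the warning, while the child keeps writing through its own inherited descriptors:

```python
import subprocess

def start_with_logs(cmd, out_path, err_path):
    # The child duplicates these descriptors at exec time, so the
    # parent's file objects can (and should) be closed immediately.
    with open(out_path, "w", encoding="utf-8") as out, \
         open(err_path, "w", encoding="utf-8") as err:
        return subprocess.Popen(cmd, stdout=out, stderr=err)
```

Running the suite with PYTHONTRACEMALLOC=1 (or calling tracemalloc.start() early) produces the allocation traceback the warning says is unavailable.
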
TRegisterCheckTest::ShouldRegisterCheckNewGeneration >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-true-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.lz4-lz4] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_list-parquet] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.br-brotli] >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_list] >> test_bindings_1.py::TestBindings::test_s3_format_mismatch[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_pg_binding[v1-client0] |74.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithKesus::test_properly_creates_and_deletes_queue[tables_format_v0-std] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.json-json_each_row] >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/multi_plane/py3test >> test_retry.py::TestRetry::test_low_rate[kikimr0] [GOOD] Test command err: contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=872706) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_json[scripting] |74.3%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessExceptions [GOOD] >> test_s3_0.py::TestS3::test_raw[v2-false-client0] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[json-additional_args4-row] >> test_s3_0.py::TestS3::test_raw[v2-true-client0] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith36Cpu::test [GOOD] |74.4%| [TA] $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... results_accumulator.log} |74.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TFqYdbTest::ShouldStatusToIssuesProcessEmptyIssues [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-csv_with_names-dataにちは% set] [GOOD] |74.4%| [TA] {RESULT} $(B)/ydb/tests/fq/multi_plane/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataset] >> test_bindings_1.py::TestBindings::test_pg_binding[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_pg_binding[v2-client0] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v1-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-false-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client7-year Uint64 NOT NULL-False] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.br-brotli] [GOOD] |74.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGenerationAndTransact [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client8-year String NOT NULL-True] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.bz2-bzip2] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith6Cpu::test [GOOD] |74.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGeneration [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings0-client0] [SKIPPED] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client8-year String NOT NULL-True] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings0-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client9-year String-False] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings0-client0] [SKIPPED] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings1-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-true] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.parquet-parquet] >> test_s3_0.py::TestS3::test_raw[v2-true-client0] [GOOD] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_s3_0.py::TestS3::test_limit[v1-false-kikimr_params0-client0] >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_list] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith39Cpu::test >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-false-client0] [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration >> test_bindings_1.py::TestBindings::test_pg_binding[v2-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-true-client0] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_each_row] >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration >> 
test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-yql_syntax-client0] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataset] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_json[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_json[stream] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataにちは% set] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.bz2-bzip2] [GOOD] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith29Cpu::test [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_csv[scripting] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.zst-zstd] >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_push_down.py::TestS3PushDown::test_simple_case[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00255e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_push_down/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00255e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_push_down/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=935482) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 937236 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_csv[stream] >> test_ydb_impex.py::TestImpex::test_big_dataset[json-additional_args4-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[json-additional_args4-column] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/postgres_integrations/go-libpq/py3test >> docker_wrapper_test.py::test_pg_generated[TestXactMultiStmt] [SKIPPED] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_does_actions_with_queue[tables_format_v1-fifo] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params1-true] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.csv-csv_with_names] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_tsv[scripting] >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.tsv-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client9-year String-False] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-false] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client10-year Utf8-False] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_tsv[stream] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_simple_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_raw[scripting] >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test [GOOD] >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_raw[scripting] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-yql_syntax-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_raw[stream] >> test_explicit_partitioning_0.py::TestS3::test_no_schema_columns_except_partitioning_ones[v2-true-client0] [GOOD] >> 
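
The DeprecationWarning repeated in these blocks ("This process ... is multi-threaded, use of fork() may lead to deadlocks in the child", from multiprocessing/popen_fork.py:66) is Python 3.12+ flagging the default fork start method in a process that has already spawned threads. A minimal sketch, assuming the harness can opt into the spawn start method instead:

```python
import multiprocessing as mp

def worker(queue):
    queue.put("ready")

if __name__ == "__main__":
    # "spawn" starts children from a fresh interpreter instead of
    # fork()ing the already multi-threaded parent, which is exactly
    # the hazard the warning describes.
    ctx = mp.get_context("spawn")
    queue = ctx.Queue()
    child = ctx.Process(target=worker, args=(queue,))
    child.start()
    print(queue.get())
    child.join()
```
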
test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.zst-zstd] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-pg_syntax-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-false-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.xz-xz] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_format_setting.py::TestS3::test_date_simple_insert[v1-date/simple/test.parquet-parquet] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.tsv-tsv_with_names] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_raw[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_json[scripting] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.csv-csv_with_names] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_each_row.json-json_each_row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_json[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_json[stream] |74.4%| [TA] $(B)/ydb/tests/postgres_integrations/go-libpq/test-results/py3test/{meta.json ... results_accumulator.log} |74.4%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration [GOOD] >> test_insert.py::TestS3::test_insert[v2-false-client0-parquet-dataにちは% set] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataset] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataset] [SKIPPED] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_csv[scripting] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-false] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataにちは% set] |74.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_create_queue_rate[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckSameGeneration [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-true] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_list-dataにちは% set] [SKIPPED] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-json_each_row] [GOOD] |74.5%| [TA] {RESULT} $(B)/ydb/tests/postgres_integrations/go-libpq/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataset] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_csv[stream] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-csv_with_names] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataset] [SKIPPED] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_csv[stream] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataにちは% set] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_tsv[scripting] >> test_insert.py::TestS3::test_insert[v2-true-client0-json_each_row-dataにちは% set] [SKIPPED] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client10-year Utf8-False] [GOOD] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test[read_update_write_load] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v1-pg_syntax-client0] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataset] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_tsv[stream] >> test_compressions.py::TestS3Compressions::test_big_compression[v2-big.json.xz-xz] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-yql_syntax-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client11-year Utf8 NOT NULL-True] >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataset] [SKIPPED] >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.gz-gzip] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_insert.py::TestS3::test_insert[v2-true-client0-csv_with_names-dataにちは% set] [SKIPPED] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_early_finish.py::TestEarlyFinish::test_early_finish[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002560/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_early_finish/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002560/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_early_finish/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=933875) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 935858 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_stdin_par_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_mix_json_and_binary[scripting] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataset] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_mix_json_and_binary[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_mix_json_and_binary[stream] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-true-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/with_quotas/py3test >> test_quoting.py::TestSqsQuotingWithLocalRateLimiter::test_other_requests_rate[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_mix_json_and_binary[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_json[scripting] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_each_row.json-json_each_row] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_list.json-json_list] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params1-true] [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-false] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_json[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_json[stream] >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client11-year Utf8 NOT NULL-True] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-yql_syntax-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client12-year Date-False] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_csv[scripting] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.gz-gzip] [GOOD] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-pg_syntax-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.lz4-lz4] >> 
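
The "subprocess ... is still running" ResourceWarning from subprocess.py:1129 in the test_early_finish block fires when a Popen object is garbage-collected while its child is still alive. A minimal sketch of the reap-before-drop pattern (the command is a placeholder, not taken from the suite):

```python
import subprocess

# Popen works as a context manager: __exit__ waits for the child,
# so the object is never dropped with the process still running.
with subprocess.Popen(["sleep", "1"]) as child:
    try:
        child.wait(timeout=30)
    except subprocess.TimeoutExpired:
        child.terminate()  # ask it to stop; the context exit reaps it
```
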
test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.csv-csv_with_names] [GOOD] >> test_ydb_impex.py::TestImpex::test_big_dataset[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_format_parquet[row] [SKIPPED] >> test_ydb_impex.py::TestImpex::test_format_parquet[column] [SKIPPED] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args0-row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_csv[scripting] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.tsv-tsv_with_names] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_csv[stream] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_tsv[scripting] >> test_s3_1.py::TestS3::test_precompute[v1-false-client0] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test_list.json-json_list] [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v1-true-client0] [GOOD] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.parquet-parquet] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataset] [GOOD] >> test_s3_1.py::TestS3::test_precompute[v1-true-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-false-client0] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataにちは% set] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_tsv[scripting] [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-csv_with_names] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_tsv[stream] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-parquet] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-false] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_different_sources_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_json[scripting] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args0-column] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-true] |74.5%| [TA] $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_json[scripting] [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client12-year Date-False] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_json[stream] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client13-year Date NOT NULL-True] >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_csv[scripting] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.lz4-lz4] [GOOD] >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.br-brotli] >> test_bindings_1.py::TestBindings::test_count_for_pg_binding[v2-pg_syntax-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v1-client0] |74.5%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/with_quotas/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 >> test_ydb_sql.py::TestExecuteSqlFromStdinWithWideOutput::test_wide_table >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args1-row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_csv[stream] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-false-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_tsv[scripting] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-true-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_yq_v2.py::TestS3::test_query_parameters[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002550/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_yq_v2/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002550/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_yq_v2/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=939498) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 942486 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args1-column] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client13-year Date NOT NULL-True] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.tsv-tsv_with_names] [GOOD] >> test_insert.py::TestS3::test_insert[v2-true-client0-parquet-dataにちは% set] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_tsv[stream] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client14-year Datetime-False] >> test_formats.py::TestS3Formats::test_format_inference[v2-test.parquet-parquet] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params1-true] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.json-json_each_row] >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata >> test_formats.py::TestS3Formats::test_btc[v1] >> test_s3_1.py::TestS3::test_precompute[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_precompute[v2-false-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-false] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith31Cpu::test [GOOD] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.br-brotli] [GOOD] >> test_insert.py::TestS3::test_big_json_list_insert[v1-client0] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v2-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.bz2-bzip2] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_raw[scripting] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.5%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNextGeneration [GOOD] |74.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_inflight.py::TestS3::test_data_inflight[v1-client0-kikimr_params0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_raw[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_raw[stream] >> test_validation.py::TestS3::test_empty[v1-client0] [GOOD] |74.6%| [TS] {default-linux-x86_64, relwithdebinfo} 
ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldNotRegisterCheckPrevGeneration2 [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-json_each_row-parquet] [GOOD] >> test_validation.py::TestS3::test_empty[v2-client0] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_list] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params1] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[csv-additional_args1-column] [GOOD] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args2-row] >> test_explicit_partitioning_0.py::TestS3::test_projection_date[v2-true-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_framing_newline_delimited_raw[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_raw[scripting] |74.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TCheckGenerationTest::ShouldRollbackTransactionWhenCheckFails2 [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v1-client0] >> test_ydb_sql.py::TestExecuteSqlFromStdinWithWideOutput::test_wide_table [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_raw[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_raw[stream] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith4Cpu::test [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client14-year Datetime-False] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith39Cpu::test [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client15-year Datetime NOT NULL-True] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args2-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args2-column] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v2-client0] >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_raw[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_json[scripting] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.bz2-bzip2] [GOOD] >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_validate_columns[v2-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_json[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_json[stream] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-false-client0] >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] >> 
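
The requests_client.py:140 DeprecationWarning quoted in the SQS quoting blocks is a one-token fix: Logger.warn is a deprecated alias of Logger.warning. A hypothetical rewrite of the quoted call, also switching from str.format to lazy %-style arguments so the message is only built when the WARNING level is actually enabled:

```python
import logging

logger = logging.getLogger("sqs.requests_client")  # name is illustrative

def log_failed_request(code, reason, text):
    # .warning replaces the deprecated .warn; arguments are passed
    # through instead of pre-formatting with .format().
    logger.warning(
        "Last request failed with code %s, reason '%s' and text '%s'",
        code, reason, text,
    )
```
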
test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.zst-zstd] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_csv[scripting] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage >> test_bindings_1.py::TestBindings::test_ast_in_failed_query_compilation[v2-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_csv[stream] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v1-client0] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args3-row] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v2-client0] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.json-json_each_row] [GOOD] >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_tsv[scripting] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.parquet-parquet] >> test_bindings_1.py::TestBindings::test_raw_empty_schema_binding[v2-client0] [GOOD] >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithStorageNodeWith31Cpu::test [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v1-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-false] [GOOD] >> test_formats.py::TestS3Formats::test_btc[v1] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-true] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_tsv[stream] >> test_formats.py::TestS3Formats::test_btc[v2] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args3-column] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v1-true-client15-year Datetime NOT NULL-True] [GOOD] >> test_insert.py::TestS3::test_big_json_list_insert[v1-client0] [GOOD] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client0-year Int32-False] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-false-client0] [GOOD] >> test_insert.py::TestS3::test_big_json_list_insert[v2-client0] >> test_s3_0.py::TestS3::test_limit[v1-false-kikimr_params0-client0] [GOOD] >> 
test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-true-client0] >> test_s3_0.py::TestS3::test_limit[v1-true-kikimr_params0-client0] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.zst-zstd] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_full_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_raw[scripting] >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.xz-xz] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_expand_with_distconf >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_list] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_raw[scripting] [GOOD] |74.6%| [TS] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/ydb/ut/unittest >> TRegisterCheckTest::ShouldRegisterCheckNewGenerationAndTransact [GOOD] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_each_row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_raw[stream] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_raw[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_json[scripting] >> test_s3_1.py::TestS3::test_precompute[v2-false-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v1-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v2-client0] >> test_s3_1.py::TestS3::test_precompute[v2-true-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params1-true] [GOOD] >> test_formats.py::TestS3Formats::test_btc[v2] [GOOD] >> test_format_setting.py::TestS3::test_date_simple_insert[v2-date/simple/test.parquet-parquet] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v1-true-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-false] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[json-additional_args4-row] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.csv-csv_with_names] >> test_bindings_0.py::TestBindings::test_binding_operations[v1-kikimr_settings1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_format[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-false-client0] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings1-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_json[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_json[stream] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_compressions.py::TestS3Compressions::test_big_compression[v1-big.json.xz-xz] [GOOD] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v2-client0] >> 
test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_csv[scripting] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[json-additional_args4-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[json-additional_args4-column] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_insert.py::TestS3::test_big_json_list_insert[v2-client0] [GOOD] >> test_bindings_1.py::TestBindings::test_binding_with_backslash_in_location[v2-client0] [GOOD] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client0-year Int32-False] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v1-client0] >> test_bindings_1.py::TestBindings::test_decimal_binding[v1-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_csv[stream] |74.6%| [TA] $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client1-year Int32 NOT NULL-False] >> test_s3_0.py::TestS3::test_limit[v1-true-kikimr_params0-client0] [GOOD] >> test_s3_0.py::TestS3::test_limit[v2-false-kikimr_params0-client0] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.6%| [TA] {RESULT} $(B)/ydb/core/fq/libs/ydb/ut/test-results/unittest/{meta.json ... results_accumulator.log} >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_tsv[scripting] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-json_each_row] [GOOD] >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [FAIL] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-csv_with_names] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_tsv[stream] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-false-client0] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_file_with_bom[json-additional_args4-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args0-row] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-false] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-true] >> test_s3_1.py::TestS3::test_precompute[v2-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-true-client0] >> test_s3_1.py::TestS3::test_failed_precompute[v1-false-client0] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataMirrorMax::test_cluster_is_operational_with_metadata [GOOD] >> 
test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_batching_adaptive_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_json[scripting] >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_formats.py::TestS3Formats::test_invalid_format[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_format[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v2-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v1-client0] >> test_bindings_1.py::TestBindings::test_decimal_binding[v1-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_json[scripting] [GOOD] >> test_bindings_1.py::TestBindings::test_decimal_binding[v2-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_json[stream] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args0-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args0-column] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_json[data] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_json[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_csv[scripting] >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_csv[stream] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client1-year Int32 NOT NULL-False] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_no_paritioning_columns[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client2-year Uint32-False] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_csv[stream] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client0-year Int32 NOT NULL-True] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_tsv[scripting] >> test_ydb_backup.py::TestRecursiveNonConsistent::test_recursive_table_backup_from_different_places >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params1-true] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_tsv[stream] >> 
test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-false] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args0-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args1-row] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_format[v2-client0] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_ignore_excess_parameters_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_csv[scripting] >> test_compressions.py::TestS3Compressions::test_invalid_compression[v1-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.json-json_each_row] >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v1-client0] >> test_ydb_sql.py::TestExecuteSqlWithPgSyntax::test_pg_syntax >> test_bindings_1.py::TestBindings::test_decimal_binding[v2-client0] [GOOD] |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test |74.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_csv[stream] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args1-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args1-column] >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_tsv[scripting] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client0-year Int32 NOT NULL-True] [GOOD] >> test_s3_1.py::TestS3::test_failed_precompute[v1-false-client0] [GOOD] >> test_insert.py::TestS3::test_insert_csv_delimiter[v2-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client1-year Uint32 NOT NULL-True] >> test_s3_1.py::TestS3::test_failed_precompute[v1-true-client0] >> test_insert.py::TestS3::test_append[v1-client0] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-parquet] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_tsv[stream] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_json[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_json[scan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client2-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client3-year Uint32 NOT NULL-True] >> 
test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[csv-additional_args1-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args2-row] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v1-client0] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_streaming_join.py::TestStreamingJoin::test_grace_join[v1-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002576/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_streaming_join/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002576/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_streaming_join/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=920300) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 923818 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
|74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_invalid_input_compression[v2-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_json[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_csv[data] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_bad_header_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_csv[scripting] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-false] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithComputeNodeWith37Cpu::test [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-true] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageBadCases::test_cluster_change_state_storage [GOOD] >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.json-json_each_row] [GOOD] >> test_s3_0.py::TestS3::test_limit[v2-false-kikimr_params0-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_csv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_csv[scan] >>
test_s3_0.py::TestS3::test_limit[v2-true-kikimr_params0-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_csv[scripting] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.parquet-parquet] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args2-row] [GOOD] >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args2-column] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_csv[stream] >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_tsv[data] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_tsv[scripting] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_tsv[scan] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client1-year Uint32 NOT NULL-True] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_cluster_works_with_auto_conf_dir [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_tsv[stream] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client2-year Uint64 NOT NULL-True] >> test_ydb_sql.py::TestExecuteSqlWithPgSyntax::test_pg_syntax [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_simple_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_raw[data] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args2-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args3-row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_columns_no_header_tsv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_csv[scripting] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_read_update_write_load.py::TestReadUpdateWriteLoad::test_multi[read_update_write_load] [GOOD] Test command err: Was written: 0.0 MiB, Speed: 0.0 MiB/s Step 1. 
only write
[64 per-thread Write latency records elided; each record reports a single value across the 10/30/50/90/99% percentiles; min 2178 ms, max 12490 ms]
Step 2. read write
[64 per-thread Write latency records elided; min 2911 ms, max 13257 ms]
Read: 10% 4998 30% 7854 50% 10710 90% 16423 99% 17708 ms
Step 3. write modify
[64 per-thread Write latency records elided; min 2933 ms, max 14276 ms]
Update: 10% 1748 30% 1748 50% 1748 90% 1748 99% 1748 ms
Step 4. read modify write
Was written: 18.9453125 MiB, Speed: 0.3157552083333333 MiB/s
[64 per-thread Write latency records elided; min 3684 ms, max 13799 ms]
Read: 10% 3767 30% 3821 50% 3874 90% 15015 99% 17522 ms
Update: 10% 1733 30% 1733 50% 1733 90% 1733 99% 1733 ms
|74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_raw[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_raw[scan] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client3-year Uint32 NOT NULL-True] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_input_compression[v2-client0] [GOOD] >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args3-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args3-column] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v1-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_csv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_csv[stream] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_raw[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_json[data] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client4-year Int64-False] >> test_s3_1.py::TestS3::test_failed_precompute[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_failed_precompute[v2-false-client0] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_csv[stream] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_tsv[scripting] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_json[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_json[scan] >> test_insert.py::TestS3::test_append[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-csv_with_names-parquet] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithMetadata::test_cluster_is_operational_with_metadata [GOOD] >> test_insert.py::TestS3::test_append[v2-client0] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_list] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params1-true] [GOOD] >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-false] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_tsv[scripting] [GOOD] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_tsv[stream] >>
test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_json[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_csv[data] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client2-year Uint64 NOT NULL-True] [GOOD] >> test_ydb_backup.py::TestRecursiveNonConsistent::test_recursive_table_backup_from_different_places [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v1-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client3-year Date NOT NULL-False] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.csv-csv_with_names] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_csv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_csv[scan] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[tsv-additional_args3-column] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[json-additional_args4-row] >> test_ydb_scripting.py::TestExecuteScriptWithParamsFromStdin::test_skip_rows_tsv[stream] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_tsv[data] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client4-year Int64-False] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_tsv[data] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client5-year Int64 NOT NULL-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_tsv[scan] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[json-additional_args4-row] [GOOD] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[json-additional_args4-column] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfBasic::test_cluster_is_operational_with_distconf [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_invalid_output_compression[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v2-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_stdin_par_tsv[scan] [GOOD] >> test_s3_0.py::TestS3::test_limit[v2-true-kikimr_params0-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_mix_json_and_binary[data] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v1-false-client0] >> 
test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client3-year Date NOT NULL-False] [GOOD] >> test_s3_1.py::TestS3::test_failed_precompute[v2-false-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client4-year String NOT NULL-True] >> test_s3_1.py::TestS3::test_failed_precompute[v2-true-client0] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_mix_json_and_binary[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_mix_json_and_binary[scan] >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[json-additional_args4-column] [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRStoreConfigDir::test_config_stored_in_config_store [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithMetadataBlock::test_cluster_is_operational_with_metadata [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_mix_json_and_binary[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_json[data] >> test_insert.py::TestS3::test_append[v2-client0] [GOOD] >> test_bindings_0.py::TestBindings::test_binding_operations[v2-kikimr_settings1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client4-year String NOT NULL-True] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings1-client0] >> test_insert.py::TestS3::test_part_split[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_json[data] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client5-year String-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_json[scan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client5-year Int64 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.json-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client6-year Uint64-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_json[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_csv[data] >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_list] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_output_compression[v2-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_each_row] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v1-client0] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/multinode/py3test |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_csv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_csv[scan] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] |74.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfigFromConfigDir::test_generate_dynamic_config_from_config_store [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_tsv[data] >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_tsv[scan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.json-json_each_row] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_different_sources_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_json[data] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.parquet-parquet] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client5-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client6-year Utf8 NOT NULL-True] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_json[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_json[scan] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client6-year Uint64-False] [GOOD] >> test_s3_1.py::TestS3::test_failed_precompute[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client7-year Uint64 NOT NULL-False] >> test_insert.py::TestS3::test_part_split[v1-client0] [GOOD] >> test_s3_1.py::TestS3::test_missed[v1-false-client0] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_insert.py::TestS3::test_part_split[v2-client0] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v2-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_json[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_csv[data] >> 
test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_csv[data] [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestConfigWithoutMetadataMirror::test_cluster_is_operational_without_metadata [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_csv[scan] >> test_format_setting.py::TestS3::test_timestamp_simple_iso[v2-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client6-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client7-year Utf8-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_tsv[data] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/autoconfig/py3test >> test_actorsystem.py::TestWithHybridNodeWith3Cpu::test [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client7-year Uint64 NOT NULL-False] [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_tsv[scan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client8-year String NOT NULL-True] >> test_formats.py::TestS3Formats::test_custom_csv_delimiter_format[v2-client0] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v1-client0] >> test_s3_1.py::TestS3::test_missed[v1-false-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_tsv[scan] [GOOD] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] >> test_s3_1.py::TestS3::test_missed[v1-true-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_raw[data] >> test_alter_tiering.py::TestAlterTiering::test[many_tables] [FAIL] >> test_insert.py::TestS3::test_part_split[v2-client0] [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_insert.py::TestS3::test_part_merge[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_raw[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_raw[scan] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/fq/s3/py3test >> test_bindings_1.py::TestBindings::test_decimal_binding[v2-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002578/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002578/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=919689) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922879 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] >> test_ydb_scripting.py::TestExecuteScriptFromStdinWithWideOutput::test_wide_table >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client7-year Utf8-False] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_framing_newline_delimited_raw[scan] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client8-year Int32-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_raw[data] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client8-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client9-year String-False] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_raw[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_raw[scan] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageNoChanges::test_cluster_change_state_storage [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_with_metadata.py::TestKiKiMRWithoutMetadata::test_cluster_is_operational_without_metadata [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-false] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo}
ydb/tests/fq/s3/py3test >> test_compressions.py::TestS3Compressions::test_invalid_compression_inference[v2-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_compressions/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_compressions/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=906729) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 909063 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
|74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-true] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v1-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_raw[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_json[data] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v2-client0]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_insert.py::TestInsert::test_multi[read_data_during_bulk_upsert] [FAIL]
Test command err:
contrib/python/pytest/py3/_pytest/threadexception.py:77: PytestUnhandledThreadExceptionWarning: Exception in thread Thread-28 (_test_suffix)
Traceback (most recent call last):
  File "contrib/tools/python3/Lib/threading.py", line 1075, in _bootstrap_inner
    self.run()
  File "contrib/tools/python3/Lib/threading.py", line 1012, in run
    self._target(*self._args, **self._kwargs)
  File "/home/runner/.ya/build/build_root/u93c/0026b2/environment/arcadia/ydb/tests/olap/scenario/conftest.py", line 110, in _test_suffix
    ctx.executable(self, ctx)
  File "ydb/tests/olap/scenario/test_insert.py", line 103, in scenario_read_data_during_bulk_upsert
    thread2.join_all()
  File "ydb/tests/olap/common/thread_helper.py", line 45, in join_all
    thread.join(timeout=timeout)
  File "ydb/tests/olap/common/thread_helper.py", line 18, in join
    raise self.exc
  File "ydb/tests/olap/common/thread_helper.py", line 11, in run
    self.ret = self._target(*self._args, **self._kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "ydb/tests/olap/scenario/test_insert.py", line 59, in _loop_insert
    raise Exception('Insert failed table {}'.format(table))
Exception: Insert failed table 0
|74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v1-kikimr_settings1-client0] [SKIPPED] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-csv_with_names] [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_sql.py::TestExecuteSqlWithPgSyntax::test_pg_syntax [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-parquet] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_json[data] [GOOD] >> test_bindings_0.py::TestBindings::test_modify_connection_with_a_lot_of_bindings[v2-kikimr_settings1-client0] [SKIPPED] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_json[scan] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] >> test_s3_1.py::TestS3::test_missed[v1-true-client0] [GOOD] >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client8-year Int32-False] [GOOD] >> test_s3_1.py::TestS3::test_missed[v2-false-client0] >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client9-year Uint32-False] >> test_insert.py::TestS3::test_part_merge[v1-client0] [GOOD] >> test_insert.py::TestS3::test_part_merge[v2-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_csv[data] >> test_ydb_backup.py::TestRecursiveSchemeOnly::test_recursive_table_backup_from_different_places |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_scripting.py::TestExecuteScriptFromStdinWithWideOutput::test_wide_table [GOOD] |74.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_configuration_version.py::TestConfigurationVersion::test_configuration_version [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client9-year String-False] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_csv[data] [GOOD] >> test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_csv[scan] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client10-year Utf8-False] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-5-kikimr_params2-true] [GOOD] >>
test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-false] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_tsv[data] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_tsv[scan] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client9-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client10-year Int64 NOT NULL-True] >> test_formats.py::TestS3Formats::test_no_not_nullable_column[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_no_nullable_column[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_full_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_raw[data] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.json-json_each_row] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_raw[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_raw[scan] >> test_s3_1.py::TestS3::test_missed[v2-false-client0] [GOOD] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-false] [GOOD] >> test_s3_1.py::TestS3::test_missed[v2-true-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-true] >> test_drain.py::TestHive::test_drain_tablets >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_raw[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_json[data] >> test_insert.py::TestS3::test_part_merge[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client10-year Utf8-False] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_list] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_list] [SKIPPED] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client11-year Utf8 NOT NULL-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client10-year Int64 NOT NULL-True] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client11-year Int64-False] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_json[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_json[scan] >> test_statistics.py::TestS3::test_convert[v1-client0-parquet-parquet] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_list] >> test_s3_0.py::TestS3::test_bad_format[v1-false-client0] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v1-true-client0] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_formats.py::TestS3Formats::test_no_nullable_column[v1-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_json[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_csv[data] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] [GOOD] >> test_formats.py::TestS3Formats::test_no_nullable_column[v2-client0] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-100-kikimr_params2-true] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client11-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client11-year Utf8 NOT NULL-True] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_csv[data] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-false] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_csv[scan] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client12-year Uint64-False] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client12-year Date-False] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_generate_dynamic_config.py::TestGenerateDynamicConfig::test_generate_dynamic_config [GOOD] >> test_ydb_backup.py::TestRecursiveSchemeOnly::test_recursive_table_backup_from_different_places [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.json-json_each_row] [GOOD] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.parquet-parquet] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_tsv[data] >> test_s3_1.py::TestS3::test_missed[v2-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v1-false-client0] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> 
test_config_with_metadata.py::TestConfigWithoutMetadataBlock::test_cluster_is_operational_without_metadata [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_tsv[scan] |74.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-json_each_row] [GOOD] >> test_insert.py::TestS3::test_part_binding[v1-client0-csv_with_names] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-false] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v1-true-client0] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-true] >> test_formats.py::TestS3Formats::test_no_nullable_column[v2-client0] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v2-false-client0] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v1-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_batching_adaptive_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_json[data] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client12-year Date-False] [GOOD] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client13-year Date NOT NULL-True] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_json[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_json[scan] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client13-year Date-False] >> test_s3_1.py::TestS3::test_simple_hits_47[v1-false-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_each_row] >> test_s3_1.py::TestS3::test_simple_hits_47[v1-true-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_json[scan] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_csv[data] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_csv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_csv[scan] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> 
test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v1-timestamp/simple_iso/test.parquet-parquet] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.csv-csv_with_names] >> test_size_limit.py::TestS3::test_size_limit[v1-client0-500-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-false] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_tsv[data] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client13-year Date NOT NULL-True] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client14-year Datetime-False] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_insert.py::TestS3::test_part_binding[v1-client0-csv_with_names] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_tsv[scan] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_list] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_list] [SKIPPED] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_each_row] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v2[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v1-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_ignore_excess_parameters_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_csv[data] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v1[v1-client0] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client0-year Int32 NOT NULL-True] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_s3_0.py::TestS3::test_bad_format[v2-false-client0] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_s3_1.py::TestS3::test_simple_hits_47[v1-true-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_csv[data] [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v2-true-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_csv[scan] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-false-client0] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client14-year Datetime-False] [GOOD] >> 
test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client15-year Datetime NOT NULL-True] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v2-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_tsv[data] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-json_each_row] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-csv_with_names] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_tsv[data] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_tsv[scan] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client0-year Int32 NOT NULL-True] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client1-year Uint32 NOT NULL-True] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.csv-csv_with_names] [GOOD] >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_bad_header_tsv[scan] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-false] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.tsv-tsv_with_names] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_csv[data] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-true] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] [GOOD] >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v2-client0] [GOOD] |75.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorage::test_cluster_change_state_storage [GOOD] >> test_s3_0.py::TestS3::test_bad_format[v2-true-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_csv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_csv[scan] >> test_s3_0.py::TestS3::test_bad_request_on_invalid_parquet[v2-client0] >> test_insert.py::TestS3::test_part_binding[v2-client0-json_each_row] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_type_in_csv[v2-client0] [GOOD] >> test_insert.py::TestS3::test_part_binding[v2-client0-csv_with_names] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-false-client0] [GOOD] >> 
test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v1-client0] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-true-client0] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_tsv[data] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-false-client15-year Datetime NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client0-year Int32-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client1-year Uint32 NOT NULL-True] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.1%| [TA] $(B)/ydb/tests/functional/autoconfig/test-results/py3test/{meta.json ... results_accumulator.log} |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_tsv[scan] >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client2-year Uint64 NOT NULL-True] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-std] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_scripting.py::TestExecuteScriptFromStdinWithWideOutput::test_wide_table [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-5-kikimr_params2-true] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-false] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_columns_no_header_tsv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_csv[data] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-csv_with_names] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_impex.py::TestImpex::test_import_stdin_with_bom[json-additional_args4-column] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-parquet] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_csv[data] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_csv[scan] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted >> test_insert.py::TestS3::test_part_binding[v2-client0-csv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client0-year Int32-False] [GOOD] >> 
test_insert.py::TestS3::test_error[v1-client0-json_each_row] >> test_s3_0.py::TestS3::test_bad_request_on_invalid_parquet[v2-client0] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_csv[scan] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_tsv[data] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client1-year Int32 NOT NULL-False] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_s3_0.py::TestS3::test_bad_request_on_compression[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client2-year Uint64 NOT NULL-True] [GOOD] >> test_s3_1.py::TestS3::test_simple_hits_47[v2-true-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client3-year Date NOT NULL-False] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-True-client0] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-std] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_tsv[data] [GOOD] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_tsv[scan] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-false] [GOOD] >> test_validation.py::TestS3::test_empty[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v1-client0] [GOOD] >> test_validation.py::TestS3::test_nested_issues[v1-client0] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-true] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.tsv-tsv_with_names] [GOOD] >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.json-json_each_row] >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_tsv[scan] [GOOD] >> test_ydb_backup.py::TestRecursiveConsistent::test_recursive_table_backup_from_different_places |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test [GOOD]
Test command err:
upsert #0 ok, result: [] Quota exceeded False
upsert #1 ok, result: [] Quota exceeded False
upsert #2 ok, result: [] Quota exceeded False
upsert #3 ok, result: [] Quota exceeded False
upsert #4 ok, result: [] Quota exceeded False
upsert #5 ok, result: [] Quota exceeded False
upsert #6 ok, result: [] Quota exceeded False
upsert #7 ok, result: [] Quota exceeded False
upsert #8 ok, result: [] Quota exceeded False
upsert #9 ok, result: [] Quota exceeded False
upsert #10 ok, result: [] Quota exceeded False
upsert #11 ok, result: [] Quota exceeded False
upsert #12 ok, result: [] Quota exceeded False
upsert: got overload issue
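The quota-exhaustion output above shows the shape of the test: it keeps upserting batches until the data quota rejects a write with an overload issue, which the test treats as success. A minimal sketch of such a loop with the ydb Python SDK follows; the table name, query text, and batch limit are assumptions for illustration, not the test's actual code.

```python
# Hypothetical reconstruction of the upsert-until-overload loop; only the
# printed messages are taken from the log above, everything else is assumed.
import ydb

def upsert_until_overload(session, table, max_batches=100):
    for i in range(max_batches):
        try:
            # Each batch writes rows until the per-database data quota fills up.
            session.transaction().execute(
                f"UPSERT INTO `{table}` (k, v) VALUES ({i}, 'payload');",
                commit_tx=True,
            )
            print(f"upsert #{i} ok")
        except ydb.issues.Overloaded:
            # Quota enforcement surfaces as an overload issue; reaching this
            # branch is the expected outcome of a quota-exhaustion test.
            print("upsert: got overload issue")
            return i
    raise AssertionError("quota was never exhausted")
```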
|75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_size_limit.py::TestS3::test_size_limit[v2-client0-100-kikimr_params2-true] [GOOD] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-false] |75.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client1-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client2-year Uint32-False] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-True-client0] [GOOD] >> test_validation.py::TestS3::test_nested_issues[v1-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-exact_file-False-client0] >> test_validation.py::TestS3::test_nested_issues[v2-client0] >> test_s3_0.py::TestS3::test_bad_request_on_compression[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_invalid_column_in_parquet[v2-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_list-parquet] [GOOD] >> test_formats.py::TestS3Formats::test_simple_pg_types[v1-client0] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client3-year Date NOT NULL-False] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_list] >> test_s3_0.py::TestS3::test_checkpoints_on_join_s3_with_yds[v1-mvp_external_ydb_endpoint0-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client4-year String NOT NULL-True] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |75.2%| [TA] {RESULT} $(B)/ydb/tests/functional/autoconfig/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_drain.py::TestHive::test_drain_on_stop |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_insert.py::TestS3::test_error[v1-client0-json_each_row] [GOOD] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-false] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-csv_with_names] >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-true] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_then_can_lookup_them [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.json-json_each_row] [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client2-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client4-year String NOT NULL-True] [GOOD] >> test_validation.py::TestS3::test_nested_issues[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client3-year Uint32 NOT NULL-True] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client5-year String-False] >> test_validation.py::TestS3::test_nested_type[v1-client0] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-False-client0] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_simple_pg_types[v1-client0] [GOOD] >> test_formats.py::TestS3Formats::test_simple_pg_types[v2-client0] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-true] [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_alter_tiering.py::TestAlterTiering::test_multi[many_tables] [GOOD]
Test command err:
contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
contrib/python/moto/py3/moto/s3/models.py:122: DeprecationWarning: datetime.datetime.utcnow() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.now(datetime.UTC).
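Each warning above names its own replacement, so the migration is mechanical; as a quick reference, the modernized calls look like this (a sketch for Python 3.12+, not a patch to the vendored Flask/Werkzeug/moto code):

```python
# Modern equivalents of the deprecated calls flagged in the warnings above.
import ast
import importlib.util
from datetime import datetime, timezone

spec = importlib.util.find_spec("flask")  # replaces pkgutil.find_loader("flask")
node = ast.Constant(value="rule")         # replaces ast.Str("rule"); read node.value, not node.s
now = datetime.now(timezone.utc)          # replaces datetime.datetime.utcnow()
```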
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_bindings_0.py::TestBindings::test_name_uniqueness_constraint[v2-client0] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_bindings_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=919423) is multi-threaded, use of fork() may lead to deadlocks in the child.
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922320 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed
ResourceWarning: Enable tracemalloc to get the object allocation traceback
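These ResourceWarnings describe two distinct problems in the test recipe: the log files handed to subprocess.Popen are never closed by the parent, and the child is never waited on, while the DeprecationWarning flags fork() use from a multi-threaded process. A hedged sketch of the caller-side fix follows; the helper below is hypothetical, and only the Popen call and log-file pattern come from the log itself.

```python
import multiprocessing
import subprocess

def start_moto_server(cmd, out_path, err_path):
    # Close the parent's copies of the log handles as soon as the child has
    # inherited them; this is exactly what the unclosed-file warnings ask for.
    with open(out_path, "w", encoding="utf-8") as out, \
         open(err_path, "w", encoding="utf-8") as err:
        return subprocess.Popen(cmd, stdout=out, stderr=err)

# Keep the returned Popen object and call .wait() (or .kill()) in teardown so
# the "subprocess ... is still running" warning cannot fire. For the fork()
# DeprecationWarning, prefer the spawn start method in threaded test runners:
ctx = multiprocessing.get_context("spawn")
```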
>> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client3-year Uint32 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client4-year Int64-False] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v0-fifo] [GOOD] |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_list] [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_each_row] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_recompiles_requests.py::TestSqsRecompilesRequestsForOtherQueue::test_recompiles_queries[tables_format_v1-fifo] [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_insert.py::TestS3::test_error[v1-client0-csv_with_names] [GOOD] >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_insert.py::TestS3::test_error[v1-client0-tsv_with_names] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/hive/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client5-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client6-year Utf8 NOT NULL-True] >> test_format_setting.py::TestS3::test_timestamp_simple_iso_insert[v2-timestamp/simple_iso/test.parquet-parquet] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-False-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_ydb_backup.py::TestRecursiveConsistent::test_recursive_table_backup_from_different_places [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client4-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client5-year Int64 NOT NULL-False] >> test_formats.py::TestS3Formats::test_simple_pg_types[v2-client0] [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_formats.py::TestS3Formats::test_precompute[v1-client0] >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_keyvalue_tablet_it_will_be_restarted [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client6-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client7-year Utf8-False] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-false-directory_scan-False-client0] [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-True-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_insert.py::TestS3::test_error[v1-client0-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] >> test_insert.py::TestS3::test_error[v1-client0-parquet] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_precompute[v1-client0] [GOOD] 
>> test_insert.py::TestS3::test_error[v1-client0-parquet] [SKIPPED] >> test_insert.py::TestS3::test_insert_empty_object[v1] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client5-year Int64 NOT NULL-False] [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_formats.py::TestS3Formats::test_precompute[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client6-year Uint64-False] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignAlterIsAllowedNoVersion |75.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client7-year Utf8-False] [GOOD] >> test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client8-year Int32-False] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-False-client0] >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.json-json_each_row] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageMultipleRingGroup::test_cluster_change_state_storage [GOOD] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client6-year Uint64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client7-year Uint64 NOT NULL-False] >> test_formats.py::TestS3Formats::test_precompute[v2-client0] [GOOD] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_insert.py::TestS3::test_insert_empty_object[v1] [GOOD] >> test_insert.py::TestS3::test_insert_empty_object[v2] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v1-client0] |75.4%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/tests/functional/limits/py3test |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_validation.py::TestS3::test_nested_type[v1-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client8-year Int32-False] [GOOD] >> test_validation.py::TestS3::test_nested_type[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client9-year Uint32-False] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_distconf.py::TestKiKiMRDistConfReassignStateStorageToTheSameConfig::test_cluster_change_state_storage [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-csv_with_names] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-parquet] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-exact_file-False-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-True-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.parquet-parquet] >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ydb_cli/py3test >> test_ydb_table.py::TestExecuteQueryWithParamsFromStdin::test_skip_rows_tsv[scan] [GOOD] >> test_ydb_backup.py::TestSingleBackupRestore::test_single_table_with_data_backup_restore >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client7-year Uint64 NOT NULL-False] [GOOD] >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client8-year String NOT NULL-True] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v1-client0] [GOOD] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client9-year Uint32-False] [GOOD] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client10-year Int64 NOT NULL-True] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_kill_tablets.py::TestKillTablets::test_when_kill_hive_it_will_be_restarted_and_can_create_tablets [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldSuccess >> TYdbControlPlaneStorageListConnections::ShouldSuccess >> test_insert.py::TestS3::test_insert_empty_object[v2] [GOOD] >> test_insert.py::TestS3::test_insert_without_format_error[v1-client0] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> 
test_kill_tablets.py::TestKillTablets::test_then_kill_system_tablets_and_it_increases_generation [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-False-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_size_limit.py::TestS3::test_size_limit[v2-client0-500-kikimr_params2-true] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_size_limit/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_size_limit/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=907319) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 911057 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client8-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client9-year String-False] >> TYdbControlPlaneStorageListConnections::ShouldSuccess [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldPageToken >> TYdbControlPlaneStorageDeleteQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldValidate >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_create_tablets.py::TestHive::test_when_create_tablets_after_bs_groups_and_kill_hive_then_tablets_start [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_and_drop_table_many_times_in_range 
[GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client10-year Int64 NOT NULL-True] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-json_each_row-parquet] [GOOD] >> test_formats.py::TestS3Formats::test_raw_empty_schema_query[v2-client0] [GOOD] >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client11-year Int64-False] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_list] >> TYdbControlPlaneStorageDeleteQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckSuperUser >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldSuccess >> TYdbControlPlaneStoragePipeline::ShouldSkipBindingIfDisabledConnection >> TYdbControlPlaneStorageModifyBinding::ShouldValidate >> TYdbControlPlaneStorageDescribeQuery::ShouldSuccess >> test_insert.py::TestS3::test_insert_without_format_error[v1-client0] [GOOD] >> test_insert.py::TestS3::test_insert_without_format_error[v2-client0] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionSuccess >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v1-true-directory_scan-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-True-client0] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client9-year String-False] [GOOD] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[none] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client10-year Utf8-False] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPublic >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client11-year Int64-False] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldSuccess >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldValidate >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client12-year Uint64-False] >> TYdbControlPlaneStorageListQueries::ShouldSuccess >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPublic >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldProhibitDeletionOfRunningQuery >> TYdbControlPlaneStorageDescribeQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldValidate >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-True-client0] 
[GOOD] >> InMemoryControlPlaneStorage::ExecuteSimpleStreamQuery >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-False-client0] >> TYdbControlPlaneStorageModifyConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckSuperUser >> test_insert.py::TestS3::test_insert_without_format_error[v2-client0] [GOOD] >> test_insert.py::TestS3::test_raw_format_validation[v1-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_list] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_each_row] >> TYdbControlPlaneStorageDescribeQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckSuperUser >> TYdbControlPlaneStorageModifyBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouleCheckObjectStorageProjectionByColumns >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPublic >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client10-year Utf8-False] [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldSuccess >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client11-year Utf8 NOT NULL-True] >> test_ydb_backup.py::TestSingleBackupRestore::test_single_table_with_data_backup_restore [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client12-year Uint64-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.json-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client13-year Date-False] >> TYdbControlPlaneStorageListQueries::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldPageToken >> TYdbControlPlaneStorageListConnections::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldEmptyPageToken >> TYdbControlPlaneStorageDeleteQuery::ShouldProhibitDeletionOfRunningQuery [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionEmpty >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldSuccess >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageModifyConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckWithoutIdempotencyKey >> TYdbControlPlaneStorageModifyBinding::ShouleCheckObjectStorageProjectionByColumns [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageModifyBinding::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckLowerCaseName >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageGetResult::ShouldSuccess >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-exact_file-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-True-client0] >> InMemoryControlPlaneStorage::ExecuteSimpleStreamQuery [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client11-year Utf8 NOT NULL-True] [GOOD] >> 
test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client12-year Date-False] |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/config/py3test >> test_config_migration.py::TestConfigMigrationToV2::test_migration_to_v2 [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePublic >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageCreateConnection::ShouldSucccess >> TYdbControlPlaneStorageModifyQuery::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldModifyRunningQuery >> InMemoryControlPlaneStorage::ExecuteSimpleAnalyticsQuery |75.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> TYdbControlPlaneStoragePipeline::ShouldCheckSimplePipeline >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckWithoutIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionSuccess >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.parquet-parquet] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldSuccess >> TYdbControlPlaneStorageListBindings::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldFilterByName >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v1-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client0-year Int32 NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-True-client0] [GOOD] >> test_insert.py::TestS3::test_raw_format_validation[v1-client0] [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivate >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-False-client0] >> test_insert.py::TestS3::test_raw_format_validation[v2-client0] >> TYdbControlPlaneStoragePipeline::ShouldSkipBindingIfDisabledConnection [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveTopicConsumers >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldSuccess >> TYdbControlPlaneStorageCreateConnection::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldDisableCurrentIam >> TYdbControlPlaneStorageGetResult::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageGetResult::ShouldEmpty >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivate |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[kick_tablets] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-json_each_row] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-csv_with_names] >> 
TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldMoveFromScopeToPrivateWithError |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase0::test_effective_acls_are_too_large [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldDisableCurrentIam [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldValidate >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageListQueries::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldEmptyPageToken >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client12-year Date-False] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMaxLengthName >> TYdbControlPlaneStorageCreateConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckUniqueName >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client13-year Date NOT NULL-True] >> TYdbControlPlaneStorageModifyQuery::ShouldModifyRunningQuery [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldValidate >> test_format_setting.py::TestS3::test_timestamp_simple_posix[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPermission >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] >> InMemoryControlPlaneStorage::ExecuteSimpleAnalyticsQuery [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD] >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivatePublic >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-false-directory_scan-False-client0] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client0-year Int32 NOT NULL-True] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldMoveFromScopeToPrivateWithError [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionEmpty >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client1-year Uint32 NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-True-client0] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewAst >> ShouldNotShowPassword::ShouldNotShowPasswordClickHouse >> TYdbControlPlaneStorageListBindings::ShouldFilterByName [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldFilterByMe >> TYdbControlPlaneStorageCreateConnection::ShouldCheckUniqueName [GOOD] >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicSuccess >> TYdbControlPlaneStorageDescribeConnection::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckPermission >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionEmpty [GOOD] >> 
TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageModifyQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckSuperUser >> TYdbControlPlaneStorageGetResult::ShouldEmpty [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStoragePipeline::ShouldSaveTopicConsumers [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveDqGraphs >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] [GOOD] >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicFailed >> TYdbControlPlaneStorageDeleteQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldSuccess >> test_insert.py::TestS3::test_raw_format_validation[v2-client0] [GOOD] >> test_validation.py::TestS3::test_nested_type[v2-client0] [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldValidate >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client13-year Date NOT NULL-True] [GOOD] >> test_insert.py::TestS3::test_block_insert_enable[v1-client0] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMultipleDotsName >> TYdbControlPlaneStorageCreateConnectionPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckIdempotencyKey >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldApplyPermissionViewAst [GOOD] >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldNotApplyPermissionViewAstAndViewQueryText >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client14-year Datetime-False] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckExist >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-csv_with_names] [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckWithoutIdempotencyKey >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-parquet] >> TYdbControlPlaneStoragePipeline::ShouldCheckSimplePipeline [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldIncrementGeneration >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client1-year Uint32 NOT NULL-True] [GOOD] >> ShouldNotShowPassword::ShouldNotShowPasswordClickHouse [GOOD] >> ShouldNotShowPassword::ShouldNotShowPasswordPostgreSQL >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client2-year Uint64 NOT NULL-True] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPublic >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-False-client0] >> 
TYdbControlPlaneStorageCreateQuery::ShouldSucccess >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveDqGraphs [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldSaveResultSetMetas >> TYdbControlPlaneStorageCreateQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCreateJob >> TYdbControlPlaneStorageListBindings::ShouldFilterByMe [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldPageToken >> test_drain.py::TestHive::test_drain_on_stop [FAIL] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckExist >> TYdbControlPlaneStorageListConnections::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageDescribeBinding::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckPermission >> TYdbControlPlaneStorageListQueries::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldValidate >> TYdbControlPlaneStoragePipeline::ShouldIncrementGeneration [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckStopModifyRun >> TYdbControlPlaneStorageCreateQuery::ShouldCreateJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckListJobs >> TYdbControlPlaneStorageCreateQuery::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldValidate >> TYdbControlPlaneStorageDescribeQueryPermissions::ShouldNotApplyPermissionViewAstAndViewQueryText [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldSuccess >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldValidate >> TYdbControlPlaneStorageModifyQuery::ShouldCheckWithoutIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess >> test_ydb_backup.py::TestBackupRestoreInRoot::test_table_backup_restore_in_root >> ShouldNotShowPassword::ShouldNotShowPasswordPostgreSQL [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldSucccess >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPublic >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-exact_file-False-client0] [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicSuccess >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client14-year Datetime-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client15-year Datetime NOT NULL-True] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-True-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client2-year Uint64 NOT NULL-True] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMultipleDotsName [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_list_directory_v1[v1-client0] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPermission >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client3-year Date NOT NULL-False] >> 
test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v2-client0] >> TYdbControlPlaneStoragePipeline::ShouldSaveResultSetMetas [GOOD] >> TYdbControlPlaneStorageQuotas::GetDefaultQuotas >> TYdbControlPlaneStorageCreateQuery::ShouldCheckListJobs [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsByQuery >> TYdbControlPlaneStorageListQueries::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterName >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckExist >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicFailed >> TYdbControlPlaneStorageGetQueryStatus::ShouldSuccess [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckPermission >> test_drain.py::TestHive::test_drain_tablets [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldValidate >> test_insert.py::TestS3::test_block_insert_enable[v1-client0] [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess >> TYdbControlPlaneStorageQuotas::GetDefaultQuotas [GOOD] >> TYdbControlPlaneStorageQuotas::OverrideQuotas >> test_insert.py::TestS3::test_block_insert_enable[v2-client0] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPublic >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivate >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v2-client0] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageDeleteConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckSuperUser >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeFailed >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v1-client0] >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsByQuery [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsCreatedByMe >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckExist >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeSuccess [GOOD] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeFailed >> TYdbControlPlaneStorageControlQuery::ShouldSucccess [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldValidate >> TYdbControlPlaneStoragePipeline::ShouldCheckStopModifyRun [GOOD] >> 
TYdbControlPlaneStoragePipeline::ShouldCheckJobMeta >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client3-year Date NOT NULL-False] [GOOD] >> TYdbControlPlaneStorageQuotas::OverrideQuotas [GOOD] >> TYdbControlPlaneStorageQuotas::GetStaleUsage >> test_explicit_partitioning_1.py::TestS3::test_projection_date_type_validation[v2-true-client15-year Datetime NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client0-column_type0-True] >> TYdbControlPlaneStorageCreateQueryPermissions::ShouldApplyPermissionQueryInvokeFailed [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldSuccess >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client4-year String NOT NULL-True] >> TYdbControlPlaneStorageDescribeConnection::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckSuperUser >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionQueryInvokeFailed [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionEmpty >> test_ydb_over_fq.py::TestYdbOverFq::test_list_without_streams[v1-client0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_formats.py::TestS3Formats::test_with_infer_and_unsupported_option[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_formats/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_formats/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=918787) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922342 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-True-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test >> test_drain.py::TestHive::test_drain_on_stop [FAIL] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldValidate >> TYdbControlPlaneStorageCreateQuery::ShouldListJobsCreatedByMe [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJob >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v2-client0] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] >> TYdbControlPlaneStorageListQueries::ShouldFilterName [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterByMe >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckNotExistOldName >> test_statistics.py::TestS3::test_convert[v2-client0-csv_with_names-parquet] [GOOD] >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-False-client0] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_list] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldValidate >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client0-column_type0-True] [GOOD] >> TYdbControlPlaneStorageQuotas::GetStaleUsage [GOOD] >> TYdbControlPlaneStorageQuotas::PushUsageUpdate >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageControlQuery::ShouldValidate [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckIdempotencyKey >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeIncorrectJob >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client1-column_type1-True] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageGetQueryStatus::ShouldValidate [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckSuperUser >> TYdbControlPlaneStorageDeleteBinding::ShouldSuccess [GOOD] >> 
TYdbControlPlaneStorageDeleteBinding::ShouldCheckPermission >> TYdbControlPlaneStorageDescribeBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckSuperUser >> test_s3_0.py::TestS3::test_checkpoints_on_join_s3_with_yds[v1-mvp_external_ydb_endpoint0-client0] [GOOD] >> test_ydb_backup.py::TestBackupRestoreInRoot::test_table_backup_restore_in_root [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePublic >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckIdempotencyKey >> test_s3_0.py::TestS3::test_double_optional_types_validation[v2-client0] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageDescribeConnection::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeConnection::ShouldNotShowClickHousePassword >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client1-column_type1-True] [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeIncorrectJob [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJobIncorrectVisibility >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client4-year String NOT NULL-True] [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterByMe [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterType >> TYdbControlPlaneStorageQuotas::PushUsageUpdate [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldValidateCreate >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client2-column_type2-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client5-year String-False] >> test_insert.py::TestS3::test_block_insert_enable[v2-client0] [GOOD] >> TYdbControlPlaneStorageGetQueryStatus::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionEmpty >> test_insert.py::TestS3::test_block_insert_value[v1-client0] >> TYdbControlPlaneStoragePipeline::ShouldCheckJobMeta [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckClearFields >> test_s3_1.py::TestS3::test_i18n_unpartitioned[v2-true-directory_scan-False-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client2-column_type2-True] [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckDescribeJobIncorrectVisibility [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldSaveQuery >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client3-column_type3-False] >> TYdbControlPlaneStorageRateLimiter::ShouldValidateCreate [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldValidateDelete >> TYdbControlPlaneStorageControlQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionFailed >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivateAfterModify 
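(Note on the "unclosed file" ResourceWarnings in the moto_server test output above: they point at log files opened and handed to subprocess.Popen without the parent ever closing its copies. A minimal sketch of that pattern and a fix follows; the function and parameter names are hypothetical, not the actual library/recipes/common code.)

import subprocess

def start_server_leaky(cmd, out_path, err_path):
    # The pattern the warning flags: the parent's file objects are never
    # closed, so the GC closes them later and emits ResourceWarning.
    out = open(out_path, "w", encoding="utf-8")
    err = open(err_path, "w", encoding="utf-8")
    return subprocess.Popen(cmd, stdout=out, stderr=err)

def start_server_clean(cmd, out_path, err_path):
    # Closing the parent's handles right after spawn silences the warning;
    # the child process keeps writing through its own inherited descriptors.
    with open(out_path, "w", encoding="utf-8") as out, \
         open(err_path, "w", encoding="utf-8") as err:
        return subprocess.Popen(cmd, stdout=out, stderr=err)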
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client3-column_type3-False] [GOOD] >> TBSVWithReboots::CreateAssignAlterIsAllowedNoVersion [GOOD] >> TYdbControlPlaneStorageDescribeBinding::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionFailed >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckExist >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldPageToken [GOOD] >> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageListBindings::ShouldValidate >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client4-column_type4-True] >> TYdbControlPlaneStorageCreateQuery::ShouldSaveQuery [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckQueryName >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckNotExistOldName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScope >> TYdbControlPlaneStorageRateLimiter::ShouldValidateDelete [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldCreateRateLimiterResource >> TYdbControlPlaneStorageDescribeConnection::ShouldNotShowClickHousePassword [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStorageListQueries::ShouldFilterType [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldFilterMode >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivate >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client4-column_type4-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client5-column_type5-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client5-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client6-year Utf8 NOT NULL-True] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignAlterIsAllowedNoVersion [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: 
[1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:45:18.891558Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:45:18.891594Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:45:18.891601Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:45:18.891608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:45:18.891626Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:45:18.891632Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:45:18.891643Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:45:18.891661Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:45:18.891815Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:45:18.891900Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:45:18.931132Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:45:18.931163Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:45:18.931313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:45:18.936812Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:45:18.936862Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:45:18.936904Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:45:18.938653Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:45:18.938711Z node 1 :FLAT_TX_SCHEMESHARD INFO: 
schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:45:18.938843Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:45:18.938941Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:45:18.940467Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:45:18.940554Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:45:18.940867Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:45:18.940879Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:45:18.940920Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:45:18.940930Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:45:18.940937Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:45:18.940964Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:45:18.950678Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:45:18.990438Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:45:18.990561Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.990663Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:45:18.990717Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:45:18.990732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, 
suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.992954Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:45:18.993005Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:45:18.993079Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.993095Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:45:18.993103Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:45:18.993111Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:45:18.994685Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.994715Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:45:18.994724Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:45:18.995437Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.995453Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:45:18.995461Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:45:18.995472Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:45:18.996436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:45:18.997655Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:45:18.997734Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 
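(Reading aid for the TBSVWithReboots schemeshard trace around this point, reconstructed only from the log lines themselves and not from YDB sources: each operation part moves through numbered states, logged as "Change state for txid N:0 A -> B", with 128 held until the coordinator's plan step arrives.)

# Hypothetical summary of the state progression visible in this trace.
OP_STATES = {
    2: "TCreateParts",     # "TCreateParts opId# ... ProgressState"
    3: "TConfigureParts",  # "NSubDomainState::TConfigureParts ... ProgressState"
    128: "TPropose",       # waits for the FAKE_COORDINATOR plan step above
    240: "TDone",          # "TDone opId# ... ProgressState", then publication
}

def next_state(state: int) -> int:
    # Transitions observed in this log: 2 -> 3 -> 128 -> 240.
    return {2: 3, 3: 128, 128: 240}[state]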
2025-06-03T10:45:18.998020Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:45:18.998077Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:45:18.998088Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:45:18.998169Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... LAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:45:55.573659Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:45:55.573761Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:45:55.573774Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:68: NBSVState::TConfigureParts operationId: 1003:0 ProgressState, at schemeshard72057594046678944 2025-06-03T10:45:55.577956Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 272761856 2025-06-03T10:45:55.578029Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72075186233409547 2025-06-03T10:45:55.578112Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-03T10:45:55.578168Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxId: 1003 Origin: 72075186233409547 Status: OK 2025-06-03T10:45:55.578178Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:23: NBSVState::TConfigureParts operationId: 1003:0 HandleReply TEvSetConfigResult, at schemeshard: 72057594046678944 2025-06-03T10:45:55.578192Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 3 -> 128 2025-06-03T10:45:55.580666Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:45:55.580757Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:45:55.580768Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:192: NBSVState::TPropose operationId# 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:45:55.580787Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1003 ready parts: 1/1 2025-06-03T10:45:55.580839Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: 
TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1003 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:45:55.581938Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1003:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1003 msg type: 269090816 2025-06-03T10:45:55.581987Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1003 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-03T10:45:55.582093Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:45:55.582122Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 339302418539 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:45:55.582134Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1003:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-03T10:45:55.582196Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 128 -> 240 2025-06-03T10:45:55.582244Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 FAKE_COORDINATOR: Erasing txId 1003 2025-06-03T10:45:55.583040Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:45:55.583054Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:45:55.583115Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:45:55.583122Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [79:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-03T10:45:55.583228Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:45:55.583238Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-03T10:45:55.583254Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:45:55.583260Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:45:55.583266Z node 79 
:FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:45:55.583269Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:45:55.583275Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-03T10:45:55.583282Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:45:55.583287Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-06-03T10:45:55.583292Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1003:0 2025-06-03T10:45:55.583343Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-03T10:45:55.583351Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-03T10:45:55.583356Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:45:55.583495Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:45:55.583511Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:45:55.583537Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:45:55.583543Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:45:55.583549Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:45:55.583568Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:45:55.584467Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 2025-06-03T10:45:55.584604Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:45:55.584660Z node 79 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: 
Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 67us result status StatusSuccess 2025-06-03T10:45:55.584768Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 32 } Partitions { BlockCount: 32 } Version: 2 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } Partitions { PartitionId: 1 TabletId: 72075186233409548 } VolumeTabletId: 72075186233409547 AlterVersion: 2 MountToken: "Owner123" TokenVersion: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v2-client0] [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckQueryName [GOOD] >> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client5-column_type5-True] [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionSuccess >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client6-column_type6-True] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionSuccess >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldValidate >> TYdbControlPlaneStoragePipeline::ShouldCheckClearFields [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckNodesHealthCheck >> TYdbControlPlaneStorageListBindings::ShouldValidate [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionEmpty >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_list] [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldCreateRateLimiterResource [GOOD] >> TYdbControlPlaneStorageRateLimiter::ShouldDeleteRateLimiterResource >> 
TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivate [GOOD]
>> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client6-column_type6-True] [GOOD]
>> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivate [GOOD]
>> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivatePublic
>> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_each_row]
>> TYdbControlPlaneStorageModifyConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPermission
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-False-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client7-column_type7-False]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-True-client0]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] [GOOD]
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/hive/py3test
>> test_drain.py::TestHive::test_drain_tablets [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client7-column_type7-False] [GOOD]
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test
>> test_multinode_cluster.py::TestSqsMultinodeCluster::test_reassign_master[stop_node] [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPermission [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckExist
>> TYdbControlPlaneStorageListQueries::ShouldFilterMode [GOOD]
>> TYdbControlPlaneStorageListQueries::ShouldFilterVisibility
>> TYdbControlPlaneStoragePipeline::ShouldCheckNodesHealthCheck [GOOD]
>> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetMeta
>> TYdbControlPlaneStorageListConnectionsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionEmpty
>> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD]
>> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivatePublic
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client8-column_type8-False]
>> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivateAfterModify [GOOD]
>> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivatePublic
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client6-year Utf8 NOT NULL-True] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client7-year Utf8-False]
>> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client8-column_type8-False] [GOOD]
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test
>> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-std] [GOOD]
>> TYdbControlPlaneStorageDeleteConnection::ShouldCheckPreviousRevisionSuccess [GOOD]
>> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionEmpty
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client9-column_type9-False]
>> TYdbControlPlaneStorageRateLimiter::ShouldDeleteRateLimiterResource [GOOD]
>> TYdbControlPlaneStorageTest::ShouldCreateTable
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPublic
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckExist [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckIdempotencyKey
>> TYdbControlPlaneStorageDeleteBinding::ShouldValidate [GOOD]
>> TYdbControlPlaneStorageDeleteBinding::ShouldCheckSuperUser
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test
>> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_create_many_directories_success [GOOD]
>> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client9-column_type9-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client10-column_type10-False]
>> TYdbControlPlaneStorageControlQuery::ShouldCheckPreviousRevisionSuccess [GOOD]
>> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionEmpty
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScope [GOOD]
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckModifyTheSame
>> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivate
>> TYdbControlPlaneStorageTest::ShouldCreateTable [GOOD]
>> TYdbControlPlaneStorageWriteResultData::ShouldValidateWrite
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPublic
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client10-column_type10-False] [GOOD]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-hive-True-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client11-column_type11-False]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-False-client0]
>> test_insert.py::TestS3::test_block_insert_value[v1-client0] [GOOD]
>> test_insert.py::TestS3::test_block_insert_value[v2-client0]
>> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query[v1-client0] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] [GOOD]
>> TYdbControlPlaneStorageWriteResultData::ShouldValidateWrite [GOOD]
>> TYdbControlPlaneStorageWriteResultData::ShouldValidateRead
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v2-client0]
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client11-column_type11-False] [GOOD]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names]
>> TCheckpointStorageTest::ShouldCreateCheckpoint
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client12-column_type12-False]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckIdempotencyKey [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionFailed
>> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float]
>> TYdbControlPlaneStorageListQueries::ShouldFilterVisibility [GOOD]
>> TYdbControlPlaneStorageListQueries::ShouldFilterAutomatic
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPublic [GOOD]
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivate
>> test_statistics.py::TestS3::test_convert[v2-client0-parquet-json_each_row] [GOOD]
>> TYdbControlPlaneStorageModifyQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD]
>> TYdbControlPlaneStorageNodesHealthCheck::ShouldValidate
>> TYdbControlPlaneStorageDeleteBinding::ShouldCheckSuperUser [GOOD]
>> TYdbControlPlaneStorageDeleteBinding::ShouldCheckIdempotencyKey
>> TCheckpointStorageTest::ShouldCreateCheckpoint [GOOD]
>> TCheckpointStorageTest::ShouldCreateGetCheckpoints
>> test_statistics.py::TestS3::test_convert[v2-client0-parquet-csv_with_names]
>> TYdbControlPlaneStorageWriteResultData::ShouldValidateRead [GOOD]
>> TYdbControlPlaneStorageWriteResultData::ShouldSuccess
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client7-year Utf8-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client12-column_type12-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client8-year Int32-False]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client13-column_type13-False]
>> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPublic
>> TYdbControlPlaneStorageGetResultDataPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> TYdbControlPlaneStorageGetTask::ShouldValidate
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageGetQueryStatusPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
Test command err:
Netstat: sh: 1: netstat: not found
Process stat: [truncated 'ps aux' listing of kernel and system threads elided] ... uiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings".
Create session OK
2025-06-03T10:45:57.970690Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:45:57.970693Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:45:57.970845Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK
2025-06-03T10:45:57.970849Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:45:57.970850Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:45:57.970918Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK
2025-06-03T10:45:57.970920Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:45:57.970921Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:45:57.971464Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK
2025-06-03T10:45:57.971479Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:57.971481Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:57.998149Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)"
2025-06-03T10:45:57.998168Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)":
2025-06-03T10:45:58.039170Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha"
2025-06-03T10:45:58.039188Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha":
2025-06-03T10:45:58.049510Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:45:58.049531Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys":
2025-06-03T10:45:58.052317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:45:58.052326Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections":
2025-06-03T10:45:58.052572Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets"
2025-06-03T10:45:58.052575Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets":
2025-06-03T10:45:58.052578Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:45:58.052585Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants":
2025-06-03T10:45:58.052662Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:45:58.052665Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings":
2025-06-03T10:45:58.053125Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:45:58.053129Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes":
2025-06-03T10:45:58.053336Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:45:58.053340Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries":
2025-06-03T10:45:58.062036Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:45:58.062037Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:45:58.062053Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small":
2025-06-03T10:45:58.062055Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas":
2025-06-03T10:45:58.062332Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:45:58.062345Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs":
2025-06-03T10:45:58.062465Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:58.062467Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases":
2025-06-03T10:45:58.062548Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:45:58.062551Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings":
2025-06-03T10:45:58.062626Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:45:58.062628Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageGetQueryStatusPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks":
|75.5%| [TA] $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... results_accumulator.log}
>> TCheckpointStorageTest::ShouldCreateGetCheckpoints [GOOD]
>> TCheckpointStorageTest::ShouldGetCheckpointsEmpty
>> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePublic
>> TYdbControlPlaneStorageCreateBinding::ShouldSucceed
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD]
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivate
>> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v1-client13-column_type13-False] [GOOD]
>> TYdbControlPlaneStorageNodesHealthCheck::ShouldValidate [GOOD]
>> TYdbControlPlaneStoragePingTask::ShouldValidate
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test
>> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetMeta [GOOD]
>> TYdbControlPlaneStoragePipeline::ShouldCheckRemovingOldResultSet
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client0-column_type0-True]
>> TCheckpointStorageTest::ShouldGetCheckpointsEmpty [GOOD]
>> TCheckpointStorageTest::ShouldDeleteGraph
>> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] [GOOD]
|75.5%| [TA] {RESULT} $(B)/ydb/tests/functional/hive/test-results/py3test/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateQuery::ShouldCheckAvailableConnections [GOOD]
Test command err:
Netstat: sh: 1: netstat: not found
Process stat: [truncated 'ps aux' listing of kernel and system threads elided] ... E DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK
2025-06-03T10:45:57.168824Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:45:57.168825Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:45:57.168945Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases".
Create session OK
2025-06-03T10:45:57.168956Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:57.168957Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:57.169052Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs". Create session OK
2025-06-03T10:45:57.169054Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:45:57.169055Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:45:57.169126Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK
2025-06-03T10:45:57.169127Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:45:57.169128Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:45:57.169192Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK
2025-06-03T10:45:57.169194Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets"
2025-06-03T10:45:57.169196Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets"
2025-06-03T10:45:57.206963Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)"
2025-06-03T10:45:57.206984Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)":
2025-06-03T10:45:57.325433Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha"
2025-06-03T10:45:57.325451Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha":
2025-06-03T10:45:57.353477Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:45:57.353492Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings":
2025-06-03T10:45:57.353994Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:45:57.353999Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings":
2025-06-03T10:45:57.354143Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:45:57.354148Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas":
2025-06-03T10:45:57.357333Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets"
2025-06-03T10:45:57.357347Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets":
2025-06-03T10:45:57.357676Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:45:57.357683Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries":
2025-06-03T10:45:57.360838Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:45:57.360851Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs":
2025-06-03T10:45:57.361042Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:45:57.361044Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys":
2025-06-03T10:45:57.361933Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:45:57.361942Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases":
2025-06-03T10:45:57.362123Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:45:57.362126Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants":
2025-06-03T10:45:57.362219Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:45:57.362222Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections":
2025-06-03T10:45:57.368108Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:45:57.368126Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small":
2025-06-03T10:45:57.368648Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:45:57.368658Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes":
2025-06-03T10:45:57.370063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:45:57.370073Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks":
>> TYdbControlPlaneStorageGetTask::ShouldValidate [GOOD]
>> TYdbControlPlaneStorageGetTask::ShouldWorkWithEmptyPending
>> TYdbControlPlaneStorageWriteResultData::ShouldSuccess [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionFailed [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionSuccess
>> TYdbControlPlaneStorageListQueries::ShouldFilterAutomatic [GOOD]
>> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionEmpty
>> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPublic
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivate [GOOD]
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivatePublic
>> TCheckpointStorageTest::ShouldDeleteGraph [GOOD]
>> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckModifyTheSame [GOOD]
>> TYdbControlPlaneStoragePingTask::ShouldValidate [GOOD]
>> TYdbControlPlaneStoragePipeline::ShouldCheckAbortInTerminatedState
>> TYdbControlPlaneStorageModifyBinding::ShouldCheckSuperUser
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-False-client0] [GOOD]
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test
>> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-True-client0]
>> TYdbControlPlaneStorageGetTask::ShouldWorkWithEmptyPending [GOOD]
>> TYdbControlPlaneStorageGetTask::ShouldBatchingGetTasks
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client0-column_type0-True] [GOOD]
>> TYdbControlPlaneStorageDeleteBinding::ShouldCheckIdempotencyKey [GOOD]
>> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionFailed
|75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test
>> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-fifo] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client1-column_type1-True]
>> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD]
>> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD]
>> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic
>> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckPreviousRevisionSuccess [GOOD]
>> TYdbControlPlaneStorageModifyQuery::ShouldCheckQueryName
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client8-year Int32-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client9-year Uint32-False]
>> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row]
>> TYdbControlPlaneStorageModifyBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> TYdbControlPlaneStorageModifyConnection::ShouldCheckPermission
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client1-column_type1-True] [GOOD]
>> TYdbControlPlaneStorageListJobsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
>> TYdbControlPlaneStorageListQueries::ShouldCheckLimit
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v2-client0] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client2-column_type2-True]
>> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v1-client0]
>> TYdbControlPlaneStorageCreateBinding::ShouldSucceed [GOOD]
>> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxLengthName
>> TYdbControlPlaneStorageGetTask::ShouldBatchingGetTasks [GOOD]
>> TYdbControlPlaneStorageListBindings::ShouldEmptyPageToken
>> test_insert.py::TestS3::test_block_insert_value[v2-client0] [GOOD]
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD]
>> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivatePublic
>> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC]
>> test_insert.py::TestS3::test_insert_deadlock[v1-false-client0]
>> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionEmpty [GOOD]
>> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPublic
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageWriteResultData::ShouldSuccess [GOOD]
Test command err:
Netstat: sh: 1: netstat: not found
Process stat: [truncated 'ps aux' listing of kernel and system threads elided] ... eShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK
2025-06-03T10:46:01.736896Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:46:01.736898Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:46:01.737147Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK
2025-06-03T10:46:01.737150Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:01.737152Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:01.737264Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK
2025-06-03T10:46:01.737274Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:01.737275Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:01.740509Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK
2025-06-03T10:46:01.740524Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:46:01.740527Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:46:01.745104Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections". Create session OK
2025-06-03T10:46:01.745124Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:01.745126Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:01.745453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK
2025-06-03T10:46:01.745473Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:01.745475Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:01.750512Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)"
2025-06-03T10:46:01.750541Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)":
2025-06-03T10:46:01.797065Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha"
2025-06-03T10:46:01.797084Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha":
2025-06-03T10:46:01.829409Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:46:01.829425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/jobs":
2025-06-03T10:46:01.829723Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:01.829728Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/bindings":
2025-06-03T10:46:01.829812Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:46:01.829814Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/idempotency_keys":
2025-06-03T10:46:01.829871Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:46:01.829872Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenant_acks":
2025-06-03T10:46:01.829917Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:46:01.829918Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/nodes":
2025-06-03T10:46:01.829964Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:46:01.829967Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/mappings":
2025-06-03T10:46:01.830032Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:46:01.830033Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/queries":
2025-06-03T10:46:01.830074Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:01.830075Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/connections":
2025-06-03T10:46:01.830105Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:01.830106Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/pending_small":
2025-06-03T10:46:01.830139Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:01.830141Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:01.830175Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:01.830176Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:01.830222Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:01.830224Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:01.830252Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:01.830254Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageWriteResultData::TTestCaseShouldSuccess::Execute_(NUnitTest::TTestContext&)/tenants": >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client2-column_type2-True] [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivate >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client3-column_type3-False] >> TYdbControlPlaneStoragePipeline::ShouldCheckAbortInTerminatedState [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAst >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-csv_with_names] [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionFailed [GOOD] >> TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionSuccess >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-false-projection-True-client0] [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-parquet] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] >> test_ydb_backup.py::TestBackupRestoreInRootSchemeOnly::test_table_backup_restore_in_root_scheme_only >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-False-client0] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckExist >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client3-column_type3-False] [GOOD] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePublic [GOOD] >> 
TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivate >> TYdbControlPlaneStoragePipeline::ShouldCheckRemovingOldResultSet [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckPrioritySelectionEntities |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client4-column_type4-True] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMultipleDotsName ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_ends_request_after_kill [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client9-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client4-column_type4-True] [GOOD] >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] [GOOD] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldDeleteMarkedCheckpoints [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckQueryName [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client10-year Int64 NOT NULL-True] >> test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client5-column_type5-True] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivate >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckNotExistOldName >> TYdbControlPlaneStorageListQueries::ShouldCheckLimit [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckScopeVisibility >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client5-column_type5-True] [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageModifyBinding::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckWithoutIdempotencyKey >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client6-column_type6-True] >> test_insert.py::TestS3::test_insert_deadlock[v1-false-client0] [GOOD] >> test_insert.py::TestS3::test_insert_deadlock[v1-true-client0] >> 
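An aside on the DeprecationWarning in the multinode py3test output above: ydb/tests/library/sqs/requests_client.py:140 calls logger.warn(), which the Python logging module has deprecated in favor of logger.warning(). A minimal sketch of the fix follows; only the logger call and message text come from the log line, while the function name and surrounding code are assumptions for illustration:

    import logging

    logger = logging.getLogger(__name__)

    def report_failed_request(code, reason, text):
        # Logger.warn() is a deprecated alias of Logger.warning() and emits
        # DeprecationWarning under pytest's default warning filters;
        # warning() is the supported spelling with the same signature.
        logger.warning(
            "Last request failed with code {}, reason '{}' and text '{}'".format(
                code, reason, text))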
TYdbControlPlaneStorageDeleteBinding::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionEmpty >> TYdbControlPlaneStoragePipeline::ShouldCheckAst [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAstClear >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client10-year Int64 NOT NULL-True] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-False-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client6-column_type6-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client11-year Int64-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client7-column_type7-False] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckNotAvailable >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-True-client0] |75.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-fifo] [GOOD] >> TYdbControlPlaneStorageDescribeConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionEmpty >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client7-column_type7-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client8-column_type8-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_stream_execute_scan_query[v1-client0] [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivatePublic >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivate [GOOD] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivatePublic >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v2-client0] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v0-std] [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivatePublic >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client8-column_type8-False] [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckNotAvailable [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldValidate >> TYdbControlPlaneStorageModifyConnection::ShouldCheckNotExistOldName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckLowerCaseName >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client9-column_type9-False] >> test_ydb_backup.py::TestBackupRestoreInRootSchemeOnly::test_table_backup_restore_in_root_scheme_only [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] >> 
test_format_setting.py::TestS3::test_timestamp_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections [GOOD] >> test_statistics.py::TestS3::test_convert[v2-client0-parquet-parquet] [GOOD] >> test_statistics.py::TestS3::test_precompute[v1-client0] >> test_insert.py::TestS3::test_insert_deadlock[v1-true-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client9-column_type9-False] [GOOD] >> test_insert.py::TestS3::test_insert_deadlock[v2-false-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client10-column_type10-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-hive-True-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-False-client0] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMaxLengthName >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client10-column_type10-False] [GOOD] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> TYdbControlPlaneStorageListQueries::ShouldCheckScopeVisibility [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckPrivateVisibility >> TYdbControlPlaneStoragePipeline::ShouldCheckPrioritySelectionEntities [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetLimit >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client11-column_type11-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client11-year Int64-False] [GOOD] >> TYdbControlPlaneStorageListQueriesPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckAllowedSymbolsName >> TYdbControlPlaneStorageModifyBinding::ShouldCheckWithoutIdempotencyKey [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionFailed >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client12-year Uint64-False] >> TYdbControlPlaneStoragePipeline::ShouldCheckAstClear [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDescribeBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? 
[remainder of ps aux kernel-thread listing omitted, as above] ... ingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small".
Create session OK 2025-06-03T10:46:04.022326Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:04.022328Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:04.022437Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-03T10:46:04.022445Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:04.022447Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:04.022539Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-03T10:46:04.022546Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:04.022547Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:04.022623Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants". 
Create session OK 2025-06-03T10:46:04.022630Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:04.022631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:04.033275Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:04.033316Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:04.105559Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:04.105584Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:04.123634Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:04.123651Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:04.124158Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:04.124165Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:04.138546Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:04.138562Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:04.138843Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:04.138848Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:04.138956Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:04.138958Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:04.139056Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:04.139059Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:04.139165Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:04.139167Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:04.142331Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:04.142349Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:04.142609Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:04.142614Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:04.144296Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:04.144312Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:04.148469Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:04.148493Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:04.148739Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:04.148742Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:04.148832Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:04.148835Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants": >> TYdbControlPlaneStorageCreateBinding::ShouldValidate [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldValidateFormatSetting >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivatePublic >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client11-column_type11-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client12-column_type12-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v2-client0] [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivate >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMaxLengthName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMultipleDotsName >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v1-client0] >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client12-column_type12-False] [GOOD] >> 
test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionEmpty [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPublic >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client13-column_type13-False] >> TCheckpointStorageTest::ShouldRegisterCoordinator >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-False-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-True-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.tsv-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_integer_type_validation[v2-client13-column_type13-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client0-column_type0-False] >> TYdbControlPlaneStorageControlQueryPermissions::ShouldApplyPermissionManagePrivatePublic [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckLowerCaseName >> TStateStorageTest::ShouldSaveGetOldSmallState >> TCheckpointStorageTest::ShouldRegisterCoordinator [GOOD] >> TCheckpointStorageTest::ShouldGetCoordinators >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client12-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client13-year Date-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client0-column_type0-False] [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckSuperUser >> TYdbControlPlaneStoragePipeline::ShouldCheckResultSetLimit [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckGetResultDataRequest >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client1-column_type1-False] >> TYdbControlPlaneStorageCreateBinding::ShouldValidateFormatSetting [GOOD] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicSuccess >> TCheckpointStorageTest::ShouldGetCoordinators [GOOD] >> TCheckpointStorageTest::ShouldMarkCheckpointsGc >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckAllowedSymbolsName >> TYdbControlPlaneStorageListBindingsPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckLimit ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyQuery::ShouldCheckAvailableConnections [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? 
[remainder of ps aux kernel-thread listing omitted, as above] ...
_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-03T10:46:06.631165Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:06.631166Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:06.631237Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections". Create session OK 2025-06-03T10:46:06.631239Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:06.631240Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:06.631314Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs". Create session OK 2025-06-03T10:46:06.631316Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:06.631317Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:06.631574Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries". Create session OK 2025-06-03T10:46:06.631578Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:06.631581Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:06.631869Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants". 
Create session OK 2025-06-03T10:46:06.631872Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:06.631873Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:06.674026Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:06.674043Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:06.713138Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:06.713166Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:06.743263Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:06.743281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:06.743579Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:06.743583Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:06.745220Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:06.745227Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:06.745379Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:06.745387Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:06.745869Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:06.745885Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:06.748260Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:06.748275Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:06.748487Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:06.748497Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:06.748576Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:06.748584Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:06.748656Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:06.748666Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:06.748719Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:06.748723Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:06.748813Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:06.748823Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:06.748873Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:06.748875Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:06.748925Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:06.748932Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyQuery::TTestCaseShouldCheckAvailableConnections::Execute_(NUnitTest::TTestContext&)/quotas": >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl [GOOD] >> TStateStorageTest::ShouldSaveGetOldSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetOldBigState >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client1-column_type1-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client2-column_type2-False] >> TStateStorageTest::ShouldSaveGetOldBigState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementSmallState >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] [GOOD] >> TCheckpointStorageTest::ShouldMarkCheckpointsGc [GOOD] >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints >> test_s3_0.py::TestS3::test_double_optional_types_validation[v2-client0] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckExist >> TStateStorageTest::ShouldSaveGetIncrementSmallState [GOOD] >> TStateStorageTest::ShouldSaveGetIncrementBigState >> test_s3_0.py::TestS3::test_json_list_validation[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client2-column_type2-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client3-column_type3-False] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPublic [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivate >> TStateStorageTest::ShouldSaveGetIncrementBigState [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.json-json_each_row] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionFailed [GOOD] >> 
TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionSuccess >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client3-column_type3-False] [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client4-column_type4-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_results[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_precompute[v1-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v2-client0] >> TCheckpointStorageTest::ShouldNotDeleteUnmarkedCheckpoints [GOOD] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId >> TYdbControlPlaneStorageModifyConnection::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMoveToScope >> test_statistics.py::TestS3::test_precompute[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client4-column_type4-False] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v1-true-projection-True-client0] [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters >> TYdbControlPlaneStorageCreateBinding::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckAllowedSymbolsName >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client5-column_type5-True] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-False-client0] >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicSuccess [GOOD] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicFailed >> test_insert.py::TestS3::test_insert_deadlock[v2-false-client0] [GOOD] >> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client0-year Int32 NOT NULL-True] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client5-column_type5-True] [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckGetResultDataRequest [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldRetryQuery >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client6-column_type6-False] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-std] [GOOD] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/limits/py3test >> test_schemeshard_limits.py::TestSchemeShardLimitsCase1::test_too_large_acls [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client6-column_type6-False] [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldEmptyPageToken [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckLimit >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client7-column_type7-False] >> 
TYdbControlPlaneStorageDeleteConnectionPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPermission >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivate [GOOD] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client7-column_type7-False] [GOOD] >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client8-column_type8-False] >> TYdbControlPlaneStorageCreateBindingPermissions::ShouldApplyPermissionManagePublicFailed [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckNotAvailable >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.parquet-parquet] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client8-column_type8-False] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStoragePipeline::ShouldCheckAutomaticTtl [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? 
S 10:12 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/19] root 130 0.1 0.0 0 0 ? S 10:12 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/20] root 136 0.1 0.0 0 0 ? S 10:12 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/20] root 139 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/21] root 142 0.1 0.0 0 0 ? S 10:12 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... L_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:11.027048Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:11.027121Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-03T10:46:11.027123Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:11.027124Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:11.027205Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings". 
Create session OK 2025-06-03T10:46:11.027207Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:11.027208Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:11.027275Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-03T10:46:11.027277Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:11.027279Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:11.027340Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries". Create session OK 2025-06-03T10:46:11.027342Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:11.027344Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:11.027454Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets". 
Create session OK 2025-06-03T10:46:11.027456Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:11.027457Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:11.053658Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:11.053679Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:11.120786Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:11.120805Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:11.121864Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:11.121876Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:11.145437Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:11.145457Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:11.145727Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:11.145730Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:11.145823Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:11.145825Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:11.149346Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:11.149371Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:11.149627Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:11.149630Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:11.149742Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:11.149744Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:11.153461Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:11.153476Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:11.153716Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:11.153719Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:11.153789Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:11.153791Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:11.153834Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:11.153836Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:11.153874Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:11.153875Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:11.153912Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:11.153915Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldCheckAutomaticTtl::Execute_(NUnitTest::TTestContext&)/bindings": >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-False-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client9-column_type9-False] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-True-client0] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client0-year Int32 NOT NULL-True] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckMoveToScope [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckIdempotencyKey >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client9-column_type9-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client10-column_type10-False] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckNotAvailable [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxCountConnections >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client1-year Uint32 NOT NULL-True] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client10-column_type10-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client11-column_type11-False] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivate [GOOD] |75.6%| [TA] $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... 
results_accumulator.log} >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivatePublic >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client11-column_type11-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client12-column_type12-False] >> TStateStorageTest::ShouldDeleteNoCheckpoints >> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0] [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldRetryQuery [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckNotAutomaticTtl |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldNotGetNonExistendState [GOOD] |75.6%| [TA] {RESULT} $(B)/ydb/tests/functional/limits/test-results/py3test/{meta.json ... results_accumulator.log} >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckExist >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client12-column_type12-False] [GOOD] >> test_statistics.py::TestS3::test_precompute[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client13-column_type13-False] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckPreviousRevisionSuccess [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScopeWithPrivateConnection >> test_statistics.py::TestS3::test_sum[v1-client0] >> TStateStorageTest::ShouldDeleteNoCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints2 >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxCountConnections [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckIdempotencyKey >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v1-client13-column_type13-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client0-column_type0-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client1-year Uint32 NOT NULL-True] [GOOD] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TCheckpointStorageTest::ShouldRetryOnExistingGraphDescId [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v1-date_time/simple_iso/test.parquet-parquet] [GOOD] >> TStateStorageTest::ShouldDeleteNoCheckpoints2 [GOOD] >> TStateStorageTest::ShouldDeleteCheckpoints >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client2-year Uint64 NOT NULL-True] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.csv-csv_with_names] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckAllowedSymbolsName [GOOD] >> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxCountBindings >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-hive-True-client0] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client0-column_type0-False] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckIdempotencyKey [GOOD] >> 
TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-False-client0] >> TStateStorageTest::ShouldDeleteCheckpoints [GOOD] >> TStateStorageTest::ShouldDeleteGraph >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client1-column_type1-False] >> TStateStorageTest::ShouldDeleteGraph [GOOD] >> TStateStorageTest::ShouldGetMultipleStates >> TYdbControlPlaneStorageListConnections::ShouldCheckLimit [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckScopeVisibility ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListQueries::ShouldCombineFilters [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/6] root 52 0.1 0.0 0 0 ? S 10:12 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/6] root 54 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/6:0-rcu_gp] root 55 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/7] root 58 0.1 0.0 0 0 ? S 10:12 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? 
I< 10:12 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/21] root 142 0.1 0.0 0 0 ? S 10:12 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... l create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:14.529367Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:14.529457Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-03T10:46:14.529459Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:14.529461Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:14.529531Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-03T10:46:14.529533Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:14.529534Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:14.533861Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases". 
Create session OK 2025-06-03T10:46:14.533875Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:14.533878Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:14.533930Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK 2025-06-03T10:46:14.533948Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:14.533951Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:14.534601Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-03T10:46:14.534613Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:14.534615Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:14.550141Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:14.550160Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:14.610509Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:14.610535Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:14.648608Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:14.648626Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:14.649425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:14.649434Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:14.650321Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:14.650327Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:14.650922Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:14.650930Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:14.651087Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:14.651091Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:14.651166Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:14.651168Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:14.651674Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:14.651681Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:14.651804Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:14.651807Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:14.651901Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:14.651903Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:14.651970Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:14.651972Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:14.652046Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:14.652048Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:14.652603Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:14.652610Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:14.654752Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:14.654762Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListQueries::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys": >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client1-column_type1-False] [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckExist [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckIdempotencyKey >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client2-column_type2-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client2-year Uint64 NOT NULL-True] [GOOD] >> test_s3_0.py::TestS3::test_json_list_validation[v1-client0] [GOOD] >> TYdbControlPlaneStorageDescribeJobPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckPermission |75.6%| [TM] {default-linux-x86_64, 
relwithdebinfo} ydb/tests/datashard/select/py3test >> TYdbControlPlaneStorageCreateConnection::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckLowerCaseName >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client3-year Date NOT NULL-False] >> test_s3_0.py::TestS3::test_json_list_validation[v2-client0] >> TStateStorageTest::ShouldGetMultipleStates [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client2-column_type2-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client3-column_type3-False] |75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[kick_tablets-fifo] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client3-column_type3-False] [GOOD] >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.tsv-tsv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client4-column_type4-False] >> TYdbControlPlaneStoragePipeline::ShouldCheckNotAutomaticTtl [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckChangeAutomaticTtl >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client4-column_type4-False] [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckLowerCaseName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxLengthName >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client5-column_type5-True] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-False-client0] [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckIdempotencyKey [GOOD] >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-True-client0] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckMoveToScopeWithPrivateConnection [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreateScopeeBindingWithUnavailableConnection >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client5-column_type5-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client6-column_type6-False] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckPermission [GOOD] >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v2-client0] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v1-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client3-year Date NOT NULL-False] [GOOD] >> 
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client4-year String NOT NULL-True]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client6-column_type6-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client7-column_type7-False]
>> TYdbControlPlaneStorageCreateConnection::ShouldCheckMaxLengthName [GOOD]
>> TYdbControlPlaneStorageCreateConnection::ShouldCheckMultipleDotsName
>> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float]
>> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD]
>> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.json-json_each_row]
>> test_ydb_backup.py::TestIncompleteBackup::test_incomplete_backup_will_not_be_restored
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client7-column_type7-False] [GOOD]
>> TYdbControlPlaneStorageCreateBinding::ShouldCheckMaxCountBindings [GOOD]
>> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey
|75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldGetMultipleStates [GOOD]
|75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client8-column_type8-False]
>> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed [GOOD]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-false-projection-True-client0] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDeleteBindingPermissions::ShouldApplyPermissionViewPrivatePublic [GOOD]
Test command err:
Netstat: sh: 1: netstat: not found
Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init ...
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK
2025-06-03T10:46:15.615007Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:15.615008Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:15.615214Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK
2025-06-03T10:46:15.615217Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:15.615218Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:15.615326Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections". Create session OK
2025-06-03T10:46:15.615328Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:15.615329Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:15.615423Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK
2025-06-03T10:46:15.615425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:46:15.615427Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:46:15.647664Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)"
2025-06-03T10:46:15.647684Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)":
2025-06-03T10:46:15.689489Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha"
2025-06-03T10:46:15.689513Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha":
2025-06-03T10:46:15.710090Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:46:15.710112Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenants":
2025-06-03T10:46:15.713670Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:46:15.713690Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/tenant_acks":
2025-06-03T10:46:15.714281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:46:15.714287Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/jobs":
2025-06-03T10:46:15.714456Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:46:15.714460Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/idempotency_keys":
2025-06-03T10:46:15.714712Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:46:15.714716Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/mappings":
2025-06-03T10:46:15.722933Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:15.722950Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/connections":
2025-06-03T10:46:15.725546Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:15.725559Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/bindings":
2025-06-03T10:46:15.725889Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:46:15.725893Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/queries":
2025-06-03T10:46:15.726029Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:15.726032Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/compute_databases":
2025-06-03T10:46:15.726124Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets"
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:15.726126Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:15.726230Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:15.726232Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:15.726318Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:15.726320Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:15.726420Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:15.726422Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteBindingPermissions::TTestCaseShouldApplyPermissionViewPrivatePublic::Execute_(NUnitTest::TTestContext&)/pending_small": >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client8-column_type8-False] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client9-column_type9-False] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckMultipleDotsName [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckAllowedSymbolsName >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyConnection::ShouldCheckPreviousRevisionFailed [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? 
I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/6] root 52 0.1 0.0 0 0 ? S 10:12 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/6] root 54 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/6:0-rcu_gp] root 55 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/7] root 58 0.1 0.0 0 0 ? S 10:12 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/8] root 64 0.1 0.0 0 0 ? S 10:12 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/9] root 70 0.1 0.0 0 0 ? S 10:12 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/10] root 76 0.1 0.0 0 0 ? S 10:12 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/10] root 79 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/11] root 82 0.1 0.0 0 0 ? S 10:12 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/12] root 88 0.1 0.0 0 0 ? S 10:12 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/12] root 90 0.0 0.0 0 0 ? 
I 10:12 0:00 [kworker/12:0-rcu_gp] root 91 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/13] root 94 0.1 0.0 0 0 ? S 10:12 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/14] root 100 0.1 0.0 0 0 ? S 10:12 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/14] root 103 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/14:0H-events_highpri] root 104 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/15] root 106 0.1 0.0 0 0 ? S 10:12 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/16] root 112 0.1 0.0 0 0 ? S 10:12 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/17] root 118 0.1 0.0 0 0 ? S 10:12 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/18] root 124 0.1 0.0 0 0 ? S 10:12 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/19] root 130 0.1 0.0 0 0 ? S 10:12 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/20] root 136 0.1 0.0 0 0 ? S 10:12 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/20] root 139 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/21] root 142 0.1 0.0 0 0 ? S 10:12 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... 
estSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:46:18.432360Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:46:18.432410Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK
2025-06-03T10:46:18.432412Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:18.432414Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:18.432443Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs". Create session OK
2025-06-03T10:46:18.432445Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:46:18.432447Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:46:18.432473Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK
2025-06-03T10:46:18.432475Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:18.432476Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:18.439568Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)"
2025-06-03T10:46:18.439585Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)":
2025-06-03T10:46:18.489521Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha"
2025-06-03T10:46:18.489543Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha":
2025-06-03T10:46:18.525497Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:18.525518Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings":
2025-06-03T10:46:18.525791Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs"
2025-06-03T10:46:18.525794Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs":
2025-06-03T10:46:18.525877Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants"
2025-06-03T10:46:18.525882Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks"
2025-06-03T10:46:18.525884Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks":
2025-06-03T10:46:18.525895Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants":
2025-06-03T10:46:18.525938Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small"
2025-06-03T10:46:18.525940Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small":
2025-06-03T10:46:18.525989Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings"
2025-06-03T10:46:18.525991Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings":
2025-06-03T10:46:18.526028Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:46:18.526031Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries":
2025-06-03T10:46:18.526037Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys"
2025-06-03T10:46:18.526038Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys":
2025-06-03T10:46:18.526079Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets"
2025-06-03T10:46:18.526082Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets":
2025-06-03T10:46:18.526101Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes"
2025-06-03T10:46:18.526103Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes":
2025-06-03T10:46:18.530281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas"
2025-06-03T10:46:18.530299Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas":
2025-06-03T10:46:18.537479Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:18.537497Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases":
2025-06-03T10:46:18.538049Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections"
2025-06-03T10:46:18.538059Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyConnection::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections":
2025-06-03T10:46:19.203405Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:644: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:185: Revision of the connection has been changed already. Please restart the request with a new revision
2025-06-03T10:46:19.205803Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user@staff, utcue9atkdi656rfnk12] ModifyConnectionRequest: {connection_id: "utcue9atkdi656rfnk12" content { name: "test_connection_name_2" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } previous_revision: 10 } ERROR: { : Error: Revision of the connection has been changed already. Please restart the request with a new revision, code: 1003 }
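The ModifyConnection failure above is the control-plane storage's optimistic locking at work: the request carried previous_revision: 10, but the stored connection had already moved to a newer revision, so the call was rejected with code 1003 and a hint to restart the request with a new revision. A minimal sketch of the retry loop that hint implies, in Python with a hypothetical client object whose describe_connection/modify_connection helpers stand in for the real service calls:

    # Hypothetical helper, not the YDB SDK API: re-read the connection's current
    # revision and replay the modification, as the "code: 1003" error text suggests.
    def modify_connection_with_retry(client, connection_id, content, max_attempts=3):
        for _ in range(max_attempts):
            current = client.describe_connection(connection_id)  # fetch latest revision
            result = client.modify_connection(
                connection_id=connection_id,
                content=content,
                previous_revision=current.revision,  # optimistic-lock token
            )
            if result.code != 1003:  # anything but a revision conflict is final
                return result
        raise RuntimeError("revision conflict persisted after %d attempts" % max_attempts)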
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client9-column_type9-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client10-column_type10-False]
|75.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client10-column_type10-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client4-year String NOT NULL-True] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client11-column_type11-False]
>> test_s3_0.py::TestS3::test_json_list_validation[v2-client0] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client5-year String-False]
>> test_s3_0.py::TestS3::test_schema_validation[v1-client0]
>> TYdbControlPlaneStorageModifyBinding::ShouldNotCreateScopeeBindingWithUnavailableConnection [GOOD]
>> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateBindingWithUnavailableConnection
>> TYdbControlPlaneStorageCreateConnection::ShouldCheckAllowedSymbolsName [GOOD]
>> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionWrite
>> TYdbControlPlaneStorageListBindings::ShouldCheckLimit [GOOD]
>> TYdbControlPlaneStorageListBindings::ShouldCheckScopeVisibility
>> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client11-column_type11-False] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.json-json_each_row] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client12-column_type12-False]
>> TYdbControlPlaneStorageListConnections::ShouldCheckScopeVisibility [GOOD]
>> TYdbControlPlaneStorageListConnections::ShouldCheckPrivateVisibility
>> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.parquet-parquet]
>> TStateStorageTest::ShouldSaveGetOldSmallState2Tasks [GOOD]
>> TStorageServiceTest::ShouldCreateCheckpoint
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client12-column_type12-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client13-column_type13-False]
>> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey [GOOD]
>> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-False-client0] [GOOD]
>> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-True-client0]
>> TYdbControlPlaneStoragePipeline::ShouldCheckChangeAutomaticTtl [GOOD]
>> TYdbControlPlaneStoragePipeline::ShouldCheckResultsTTL
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_enum_type_validation[v2-client13-column_type13-False] [GOOD]
>> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client0-column_type0-False]
>> TStorageServiceTest::ShouldCreateCheckpoint [GOOD]
>> TStorageServiceTest::ShouldGetCheckpoints
>> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDeleteQuery::ShouldCheckPreviousRevisionFailed [GOOD]
Test command err:
Netstat: sh: 1: netstat: not found
Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init ... s"
2025-06-03T10:46:21.081922Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK
2025-06-03T10:46:21.081929Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:21.081930Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings"
2025-06-03T10:46:21.083244Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK
2025-06-03T10:46:21.083251Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:21.083255Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases"
2025-06-03T10:46:21.083464Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries". Create session OK
2025-06-03T10:46:21.083469Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:46:21.083471Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries"
2025-06-03T10:46:21.083694Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK
Create session OK 2025-06-03T10:46:21.083697Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:21.083698Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:21.117762Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:21.117777Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:21.145524Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:21.145545Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:21.149428Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:21.149445Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:21.149730Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:21.149738Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:21.173722Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:21.173740Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:21.173774Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas" 
2025-06-03T10:46:21.173780Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:21.174134Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:21.174151Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:21.174355Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:21.174359Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:21.174426Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:21.174428Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:21.174497Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:21.174499Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:21.174554Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:21.174557Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:21.174634Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:21.174635Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:21.174687Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:21.174689Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:21.174755Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:21.174758Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:21.174817Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:21.174818Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDeleteQuery::TTestCaseShouldCheckPreviousRevisionFailed::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:21.676737Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:185: Revision of the query has been changed already. Please restart the request with a new revision 2025-06-03T10:46:21.677029Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DeleteQueryRequest - DeleteQueryResult: {query_id: "utque9atkavkq8dtbnuv" previous_revision: 100 } ERROR: {
: Error: Revision of the query has been changed already. Please restart the request with a new revision, code: 1003 } >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client0-column_type0-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client1-column_type1-True] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckIdempotencyKey [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND [... kernel-thread process listing omitted; verbatim duplicate of the dump above ...] ... stSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:19.771451Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK 2025-06-03T10:46:19.771453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:19.771454Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:19.771555Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-03T10:46:19.771559Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:19.771560Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:19.771631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections".
Create session OK 2025-06-03T10:46:19.771633Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:19.771634Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:19.771721Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries". Create session OK 2025-06-03T10:46:19.771723Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:19.771725Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:19.780581Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings". Create session OK 2025-06-03T10:46:19.780594Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:19.780597Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:19.790145Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:19.790154Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:19.865406Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:19.865430Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:19.871829Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:19.871849Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:19.873073Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:19.873082Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:19.879830Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:19.879862Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:19.879907Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:19.879917Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:19.880109Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:19.880114Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:19.880239Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:19.880241Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:19.880979Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:19.880991Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:19.890540Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:19.890564Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:19.891237Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:19.891254Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:19.891450Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:19.891453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:19.891997Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:19.892008Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:19.892143Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:19.892152Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:19.892242Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:19.892247Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes": >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged >> test_ydb_backup.py::TestIncompleteBackup::test_incomplete_backup_will_not_be_restored [GOOD] >> TStorageServiceTest::ShouldGetCheckpoints [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageDescribeQuery::ShouldCheckExist [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: 
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND [... kernel-thread process listing omitted; verbatim duplicate of the dump above ...] ... alvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:21.734136Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-03T10:46:21.734139Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:21.734141Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:21.734312Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-03T10:46:21.734315Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:21.734316Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:21.734418Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-03T10:46:21.734420Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:21.734421Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:21.734511Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas".
Create session OK 2025-06-03T10:46:21.734513Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:21.734514Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:21.734590Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK 2025-06-03T10:46:21.734591Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:21.734593Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:21.749988Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:21.749995Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:21.795053Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:21.795070Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:21.807133Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:21.807150Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:21.811879Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:21.811898Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:21.811905Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:21.811913Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:21.812593Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:21.812600Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:21.815283Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:21.815298Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:21.815626Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:21.815631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:21.815897Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:21.815901Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:21.817272Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:21.817288Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:21.829995Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:21.830012Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:21.830622Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: 
schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:21.830631Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:21.830679Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:21.830683Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:21.830808Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:21.830811Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:21.830852Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:21.830856Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageDescribeQuery::TTestCaseShouldCheckExist::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:22.196411Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: DescribeQueryRequest - DescribeQueryResult: {query_id: "abra" } ERROR: {
: Error: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/ydb_control_plane_storage_queries.cpp:664: Query does not exist or permission denied. Please check the id of the query or your access rights, code: 1000 } >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client1-column_type1-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client2-column_type2-False] >> test_format_setting.py::TestS3::test_date_time_simple_iso[v2-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.csv-csv_with_names] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client2-column_type2-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client3-column_type3-True] >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client5-year String-False] [GOOD] >> TStorageServiceTest::ShouldAbortCheckpoint [GOOD] >> TStorageServiceTest::ShouldGetState >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client6-year Utf8 NOT NULL-True] >> TStorageServiceTest::ShouldNotCreateCheckpointAfterGenerationChanged [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-hive-True-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client3-column_type3-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client4-column_type4-True] |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_sqs_writes_through_proxy_on_each_node[tables_format_v1-std] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-False-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client4-column_type4-True] [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateBindingWithUnavailableConnection [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateConnectionWithDesctructionBinding >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client5-column_type5-False] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation >> TYdbControlPlaneStoragePipeline::ShouldCheckResultsTTL [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckDisableCurrentIamGetTask >> TStorageServiceTest::ShouldGetState [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateBinding::ShouldCheckIdempotencyKey [GOOD] Test command err: 2025-06-03T10:45:35.138193Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511672467503267358:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:45:35.138216Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path 
existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fc1/r3tmp/tmpYb4WHl/pdisk_1.dat 2025-06-03T10:45:35.391383Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 25959, node 1 2025-06-03T10:45:35.493716Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:45:35.493730Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:45:35.493732Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:45:35.493781Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:45:35.534734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:45:35.534776Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:45:35.539734Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:1256 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:45:35.647881Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:45:35.715315Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvNodesHealthCheckRequest 2025-06-03T10:45:36.269663Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-03T10:45:36.271074Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvCreateQueryRequest 2025-06-03T10:45:36.278760Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest Wait query execution 0.002459s: STARTING 2025-06-03T10:45:37.239070Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-03T10:45:37.239425Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:367: QueryId: utque9atllta6o98ms19 Start run actor. 
Compute state: STARTING 2025-06-03T10:45:37.239435Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:458: QueryId: utque9atllta6o98ms19 FillConnections 2025-06-03T10:45:37.239460Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:2283: QueryId: utque9atllta6o98ms19 Run actors params: { QueryId: utque9atllta6o98ms19 CloudId: mock_cloud UserId: root@builtin Owner: 8c02399-db1939a6-8052d483-4697b2c02 PreviousQueryRevision: 1 Connections: 0 Bindings: 0 AccountIdSignatures: 0 QueryType: STREAMING ExecuteMode: RUN ResultId: utrue9atlkvop2bd5uuq StateLoadMode: EMPTY StreamingDisposition: { } Status: STARTING DqGraphs: 0 DqGraphIndex: 0 Resource.TopicConsumers: 0 } 2025-06-03T10:45:37.239470Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1942: QueryId: utque9atllta6o98ms19 Compiling query ... 2025-06-03T10:45:37.241818Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.241972Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-03T10:45:37.242044Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:845: QueryId: utque9atllta6o98ms19 Forward ping response. Success: 1. Cookie: 2 2025-06-03T10:45:37.242113Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.242151Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:845: QueryId: utque9atllta6o98ms19 Forward ping response. Success: 1. Cookie: 0 2025-06-03T10:45:37.279634Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest Wait query execution 1.003595s: RUNNING 2025-06-03T10:45:37.560582Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:909: QueryId: utque9atllta6o98ms19 Graph (execution) with tasks: 1 2025-06-03T10:45:37.560691Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:892: QueryId: utque9atllta6o98ms19 Overall dq tasks: 1 2025-06-03T10:45:37.560808Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:742: QueryId: utque9atllta6o98ms19 Graph 0 2025-06-03T10:45:37.561082Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.561212Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:845: QueryId: utque9atllta6o98ms19 Forward ping response. Success: 1. Cookie: 0 2025-06-03T10:45:37.561349Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.561408Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:845: QueryId: utque9atllta6o98ms19 Forward ping response. Success: 1. Cookie: 1 2025-06-03T10:45:37.561415Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:892: QueryId: utque9atllta6o98ms19 Overall dq tasks: 1 2025-06-03T10:45:37.563055Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1639: QueryId: utque9atllta6o98ms19 Executer: [1:7511672476093202613:2357], Controller: [1:7511672476093202615:2359], ResultIdActor: [1:7511672476093202614:2358] 2025-06-03T10:45:37.563309Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.563456Z node 1 :FQ_RUN_ACTOR TRACE: run_actor.cpp:845: QueryId: utque9atllta6o98ms19 Forward ping response. Success: 1. Cookie: 0 2025-06-03T10:45:37.681603Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvWriteResultDataRequest 2025-06-03T10:45:37.693231Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1213: QueryId: utque9atllta6o98ms19 Query response SUCCESS. Result set index: 0. Issues count: 0. 
Rows count: 1 2025-06-03T10:45:37.697848Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1805: QueryId: utque9atllta6o98ms19 Is about to finish query with status COMPLETED 2025-06-03T10:45:37.697868Z node 1 :FQ_RUN_ACTOR DEBUG: run_actor.cpp:1798: QueryId: utque9atllta6o98ms19 Write finalizing status: COMPLETING 2025-06-03T10:45:37.698139Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:37.698414Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvPingTaskRequest 2025-06-03T10:45:38.239488Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetTaskRequest 2025-06-03T10:45:38.280638Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvDescribeQueryRequest 2025-06-03T10:45:38.282913Z node 1 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEvGetResultDataRequest 2025-06-03T10:45:38.882706Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511672481420656211:2082];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:45:38.883011Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001fc1/r3tmp/tmp3ipmUF/pdisk_1.dat 2025-06-03T10:45:38.924816Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:45:38.925117Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511672481420656153:2080] 1748947538878301 != 1748947538878305 TServer::EnableGrpc on GrpcPort 25287, node 2 2025-06-03T10:45:38.945276Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:45:38.945290Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:45:38.945309Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:45:38.945358Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13537 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
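[editor's note] The trace above shows the test driver polling the query status roughly once per second (the repeated TEvDescribeQueryRequest entries and the "Wait query execution 0.002459s: STARTING" / "Wait query execution 1.003595s: RUNNING" lines) until the query reaches a terminal state. A minimal sketch of such a wait loop, assuming a hypothetical describe_query callable and an assumed set of terminal statuses; the real helpers live in the YDB test framework and are not visible in this log:

    import time

    TERMINAL = {"COMPLETED", "FAILED", "ABORTED"}  # assumption: terminal query statuses

    def wait_query_execution(describe_query, query_id, timeout=30.0, poll_interval=1.0):
        """Poll query status until it is terminal, mirroring the
        'Wait query execution <elapsed>s: <STATUS>' lines in the trace above."""
        started = time.monotonic()
        while True:
            status = describe_query(query_id)  # e.g. one DescribeQuery round trip
            elapsed = time.monotonic() - started
            print(f"Wait query execution {elapsed:.6f}s: {status}")
            if status in TERMINAL:
                return status
            if elapsed > timeout:
                raise TimeoutError(f"query {query_id} still {status} after {elapsed:.1f}s")
            time.sleep(poll_interval)

This is only an illustration of the polling pattern the log records, not the actual test code.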
2025-06-03T10:45:38.996013Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:45:38.996044Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:45:38.996443Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:45:38.996904Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:45:38.999425Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:45:39.038392Z node 2 :YQ_CONTROL_PLANE_STORAGE INFO: in_memory_control_plane_storage.cpp:247: TEv ... eBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:22.233500Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-03T10:46:22.233509Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:22.233511Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:22.233984Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks". Create session OK 2025-06-03T10:46:22.233987Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:22.233990Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:22.234159Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases". 
Create session OK 2025-06-03T10:46:22.234161Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:22.234163Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:22.234743Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-03T10:46:22.234747Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:22.234749Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:22.234913Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK 2025-06-03T10:46:22.234915Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:22.234917Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:22.257429Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:22.257449Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:22.309485Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:22.309504Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:22.333552Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:22.333577Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for 
create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:22.334030Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:22.334034Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:22.334154Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:22.334157Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:22.334994Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:22.335006Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:22.335210Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:22.335214Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:22.335295Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:22.335297Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:22.335361Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:22.335365Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:22.335441Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:22.335444Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:22.335506Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:22.335509Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:22.335606Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:22.335608Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:22.335676Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:22.335678Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:22.335727Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:22.335729Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:22.341549Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:22.341581Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateBinding::TTestCaseShouldCheckIdempotencyKey::Execute_(NUnitTest::TTestContext&)/result_sets": >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client5-column_type5-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client6-year Utf8 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client6-column_type6-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client7-year Utf8-False] >> 
TStorageServiceTest::ShouldNotAbortCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending >> test_ydb_over_fq.py::TestYdbOverFq::test_execute_data_query_error[v1-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client6-column_type6-True] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client7-column_type7-True] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldCheckDisableCurrentIamGetTask [GOOD] >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.tsv-tsv_with_names] >> TStorageServiceTest::ShouldNotCompleteCheckpointWithoutPending [GOOD] >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client7-column_type7-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client8-column_type8-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client8-column_type8-False] [GOOD] >> test_s3_0.py::TestS3::test_schema_validation[v1-client0] [GOOD] >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] >> TYdbControlPlaneStorageListConnections::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckSuperUser >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client9-column_type9-False] >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client9-column_type9-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client10-column_type10-False] >> TStateStorageTest::ShouldIssueErrorOnWrongGetStateParams [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/multinode/py3test >> test_multinode_cluster.py::TestSqsMultinodeCluster::test_has_messages_counters[stop_node-fifo] [GOOD] |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-False-client0] [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-True-client0] |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client10-column_type10-False] [GOOD] >> TStorageServiceTest::ShouldNotRegisterPrevGeneration >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client11-column_type11-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client7-year Utf8-False] [GOOD] >> TStateStorageTest::ShouldIssueErrorOnNonExistentState [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldGetState [GOOD] Test command err: 2025-06-03T10:46:24.016890Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7511672674038384366:2048] with connection to localhost:10736:local 2025-06-03T10:46:24.023607Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:24.062078Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:24.062098Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:24.062254Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:24.172677Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:24.172718Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:24.554255Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7511672677184164150:2048] with connection to localhost:10736:local 2025-06-03T10:46:24.554311Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:24.591904Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:24.591928Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:24.593459Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:24.719061Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:24.719080Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:24.719487Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-03T10:46:24.748866Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-03T10:46:24.748888Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-03T10:46:24.749208Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-03T10:46:24.769531Z node 2 
:STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-03T10:46:24.769550Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-03T10:46:24.773493Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:24.815277Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-03T10:46:25.073437Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7511672679207091744:2048] with connection to localhost:10736:local 2025-06-03T10:46:25.073513Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:25.114764Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:25.114785Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:25.117427Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:25.255175Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:25.255198Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:25.255368Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:25.305747Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-03T10:46:25.305769Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:25.306246Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-03T10:46:25.323328Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-03T10:46:25.323366Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-03T10:46:25.323515Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:25.340213Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-03T10:46:25.340230Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:25.341570Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:25.358696Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-03T10:46:25.358716Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:25.358929Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got 
TEvAbortCheckpointRequest 2025-06-03T10:46:25.373680Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:1] Checkpoint aborted 2025-06-03T10:46:25.373699Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-03T10:46:25.377353Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:2] Got TEvAbortCheckpointRequest 2025-06-03T10:46:25.392785Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:333: [graph_graphich.17] [17:2] Checkpoint aborted 2025-06-03T10:46:25.392816Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:2] Send TEvAbortCheckpointResponse 2025-06-03T10:46:25.392990Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:25.424539Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-03T10:46:25.729683Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7511672680598458123:2048] with connection to localhost:10736:local 2025-06-03T10:46:25.729735Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:25.772767Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:25.772786Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:25.772964Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:25.913445Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:25.913468Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:25.913658Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-03T10:46:25.934491Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-03T10:46:25.934521Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-03T10:46:25.934662Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:413: [graph_graphich] [17:1] Got TEvGetTaskState: tasks {1317} 2025-06-03T10:46:25.934684Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:532: [graph_graphich] [17:1] GetState, tasks: 1317 2025-06-03T10:46:26.024522Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:667: [graph_graphich] [17:1] ListOfStates results: 2025-06-03T10:46:26.024559Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:688: [graph_graphich] [17:1] taskId 1317 checkpoint id: 17:1, rows count: 1 2025-06-03T10:46:26.024583Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:920: [graph_graphich] [17:1] SkipStatesInFuture, skip 0 checkpoints 2025-06-03T10:46:26.024797Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:812: [graph_graphich] [17:1] SelectState: task_id 1317, seq_no 1, blob_seq_num 0 2025-06-03T10:46:26.080349Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:423: 
[graph_graphich] [17:1] DeserializeState, task id 1317, blob size 49 2025-06-03T10:46:26.080391Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: ydb_state_storage.cpp:979: [graph_graphich] [17:1] ApplyIncrements 2025-06-03T10:46:26.082494Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:432: [graph_graphich] [{ Id: 1 Generation: 17 }] Send TEvGetTaskStateResult: tasks: {1317} >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client8-year Int32-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v2-client0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client11-column_type11-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client12-column_type12-False] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] >> TStateStorageTest::ShouldLoadLastSnapshot [GOOD] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.json-json_each_row] >> TYdbControlPlaneStorageModifyBinding::ShouldNotCreatePrivateConnectionWithDesctructionBinding [GOOD] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionWrite [GOOD] >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client12-column_type12-False] [GOOD] >> TStorageServiceTest::ShouldNotRegisterPrevGeneration [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client13-column_type13-False] >> TStateStorageTest::ShouldNotGetNonExistendSnaphotState [GOOD] >> TStateStorageTest::ShouldLoadIncrementSnapshot >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask [GOOD] >> test_statistics.py::TestS3::test_sum[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_sum[v2-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client13-column_type13-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client14-column_type14-False] >> TStorageServiceTest::ShouldNotCreateCheckpointWhenUnregistered [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> test_s3_1.py::TestS3::test_i18n_partitioning[v2-true-projection-True-client0] [GOOD] >> 
test_s3_1.py::TestS3::test_huge_source[v1-false-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client14-column_type14-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client15-column_type15-False] |75.7%| [TA] $(B)/ydb/tests/functional/sqs/multinode/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotCompleteCheckpointGenerationChanged [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v1-client15-column_type15-False] [GOOD] Test command err: 2025-06-03T10:46:25.114342Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7511672679021827627:2048] with connection to localhost:32491:local 2025-06-03T10:46:25.114408Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:25.322686Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:25.322723Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:25.324365Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:25.477532Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:25.477557Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:25.477738Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:25.485666Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-03T10:46:25.485682Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:25.485927Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-03T10:46:25.494362Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:2] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointAfterGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-03T10:46:25.494380Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-03T10:46:25.916379Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7511672682005172915:2048] with connection to localhost:32491:local 2025-06-03T10:46:25.916431Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:25.953795Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:25.953827Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:25.953982Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:25.981270Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-03T10:46:25.981289Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:26.448822Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7511672684359045609:2048] with connection to localhost:32491:local 2025-06-03T10:46:26.448877Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:26.484607Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:26.484638Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:26.484825Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:319: [graph_graphich.17] [17:1] Got TEvAbortCheckpointRequest 2025-06-03T10:46:26.513683Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:331: [graph_graphich.17] [17:1] Failed to abort checkpoint:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-03T10:46:26.513701Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:335: [graph_graphich.17] [17:1] Send TEvAbortCheckpointResponse 2025-06-03T10:46:26.966820Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7511672686832100018:2048] with connection to localhost:32491:local 2025-06-03T10:46:26.966873Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:27.007224Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:27.007243Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:27.008100Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:27.128833Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:27.128845Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:27.133548Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:27.161817Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Selected checkpoint '17:1' with status Pending, while expected PendingCommit, code: 400080 2025-06-03T10:46:27.161840Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:27.483969Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7511672691192395761:2048] with connection to localhost:32491:local 2025-06-03T10:46:27.484023Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:27.519576Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:27.519593Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:27.519807Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:27.639970Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:27.639988Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:27.640132Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:27.691961Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-03T10:46:27.691986Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:27.692522Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:27.699561Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-03T10:46:27.699592Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:27.699786Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:27.703059Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:302: [graph_graphich.17] [17:1] Failed to set 'Completed' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-03T10:46:27.703073Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse >> test_ydb_over_fq.py::TestYdbOverFq::test_explain_data_query[v1-client0] [GOOD] >> TStorageServiceTest::ShouldNotCreateCheckpointTwice [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client0-column_type0-False] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v2-client0] >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.json-json_each_row] [GOOD] |75.7%| [TA] {RESULT} $(B)/ydb/tests/functional/sqs/multinode/test-results/py3test/{meta.json ... results_accumulator.log} >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client0-column_type0-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.parquet-parquet] >> TYdbControlPlaneStorageListBindings::ShouldCheckScopeVisibility [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckPrivateVisibility >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] >> TStorageServiceTest::ShouldNotPendingCheckpointWithoutCreation [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client1-column_type1-True] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] >> test_select.py::TestDML::test_as_table >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client1-column_type1-True] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client2-column_type2-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client8-year Int32-False] [GOOD] >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client9-year Uint32-False] >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client2-column_type2-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client3-column_type3-True] >> TYdbControlPlaneStorageListConnections::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByName |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] >> test_ydb_over_fq.py::TestYdbOverFq::test_describe_table[v1-client0] [GOOD] >> 
test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v2-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStoragePipeline::ShouldReturnPartialBatchForGetTask [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/6] root 52 0.1 0.0 0 0 ? S 10:12 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/6] root 54 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/6:0-rcu_gp] root 55 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/7] root 58 0.1 0.0 0 0 ? S 10:12 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/8] root 64 0.1 0.0 0 0 ? S 10:12 0:02 [migration/8] root 65 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/8] root 67 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/8:0H-events_highpri] root 68 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/9] root 69 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/9] root 70 0.1 0.0 0 0 ? S 10:12 0:02 [migration/9] root 71 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/9] root 73 0.0 0.0 0 0 ? 
I< 10:12 0:00 [kworker/9:0H-events_highpri] root 74 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/10] root 75 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/10] root 76 0.1 0.0 0 0 ? S 10:12 0:02 [migration/10] root 77 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/10] root 79 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/10:0H-events_highpri] root 80 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/11] root 81 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/11] root 82 0.1 0.0 0 0 ? S 10:12 0:02 [migration/11] root 83 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/11] root 85 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/11:0H-events_highpri] root 86 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/12] root 87 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/12] root 88 0.1 0.0 0 0 ? S 10:12 0:02 [migration/12] root 89 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/12] root 90 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/12:0-rcu_gp] root 91 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/12:0H-events_highpri] root 92 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/13] root 93 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/13] root 94 0.1 0.0 0 0 ? S 10:12 0:02 [migration/13] root 95 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/13] root 97 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/13:0H-events_highpri] root 98 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/14] root 99 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/14] root 100 0.1 0.0 0 0 ? S 10:12 0:02 [migration/14] root 101 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/14] root 103 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/14:0H-events_highpri] root 104 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/15] root 105 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/15] root 106 0.1 0.0 0 0 ? S 10:12 0:02 [migration/15] root 107 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/15] root 109 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/15:0H-events_highpri] root 110 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/16] root 111 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/16] root 112 0.1 0.0 0 0 ? S 10:12 0:02 [migration/16] root 113 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/16] root 115 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/16:0H-events_highpri] root 116 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/17] root 117 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/17] root 118 0.1 0.0 0 0 ? S 10:12 0:02 [migration/17] root 119 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/17] root 121 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/17:0H-events_highpri] root 122 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/18] root 123 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/18] root 124 0.1 0.0 0 0 ? S 10:12 0:02 [migration/18] root 125 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/18] root 127 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/18:0H-events_highpri] root 128 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/19] root 129 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/19] root 130 0.1 0.0 0 0 ? S 10:12 0:02 [migration/19] root 131 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/19] root 133 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/19:0H-events_highpri] root 134 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/20] root 135 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/20] root 136 0.1 0.0 0 0 ? S 10:12 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/20] root 139 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/21] root 142 0.1 0.0 0 0 ? S 10:12 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? 
I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:27.401380Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas". Create session OK 2025-06-03T10:46:27.401399Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:27.401401Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:27.401724Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings". Create session OK 2025-06-03T10:46:27.401733Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:27.401734Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:27.401852Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections". 
Create session OK 2025-06-03T10:46:27.401853Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:27.401854Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:27.401929Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys". Create session OK 2025-06-03T10:46:27.401931Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:27.401932Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:27.414945Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:27.414963Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:27.438246Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:27.438266Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:27.457270Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:27.457290Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:27.457315Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:27.457323Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:27.457541Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:27.457545Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:27.457548Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:27.457550Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:27.457635Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:27.457637Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:27.457685Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:27.457688Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:27.473749Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:27.473761Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:27.473769Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:27.473774Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:27.474005Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:27.474008Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:27.474197Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:27.474222Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:27.474419Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:27.474423Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:27.474539Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:27.474542Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:27.474581Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:27.474585Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStoragePipeline::TTestCaseShouldReturnPartialBatchForGetTask::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:28.600341Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: task_ping.cpp:427: PingTaskRequest (resign): UNAVAILABLE 1 2025-06-03T10:46:28.600288Z 0.000000s 2025-06-03T10:46:28.901549Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: task_ping.cpp:427: PingTaskRequest (resign): UNAVAILABLE 1 2025-06-03T10:46:28.901527Z 0.000000s 2025-06-03T10:46:29.107890Z node 17 :YQ_CONTROL_PLANE_STORAGE ERROR: ydb_control_plane_storage.cpp:608: Validation: (NYql::TCodeLineException) :0: Error parsing proto message for query. 
Please contact internal support |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldLoadIncrementSnapshot [GOOD] >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client3-column_type3-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client4-column_type4-True] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_insert.py::TestS3::test_insert_deadlock[v2-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=904376) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/test_insert.py.TestS3.test_insert.v1-false-client0-json_list-dataset/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 906722 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258d/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_insert/testing_out_stuff/test_insert.py.TestS3.test_insert.v1-false-client0-json_list-dataset/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v1-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.csv-csv_with_names] >> 
test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client4-column_type4-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client5-column_type5-False] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageModifyBinding::ShouldCheckObjectStorageProjectionByTypes [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/6] root 52 0.1 0.0 0 0 ? S 10:12 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/6] root 54 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/6:0-rcu_gp] root 55 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/7] root 58 0.1 0.0 0 0 ? S 10:12 0:02 [migration/7] root 59 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/7] root 61 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/7:0H-events_highpri] root 62 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/8] root 63 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/8] root 64 0.1 0.0 0 0 ? S 10:12 0:02 [migration/8] root 65 0.0 0.0 0 0 ? 
...
S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... ding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:29.665572Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:29.666182Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs". Create session OK 2025-06-03T10:46:29.666187Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:29.666189Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:29.666422Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings". Create session OK 2025-06-03T10:46:29.666425Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:29.666426Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:29.666522Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants". 
Create session OK 2025-06-03T10:46:29.666524Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:29.666525Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:29.685063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:29.685092Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:29.745205Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:29.745221Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:29.765546Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:29.765565Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:29.773206Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:29.773226Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:29.773463Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:29.773466Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:29.773524Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:29.773526Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:29.773594Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:29.773596Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:29.782592Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:29.782616Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:29.782966Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:29.782969Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:29.783061Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:29.783063Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:29.783137Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:29.783139Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:29.783215Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants" 
2025-06-03T10:46:29.783217Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:29.783279Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:29.783281Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:29.783362Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:29.783364Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:29.783437Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:29.783439Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageModifyBinding::TTestCaseShouldCheckObjectStorageProjectionByTypes::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:30.414095Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage_bindings.cpp:81: [yandexcloud://test_folder_id_1, test_user@staff, utbue9atk290jnfcnash] CreateBindingRequest, validation failed: **** (D7BA8005) content { name: "test_binding_name_1" connection_id: "utcue9atk2r0p63ks4hk" setting { object_storage { subset { path_pattern: "/root/" schema { column { name: "a" type { type_id: BOOL } } } partitioned_by: "a" } } } acl { visibility: PRIVATE } } error:
: Error: Column "a" from projection does not support Bool type, code: 400010 >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client5-column_type5-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client9-year Uint32-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client6-column_type6-True] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client10-year Int64 NOT NULL-True] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByName [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByMe >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v2-client0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldNotPendingCheckpointGenerationChanged [GOOD] Test command err: 2025-06-03T10:46:28.808508Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7511672693045313654:2048] with connection to localhost:29681:local 2025-06-03T10:46:28.808560Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:29.002709Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:29.002727Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:29.002951Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.16] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:29.006764Z node 1 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.16] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldNotRegisterPrevGeneration/coordinators_sync, pk: graph_graphich, current generation: 17, expected/new generation: 16, operation: RegisterCheck, code: 400130 2025-06-03T10:46:29.006781Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.16] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:29.417429Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7511672700933583226:2048] with connection to localhost:29681:local 2025-06-03T10:46:29.417493Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:29.523875Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Warning: Table: local/TStorageServiceTestShouldNotCreateCheckpointWhenUnregistered/coordinators_sync, pk: graph_graphich, current generation: 0, expected/new generation: 17, operation: Check, code: 400130 2025-06-03T10:46:29.523899Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:29.979729Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7511672700275167887:2048] with connection to localhost:29681:local 2025-06-03T10:46:29.979797Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:30.026000Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:30.026031Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:30.026275Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:30.170949Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:30.170972Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:30.171224Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:30.203603Z node 3 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:249: [graph_graphich.17] [17:1] Failed to create checkpoint:
: Error: Constraint violated. Table: `local/TStorageServiceTestShouldNotCreateCheckpointTwice/checkpoints_metadata`., code: 2012
: Error: Conflict with existing key., code: 2012 2025-06-03T10:46:30.203623Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:30.741444Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7511672704140630186:2048] with connection to localhost:29681:local 2025-06-03T10:46:30.741510Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:30.797723Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:30.797748Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:30.797990Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:30.836810Z node 4 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Failed to select checkpoint '17:1', code: 400080 2025-06-03T10:46:30.836828Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:31.192229Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7511672707824398800:2048] with connection to localhost:29681:local 2025-06-03T10:46:31.192304Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:31.231047Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:31.231070Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:31.231285Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:31.369648Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:31.369671Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:31.370090Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:31.393702Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-03T10:46:31.393726Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:31.393894Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:31.397037Z node 5 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:274: [graph_graphich.17] [17:1] Failed to set 'PendingCommit' status:
: Warning: Table: local/TStorageServiceTestShouldNotPendingCheckpointGenerationChanged/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: Check, code: 400130 2025-06-03T10:46:31.397058Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v1-client0] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client6-column_type6-True] [GOOD] >> test_select.py::TestDML::test_as_table [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client7-column_type7-True] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_validation.py::TestS3::test_nested_type[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_validation/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_validation/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=904375) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 906375 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> TStorageServiceTest::ShouldRegister >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client7-column_type7-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client8-column_type8-False] >> test_quota_exhaustion.py::TestYdbWorkload::test_delete [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] >> test_s3_1.py::TestS3::test_huge_source[v1-false-client0] [GOOD] >> TStorageServiceTest::ShouldRegister [GOOD] >> TStorageServiceTest::ShouldRegisterNextGeneration >> test_s3_1.py::TestS3::test_huge_source[v1-true-client0] >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client8-column_type8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client10-year Int64 NOT NULL-True] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client9-column_type9-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client11-year Int64-False] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.csv-csv_with_names] [GOOD] >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v1-client0] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] |75.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.tsv-tsv_with_names] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByMe [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCombineFilters >> TStorageServiceTest::ShouldRegisterNextGeneration [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client9-column_type9-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client10-column_type10-False] >> TYdbControlPlaneStorageListBindings::ShouldCheckPrivateVisibility [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckSuperUser >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client10-column_type10-False] [GOOD] >> TStorageServiceTest::ShouldPendingAndCompleteCheckpoint [GOOD] >> TStorageServiceTest::ShouldSaveState >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client11-column_type11-False] >> 
test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client11-column_type11-False] [GOOD] >> test_ydb_backup.py::TestAlterBackupRestore::test_alter_table_with_data_backup_restore >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client12-column_type12-False] >> TStorageServiceTest::ShouldSaveState [GOOD] >> TStorageServiceTest::ShouldUseGc >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] >> TYdbControlPlaneStorageListConnections::ShouldCombineFilters [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client12-column_type12-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client13-column_type13-False] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.tsv-tsv_with_names] [GOOD] |75.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.json-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client13-column_type13-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client11-year Int64-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client14-column_type14-False] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client12-year Uint64-False] >> TStorageServiceTest::ShouldUseGc [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client14-column_type14-False] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client15-column_type15-False] >> test_explicit_partitioning_1.py::TestS3::test_binding_projection_date_type_validation[v2-client15-column_type15-False] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-false-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageCreateConnection::ShouldCheckCommitTransactionReadWrite [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? 
...
465227Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjug9b4ddkt8q] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:34.571471Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:34.571783Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjudbolkuejst] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:34.705953Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:34.706318Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjua3o0mqve39] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:34.829589Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:34.829947Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atju604r2ig6nb] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:34.977171Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:34.977535Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atju27hthn3suu] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.015829Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage.cpp:437: DB Error, Status: BAD_SESSION, Issues: [ {
: Error: Exceeded maximum allowed number of active transactions, code: 2014 } {
: Error: ydb/core/kqp/session_actor/kqp_session_actor.cpp:861: Too many transactions, current active: 10 MaxTxPerSession: 10 } ], Query: --!syntax_v1 -- Query name: Unknown query name PRAGMA TablePathPrefix("local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageCreateConnection::TTestCaseShouldCheckCommitTransactionReadWrite::Execute_(NUnitTest::TTestContext&)"); DECLARE $idempotency_key as String; DECLARE $scope as String; SELECT `response` FROM `idempotency_keys` WHERE `scope` = $scope AND `idempotency_key` = $idempotency_key; 2025-06-03T10:46:35.165446Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.165795Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjttne3v6bqpm] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.292941Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.293423Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjtnvgvc9tmpv] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.405924Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.406279Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjtk2t0ghb60j] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.554022Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.555204Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjtgkkj1q8m4h] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.664003Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.664342Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjtc34rec7173] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.792131Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.792538Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjt8okvoclj15] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:35.949622Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:35.949983Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjt4rhhrhuqvt] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:36.074026Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:36.074472Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjt016eff4sdi] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:36.185646Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:36.185998Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjss84udp8hae] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } 2025-06-03T10:46:36.289657Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: ydb_control_plane_storage.cpp:499: Validation: (NYql::TCodeLineException) ydb/core/fq/libs/control_plane_storage/validators.cpp:46: Connection with the same name already exists. Please choose another name 2025-06-03T10:46:36.289922Z node 17 :YQ_CONTROL_PLANE_STORAGE WARN: ydb_control_plane_storage_impl.h:770: [yandexcloud://test_folder_id_1, test_user2@staff, utcue9atjsonvggdfi17] CreateConnectionRequest: {content { name: "test_connection_name_1" setting { data_streams { database_id: "my_database_id" auth { current_iam { } } } } acl { visibility: SCOPE } } idempotency_key: "aba" } ERROR: {
: Error: Connection with the same name already exists. Please choose another name, code: 1003 } >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType [GOOD] >> test_s3_1.py::TestS3::test_huge_source[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_huge_source[v2-false-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.parquet-parquet] >> test_alter_compression.py::TestAlterCompression::test[alter_compression] [GOOD] >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStorageServiceTest::ShouldUseGc [GOOD] Test command err: 2025-06-03T10:46:34.754714Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [1:7511672719032621546:2048] with connection to localhost:15386:local 2025-06-03T10:46:34.754780Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:34.977842Z node 1 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:34.977864Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:35.417884Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [2:7511672726737686683:2048] with connection to localhost:15386:local 2025-06-03T10:46:35.417932Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:35.459586Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:35.459619Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:35.459783Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.18] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:35.470591Z node 2 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.18] Graph registered 2025-06-03T10:46:35.470615Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.18] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:35.470848Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:35.477094Z node 2 :STREAMS_STORAGE_SERVICE WARN: storage_proxy.cpp:197: [graph_graphich.17] Failed to register graph:
: Warning: Table: local/TStorageServiceTestShouldRegisterNextGeneration/coordinators_sync, pk: graph_graphich, current generation: 18, expected/new generation: 17, operation: RegisterCheck, code: 400130 2025-06-03T10:46:35.477108Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:35.953074Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [3:7511672724292099906:2048] with connection to localhost:15386:local 2025-06-03T10:46:35.953156Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:35.990439Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:35.990458Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:35.990604Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:36.122102Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:36.122121Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:36.125758Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:36.176790Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-03T10:46:36.176812Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:36.176978Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-03T10:46:36.192150Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-03T10:46:36.192173Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-03T10:46:36.192326Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:36.207208Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-03T10:46:36.207223Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:36.207372Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:36.219607Z node 3 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-03T10:46:36.219621Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:36.219849Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:36.258601Z node 3 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 
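
The BAD_SESSION failure in the CreateConnection loop above ("Too many transactions, current active: 10 MaxTxPerSession: 10", from kqp_session_actor.cpp) is a per-session cap, not a storage-level error: each retry first looks up the idempotency_keys table (the YQL query quoted in the error) inside a transaction on the shared session, and once ten transactions are left active the next begin is rejected. A toy model of that bookkeeping, with all names invented here for illustration (the real enforcement lives in the KQP session actor):

```python
class TooManyTransactions(Exception):
    pass


class ToySession:
    """Toy model of a session with a cap on concurrently active transactions.

    Illustrative only: mirrors the "current active: 10 MaxTxPerSession: 10"
    message in the log above, not the real KQP session actor.
    """

    def __init__(self, max_tx_per_session: int = 10) -> None:
        self.max_tx_per_session = max_tx_per_session
        self.active: set[int] = set()
        self._next_id = 0

    def begin_tx(self) -> int:
        if len(self.active) >= self.max_tx_per_session:
            raise TooManyTransactions(
                f"Too many transactions, current active: {len(self.active)} "
                f"MaxTxPerSession: {self.max_tx_per_session}"
            )
        self._next_id += 1
        self.active.add(self._next_id)
        return self._next_id

    def finish_tx(self, tx_id: int) -> None:
        # Commit and rollback both free a slot; a leaked (never finished)
        # transaction keeps its slot until the session is discarded.
        self.active.discard(tx_id)
```

Leaking ten transactions and then beginning an eleventh reproduces the error, which is what the ShouldCheckCommitTransactionReadWrite case above appears to exercise. The RegisterCheck warning in this block (code 400130, stored generation 18 vs. new generation 17) is the separate coordinator-generation fence: an older generation may not re-register once a newer one has been recorded in coordinators_sync.
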
2025-06-03T10:46:36.594618Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [4:7511672730712110551:2048] with connection to localhost:15386:local 2025-06-03T10:46:36.594706Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:36.627056Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:36.627083Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:36.632312Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:36.773065Z node 4 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:36.773090Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:36.773355Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:365: [graph_graphich] [17:1] Got TEvSaveTaskState: task 1317 2025-06-03T10:46:36.797581Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:389: [graph_graphich] [17:1] TEvSaveTaskState Apply: task: 1317 2025-06-03T10:46:36.797607Z node 4 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:404: [graph_graphich] [17:1] Send TEvSaveTaskStateResult: task: 1317 2025-06-03T10:46:37.113934Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:178: Successfully bootstrapped TStorageProxy [5:7511672731054729416:2048] with connection to localhost:15386:local 2025-06-03T10:46:37.113967Z node 5 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [5:7511672735349696815:2130] 2025-06-03T10:46:37.113980Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:185: [graph_graphich.17] Got TEvRegisterCoordinatorRequest 2025-06-03T10:46:37.153422Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:199: [graph_graphich.17] Graph registered 2025-06-03T10:46:37.153446Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:201: [graph_graphich.17] Send TEvRegisterCoordinatorResponse 2025-06-03T10:46:37.161377Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:1] Got TEvCreateCheckpointRequest 2025-06-03T10:46:37.285961Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:1] Checkpoint created 2025-06-03T10:46:37.285989Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:1] Send TEvCreateCheckpointResponse 2025-06-03T10:46:37.286337Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:1] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:37.340131Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:1] Status updated to 'PendingCommit' 2025-06-03T10:46:37.340149Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:1] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:37.340346Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:1] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:37.357951Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:1] Status updated to 'Completed' 2025-06-03T10:46:37.357966Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: 
[graph_graphich.17] [17:1] Send TEvNewCheckpointSucceeded 2025-06-03T10:46:37.357982Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:1] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:37.358126Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:1 for graph 'graph_graphich' 2025-06-03T10:46:37.358300Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:2] Got TEvCreateCheckpointRequest 2025-06-03T10:46:37.401474Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:2] Checkpoint created 2025-06-03T10:46:37.401495Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:2] Send TEvCreateCheckpointResponse 2025-06-03T10:46:37.401890Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:2] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:37.439571Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:2] Status updated to 'PendingCommit' 2025-06-03T10:46:37.439594Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:2] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:37.441934Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:2] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:37.460098Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:1 2025-06-03T10:46:37.488374Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:2] Status updated to 'Completed' 2025-06-03T10:46:37.488396Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:2] Send TEvNewCheckpointSucceeded 2025-06-03T10:46:37.488414Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:2] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:37.488467Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:2 for graph 'graph_graphich' 2025-06-03T10:46:37.489660Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:209: [graph_graphich.17] [17:3] Got TEvCreateCheckpointRequest 2025-06-03T10:46:37.519183Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:2 2025-06-03T10:46:37.541599Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:251: [graph_graphich.17] [17:3] Checkpoint created 2025-06-03T10:46:37.541629Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:253: [graph_graphich.17] [17:3] Send TEvCreateCheckpointResponse 2025-06-03T10:46:37.549491Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:261: [graph_graphich.17] [17:3] Got TEvSetCheckpointPendingCommitStatusRequest 2025-06-03T10:46:37.597794Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:276: [graph_graphich.17] [17:3] Status updated to 'PendingCommit' 2025-06-03T10:46:37.597815Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:278: [graph_graphich.17] [17:3] Send TEvSetCheckpointPendingCommitStatusResponse 2025-06-03T10:46:37.598268Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:286: [graph_graphich.17] [17:3] Got TEvCompleteCheckpointRequest 2025-06-03T10:46:37.636702Z node 5 :STREAMS_STORAGE_SERVICE INFO: storage_proxy.cpp:304: [graph_graphich.17] [17:3] Status updated to 'Completed' 2025-06-03T10:46:37.636721Z node 5 :STREAMS_STORAGE_SERVICE 
DEBUG: storage_proxy.cpp:307: [graph_graphich.17] [17:3] Send TEvNewCheckpointSucceeded 2025-06-03T10:46:37.636732Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:311: [graph_graphich.17] [17:3] Send TEvCompleteCheckpointResponse 2025-06-03T10:46:37.636929Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 17:3 for graph 'graph_graphich' 2025-06-03T10:46:37.641386Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:37.680419Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph_graphich' up to 17:3 2025-06-03T10:46:37.701855Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-03T10:46:37.809559Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:37.815547Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse 2025-06-03T10:46:37.917410Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:343: [graph_graphich] Got TEvGetCheckpointsMetadataRequest 2025-06-03T10:46:37.937470Z node 5 :STREAMS_STORAGE_SERVICE DEBUG: storage_proxy.cpp:356: [graph_graphich] Send TEvGetCheckpointsMetadataResponse >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] >> test_ydb_backup.py::TestAlterBackupRestore::test_alter_table_with_data_backup_restore [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client12-year Uint64-False] [GOOD] >> TCheckpointStorageTest::ShouldUpdateCheckpointStatusForCheckpointsWithTheSameGenAndNo [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client13-year Date-False] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-false-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-true-client0] >> TYdbControlPlaneStorageListBindings::ShouldCheckSuperUser [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCheckFilterByConnectionId ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
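
The ShouldUseGc trace above shows the checkpoint GC contract: every completed checkpoint publishes an upper bound to the GC actor ("GC received upperbound checkpoint 17:1 ... 17:2 ... 17:3"), which then deletes older checkpoints ("GC deleted checkpoints of graph 'graph_graphich' up to 17:2"). A minimal sketch of that watermark rule, assuming generation:number identifiers compare lexicographically and that the bound itself is kept as the restore point (both assumptions for illustration, not taken from the source):

```python
from typing import NamedTuple


class CheckpointId(NamedTuple):
    generation: int
    number: int  # CheckpointId(17, 2) prints in the log as "17:2"


def collect_garbage(stored: list[CheckpointId],
                    upperbound: CheckpointId) -> list[CheckpointId]:
    """Drop every checkpoint strictly below the upper bound, keep the rest."""
    # NamedTuple comparison is lexicographic, so 17:1 < 17:2 < 18:0.
    return [cp for cp in stored if cp >= upperbound]


stored = [CheckpointId(17, 1), CheckpointId(17, 2), CheckpointId(17, 3)]
assert collect_garbage(stored, CheckpointId(17, 2)) == [
    CheckpointId(17, 2), CheckpointId(17, 3)
]
```

The ShouldIgnoreIncrementCheckpoint run further below adds one more rule on top of this ("GC skip increment checkpoint"): an increment checkpoint never advances the upper bound.
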
>> test_statistics.py::TestS3::test_sum[v2-client0] [GOOD] >> test_statistics.py::TestS3::test_aborted_by_user[v1-client0] >> test_format_setting.py::TestS3::test_date_time_simple_iso_insert[v2-date_time/simple_iso/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_s3_0.py::TestS3::test_schema_validation[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=903870) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/test_s3_0.py.TestS3.test_csv.v1-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258e/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_0/testing_out_stuff/test_s3_0.py.TestS3.test_csv.v1-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 906588 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: 
ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ::1 - - [03/Jun/2025 10:45:24] send response localhost:63473/?database=local ::1 - - [03/Jun/2025 10:45:24] "GET /database?databaseId=FakeDatabaseId HTTP/1.1" 200 - contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListConnections::ShouldCheckFilterByConnectionType [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: [duplicate kernel-thread process listing elided; harness-truncated] ... E_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:37.540920Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:37.541039Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-03T10:46:37.541048Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:37.541049Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:37.541128Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks". 
Create session OK 2025-06-03T10:46:37.541135Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:37.541137Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:37.541830Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes". Create session OK 2025-06-03T10:46:37.541841Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:37.541843Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:37.542007Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small". Create session OK 2025-06-03T10:46:37.542017Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:37.542019Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:37.566114Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:37.566134Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:37.608167Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:37.608186Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:37.624413Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:37.624432Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:37.630183Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:37.630198Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:37.630257Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:37.630266Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:37.630916Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:37.630926Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:37.631110Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:37.631115Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:37.631124Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:37.631127Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:37.631216Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:37.631219Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for 
create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:37.631221Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:37.631223Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:37.631274Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:37.631276Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/tenant_acks": 2025-06-03T10:46:37.631277Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:37.631279Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:37.631336Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:37.631338Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:37.631342Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:37.631344Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:37.631380Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:37.631381Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListConnections::TTestCaseShouldCheckFilterByConnectionType::Execute_(NUnitTest::TTestContext&)/connections": ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> 
test_select.py::TestDML::test_as_table [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_s3_1.py::TestS3::test_huge_source[v2-false-client0] [GOOD] >> TGcTest::ShouldRemovePreviousCheckpoints [GOOD] >> TGcTest::ShouldIgnoreIncrementCheckpoint >> test_s3_1.py::TestS3::test_huge_source[v2-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_integer_type_validation[v2-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client0-year Int32 NOT NULL-False] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v1-true-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-false-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.json-json_each_row] >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] >> TYdbControlPlaneStorageListBindings::ShouldCheckFilterByConnectionId [GOOD] >> TYdbControlPlaneStorageListBindings::ShouldCombineFilters >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.parquet-parquet] >> test_statistics.py::TestS3::test_aborted_by_user[v1-client0] [GOOD] >> test_statistics.py::TestS3::test_aborted_by_user[v2-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client0-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client1-year Uint32 NOT NULL-False] >> TGcTest::ShouldIgnoreIncrementCheckpoint [GOOD] >> TStateStorageTest::ShouldCountStates >> 
test_s3_1.py::TestS3::test_huge_source[v2-true-client0] [GOOD] |75.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_s3_1.py::TestS3::test_top_level_listing[v1-false-client0] >> TStateStorageTest::ShouldCountStates [GOOD] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-false-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-true-client0] >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
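
The DeprecationWarning that recurs in these py3test blocks (contrib/python/ydb/py3/ydb/types.py:59) is Python 3.12 deprecating datetime.datetime.utcfromtimestamp(). The warning text itself names the replacement; a minimal before/after sketch (the timestamp value is illustrative):

```python
from datetime import datetime, timezone

timestamp = 1_717_411_200  # illustrative epoch seconds

# Deprecated since Python 3.12: yields a naive datetime implicitly in UTC.
naive = datetime.utcfromtimestamp(timestamp)

# Replacement suggested by the warning: an aware datetime with tzinfo set.
aware = datetime.fromtimestamp(timestamp, timezone.utc)

assert aware.replace(tzinfo=None) == naive
```

The aware form also round-trips cleanly through .timestamp(), whereas a naive value is interpreted in local time there and can silently shift.
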
>> TYdbControlPlaneStorageListBindings::ShouldCombineFilters [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client1-year Uint32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client2-year Uint64 NOT NULL-False] >> test_explicit_partitioning_1.py::TestS3::test_raw_format[v2-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-false-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v1-false-client0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] >> test_s3_1.py::TestS3::test_top_level_listing[v1-true-client0] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/fq/libs/checkpoint_storage/ut/unittest >> TStateStorageTest::ShouldCountStatesNonExistentCheckpoint [GOOD] Test command err: 2025-06-03T10:46:41.201708Z node 1 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [1:36:2083] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-03T10:46:41.261800Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-03T10:46:41.406428Z node 1 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:170: GC deleted checkpoints of graph 'graph' up to 11:3 Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/TGcTestShouldRemovePreviousCheckpoints"); SELECT * FROM checkpoints_graphs_description; 2025-06-03T10:46:42.719863Z node 2 :STREAMS_STORAGE_SERVICE INFO: gc.cpp:83: Successfully bootstrapped storage GC [2:36:2083] Count graph descriptions query: --!syntax_v1 PRAGMA TablePathPrefix("local/ShouldIgnoreIncrementCheckpoint"); SELECT * FROM checkpoints_graphs_description; 2025-06-03T10:46:42.778934Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:93: GC received upperbound checkpoint 11:3 for graph 'graph' 2025-06-03T10:46:42.778969Z node 2 :STREAMS_STORAGE_SERVICE DEBUG: gc.cpp:96: GC skip increment checkpoint for graph 'graph' |75.8%| [TA] $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... results_accumulator.log} |75.8%| [TA] {RESULT} $(B)/ydb/core/fq/libs/checkpoint_storage/ut/test-results/unittest/{meta.json ... 
results_accumulator.log} >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client2-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client3-year Date NOT NULL-False] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.json-json_each_row] >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/control_plane_storage/unittest >> TYdbControlPlaneStorageListBindings::ShouldCombineFilters [GOOD] Test command err: Netstat: sh: 1: netstat: not found Process stat: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 167288 10392 ? Ss 10:12 0:03 /sbin/init root 2 0.0 0.0 0 0 ? S 10:12 0:00 [kthreadd] root 3 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_gp] root 4 0.0 0.0 0 0 ? I< 10:12 0:00 [rcu_par_gp] root 5 0.0 0.0 0 0 ? I< 10:12 0:00 [slub_flushwq] root 6 0.0 0.0 0 0 ? I< 10:12 0:00 [netns] root 8 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/0:0H-events_highpri] root 9 3.9 0.0 0 0 ? I 10:12 1:18 [kworker/u128:0-ext4-rsv-conversion] root 11 0.0 0.0 0 0 ? I< 10:12 0:00 [mm_percpu_wq] root 12 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_rude_] root 13 0.0 0.0 0 0 ? S 10:12 0:00 [rcu_tasks_trace] root 14 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/0] root 15 0.1 0.0 0 0 ? I 10:12 0:02 [rcu_sched] root 16 0.0 0.0 0 0 ? S 10:12 0:00 [migration/0] root 17 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/0] root 19 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/0] root 20 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/1] root 21 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/1] root 22 0.1 0.0 0 0 ? S 10:12 0:02 [migration/1] root 23 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/1] root 25 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/1:0H-events_highpri] root 26 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/2] root 27 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2] root 28 0.1 0.0 0 0 ? S 10:12 0:02 [migration/2] root 29 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/2] root 31 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/2:0H-events_highpri] root 32 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/3] root 33 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/3] root 34 0.1 0.0 0 0 ? S 10:12 0:02 [migration/3] root 35 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/3] root 37 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/3:0H-events_highpri] root 38 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/4] root 39 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/4] root 40 0.1 0.0 0 0 ? S 10:12 0:02 [migration/4] root 41 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/4] root 43 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/4:0H-events_highpri] root 44 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/5] root 45 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/5] root 46 0.1 0.0 0 0 ? S 10:12 0:02 [migration/5] root 47 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/5] root 49 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/5:0H-events_highpri] root 50 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/6] root 51 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/6] root 52 0.1 0.0 0 0 ? S 10:12 0:02 [migration/6] root 53 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/6] root 54 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/6:0-rcu_gp] root 55 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/6:0H-events_highpri] root 56 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/7] root 57 0.0 0.0 0 0 ? 
S 10:12 0:00 [idle_inject/7] [... ps rows for PIDs 58-134 condensed: the same per-CPU kernel threads (idle_inject/N, migration/N, ksoftirqd/N, kworker/N:0H-events_highpri, cpuhp/N, and an occasional kworker/N:0) repeat for CPUs 7 through 19, all at 0.0-0.1 %CPU and zero RSS ...] root 135 0.0 0.0 0 0 ?
S 10:12 0:00 [idle_inject/20] root 136 0.1 0.0 0 0 ? S 10:12 0:02 [migration/20] root 137 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/20] root 139 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/20:0H-events_highpri] root 140 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/21] root 141 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/21] root 142 0.1 0.0 0 0 ? S 10:12 0:02 [migration/21] root 143 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/21] root 145 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/21:0H-events_highpri] root 146 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/22] root 147 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/22] root 148 0.1 0.0 0 0 ? S 10:12 0:02 [migration/22] root 149 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/22] root 150 0.0 0.0 0 0 ? I 10:12 0:00 [kworker/22:0-rcu_par_gp] root 151 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/22:0H-events_highpri] root 152 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/23] root 153 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/23] root 154 0.1 0.0 0 0 ? S 10:12 0:02 [migration/23] root 155 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/23] root 157 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/23:0H-events_highpri] root 158 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/24] root 159 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/24] root 160 0.1 0.0 0 0 ? S 10:12 0:02 [migration/24] root 161 0.0 0.0 0 0 ? S 10:12 0:00 [ksoftirqd/24] root 163 0.0 0.0 0 0 ? I< 10:12 0:00 [kworker/24:0H-kblockd] root 164 0.0 0.0 0 0 ? S 10:12 0:00 [cpuhp/25] root 165 0.0 0.0 0 0 ? S 10:12 0:00 [idle_inject/2 ... tualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:44.139823Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:44.140136Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants". Create session OK 2025-06-03T10:46:44.140144Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:44.140146Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:44.140147Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys". 
Create session OK 2025-06-03T10:46:44.140151Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:44.140153Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:44.140295Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets". Create session OK 2025-06-03T10:46:44.140303Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:44.140304Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:44.145571Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases". Create session OK 2025-06-03T10:46:44.145579Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:293: Create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes". 
Create session OK 2025-06-03T10:46:44.145587Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:44.145588Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:113: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:44.145590Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:44.145590Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:354: Call create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:44.200075Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)" 2025-06-03T10:46:44.200104Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create directory "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)": 2025-06-03T10:46:44.222597Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:155: Successfully created coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha" 2025-06-03T10:46:44.222621Z node 17 :YQ_RATE_LIMITER DEBUG: schema.cpp:122: Reply for create coordination node "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)_rate_limiter/alpha": 2025-06-03T10:46:44.241634Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys" 2025-06-03T10:46:44.241652Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/idempotency_keys": 2025-06-03T10:46:44.241969Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets" 2025-06-03T10:46:44.241981Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/result_sets": 2025-06-03T10:46:44.245433Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes" 2025-06-03T10:46:44.245453Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table 
"local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/nodes": 2025-06-03T10:46:44.249611Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants" 2025-06-03T10:46:44.249639Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenants": 2025-06-03T10:46:44.249951Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections" 2025-06-03T10:46:44.249964Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/connections": 2025-06-03T10:46:44.250084Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings" 2025-06-03T10:46:44.250108Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/mappings": 2025-06-03T10:46:44.250190Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small" 2025-06-03T10:46:44.250205Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/pending_small": 2025-06-03T10:46:44.253485Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings" 2025-06-03T10:46:44.253502Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/bindings": 2025-06-03T10:46:44.253756Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas" 2025-06-03T10:46:44.253764Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/quotas": 2025-06-03T10:46:44.253941Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases" 2025-06-03T10:46:44.253946Z node 17 
:YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/compute_databases": 2025-06-03T10:46:44.255812Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs" 2025-06-03T10:46:44.255831Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/jobs": 2025-06-03T10:46:44.256075Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries" 2025-06-03T10:46:44.256078Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/queries": 2025-06-03T10:46:44.256079Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:155: Successfully created table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks" 2025-06-03T10:46:44.256086Z node 17 :YQ_CONTROL_PLANE_STORAGE DEBUG: schema.cpp:122: Reply for create table "local/virtualvoidNFq::NTestSuiteTYdbControlPlaneStorageListBindings::TTestCaseShouldCombineFilters::Execute_(NUnitTest::TTestContext&)/tenant_acks": |75.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_statistics.py::TestS3::test_aborted_by_user[v2-client0] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v1-true-client0] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v2-false-client0] >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client3-year Date NOT NULL-False] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client4-year Utf8 NOT NULL-False] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-false-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.parquet-parquet] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types9-all_types9-index9-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
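The YQ_CONTROL_PLANE_STORAGE trace a few records above shows the control-plane storage bootstrapping its schema: one working directory, one rate-limiter coordination node, and one "Call create table"/"Reply for create table" pair per table (quotas, tenants, idempotency_keys, result_sets, compute_databases, nodes, connections, mappings, pending_small, bindings, jobs, queries, tenant_acks). That trace comes from C++ (schema.cpp); purely as an illustration of the same call pattern, here is a hedged sketch using the ydb Python SDK, with a hypothetical table name and layout:

    import ydb

    def ensure_table(pool: ydb.SessionPool, path: str) -> None:
        # One create-table call, retried through the session pool --
        # loosely mirroring the Call/Reply pairs in the trace above.
        def callee(session):
            session.create_table(
                path,
                ydb.TableDescription()
                .with_column(ydb.Column("id", ydb.OptionalType(ydb.PrimitiveType.Utf8)))
                .with_primary_key("id"),
            )

        pool.retry_operation_sync(callee)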
>> test_s3_1.py::TestS3::test_top_level_listing[v2-false-client0] [GOOD] >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types10-all_types10-index10-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] >> test_ydb_backup.py::TestPermissionsBackupRestoreSingleTable::test_single_table |75.8%| [TA] $(B)/ydb/tests/fq/control_plane_storage/test-results/unittest/{meta.json ... results_accumulator.log} |75.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test_delete [GOOD] Test command err: Database name /Root/test upsert #0 ok, result: [] Quota exceeded False upsert #1 ok, result: [] Quota exceeded False upsert #2 ok, result: [] Quota exceeded False upsert #3 ok, result: [] Quota exceeded False upsert #4 ok, result: [] Quota exceeded False upsert #5 ok, result: [] Quota exceeded False upsert #6 ok, result: [] Quota exceeded False upsert #7 ok, result: [] Quota exceeded False upsert #8 ok, result: [] Quota exceeded False upsert #9 ok, result: [] Quota exceeded False upsert #10 ok, result: [] Quota exceeded False upsert #11 ok, result: [] Quota exceeded False upsert #12 ok, result: [] Quota exceeded False upsert #13 ok, result: [] Quota exceeded False upsert: got overload issue delete #0 ok [... delete #1 ok through delete #498 ok condensed: all 500 deletes in this run succeeded ...] delete #499 ok ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC).
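The test_quota_exhaustion.py::TestYdbWorkload::test_delete trace above follows a simple loop: upsert batches until the server rejects a write with an overload issue (the data-size quota), then delete until the space is reclaimed. A hedged sketch of the upsert half using the ydb Python SDK, with a hypothetical table and query -- not the actual test code in ydb/tests/olap/data_quotas:

    import ydb

    def upsert_until_overloaded(pool: ydb.SessionPool, limit: int = 500) -> int:
        # Upsert rows until YDB reports an overload issue, as in the
        # "upsert #N ok ... upsert: got overload issue" trace above.
        for i in range(limit):
            def callee(session):
                session.transaction().execute(
                    f"UPSERT INTO big_table (k, v) VALUES ({i}, 'payload');",  # hypothetical schema
                    commit_tx=True,
                )
            try:
                # retry_operation_sync retries transient issues itself, so
                # Overloaded surfaces here only once its retries are exhausted.
                pool.retry_operation_sync(callee)
                print(f"upsert #{i} ok")
            except ydb.issues.Overloaded:
                print("upsert: got overload issue")
                return i
        return limit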
>> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> test_quota_exhaustion.py::TestYdbWorkload::test_duplicates [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client4-year Utf8 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client5-year Int64 NOT NULL-False] |75.9%| [TA] {RESULT} $(B)/ydb/tests/fq/control_plane_storage/test-results/unittest/{meta.json ... results_accumulator.log} >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v1-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-false-client0] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client5-year Int64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client6-year Int32-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.csv-csv_with_names] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreSingleTable::test_single_table [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4_UNIQUE_SYNC-pk_types0-all_types0-index0--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client6-year Int32-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client7-year Uint32-False] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-false-client0] [GOOD] >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-true-client0] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client8-year Int64-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v1-common/simple_posix/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types17-all_types17-index17-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client9-year Uint64-False] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_parquet[v2-true-client0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0-pk_types16-all_types16-index16-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v1-true-client0] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.csv-csv_with_names] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/scenario/py3test >> test_alter_compression.py::TestAlterCompression::test_multi[alter_compression] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client9-year Uint64-False] [GOOD] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client10-year String NOT NULL-True] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v1-true-client0] [GOOD] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v2-true-client0] |75.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_statistics.py::TestS3::test_aborted_by_user[v2-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=919449) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/test_statistics.py.TestS3.test_egress.v1-client0-json_list/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257a/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_statistics/testing_out_stuff/test_statistics.py.TestS3.test_egress.v1-client0-json_list/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922381 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1_float-pk_types3-all_types3-index3-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client10-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client11-year String-False] >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v2-true-client0] [GOOD] >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
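The ResourceWarning from ydb/tests/tools/fq_runner/kikimr_runner.py:184 quotes its own cause: the metering file is opened inline and never closed. The conventional fix is a context manager; a minimal sketch of the quoted line rewritten:

    # Before (as quoted in the warning): the file object is left to the GC.
    # meterings_loaded = sum(1 for _ in open(bill_fname))

    # After: deterministic close via a context manager.
    with open(bill_fname, encoding="utf-8") as bill:
        meterings_loaded = sum(1 for _ in bill)

The neighbouring fork() DeprecationWarning from multiprocessing/popen_fork.py would likewise be addressed by selecting the spawn start method (multiprocessing.set_start_method("spawn")) before any threads start, though whether that suits these test harnesses is an assumption.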
|76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_ydb_over_fq.py::TestYdbOverFq::test_insert_data_query[v1-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_ydb_over_fq/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257c/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_ydb_over_fq/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=919189) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922382 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client11-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client12-year Utf8-False] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/audit/py3test |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreFolderWithTable::test_folder_with_table >> test_auditlog.py::test_single_dml_query_logged[replace] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_date_time_simple_posix_insert[v2-common/simple_posix/test.parquet-parquet] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client12-year Utf8-False] [GOOD] |76.0%| [TA] $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... results_accumulator.log} >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client13-year Date-False] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] |76.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types_float-pk_types5-all_types5-index5-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/data_quotas/py3test >> test_quota_exhaustion.py::TestYdbWorkload::test_duplicates [GOOD] Test command err: Database name /Root/test upsert #0 ok, result: [] Quota exceeded False upsert #1 ok, result: [] Quota exceeded False upsert #2 ok, result: [] Quota exceeded False upsert #3 ok, result: [] Quota exceeded False upsert #4 ok, result: [] Quota exceeded False upsert #5 ok, result: [] Quota exceeded False upsert #6 ok, result: [] Quota exceeded False upsert #7 ok, result: [] Quota exceeded False upsert #8 ok, result: [] Quota exceeded False upsert #9 ok, result: [] Quota exceeded False upsert #10 ok, result: [] Quota exceeded False upsert #11 ok, result: [] Quota exceeded False upsert #12 ok, result: [] Quota exceeded False upsert #13 ok, result: [] Quota exceeded False upsert: got overload issue >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-false-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client0-year Int32 NOT NULL-False] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreFolderWithTable::test_folder_with_table [GOOD] |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] |76.1%| [TA] {RESULT} $(B)/ydb/tests/olap/scenario/test-results/py3test/{meta.json ... 
results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_s3_1.py::TestS3::test_top_level_listing[v2-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=903735) is multi-threaded, use of fork() may lead to deadlocks in the child. contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_write_result.v1-kikimr_params0-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 906178 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_top_level_listing_2.v1-kikimr_params0-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_top_level_listing_2.v1-kikimr_params0-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback 
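The test_quota_exhaustion.py output above shows the intended shape of that check: upserts keep succeeding ("Quota exceeded False") until the storage quota is hit and the database answers with an overload issue. A minimal sketch of such a loop with the ydb Python SDK follows; the endpoint, database, and table names are placeholders rather than the test fixture's real values, and the exact exception class surfaced for an exhausted quota may differ from the ydb.Overloaded used here.

    import ydb

    # Placeholder connection settings, not the test's real cluster config.
    driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root/test")
    driver.wait(timeout=5)
    pool = ydb.SessionPool(driver)

    i = 0
    while True:
        try:
            with pool.checkout() as session:
                session.transaction().execute(
                    f"UPSERT INTO `test-table` (id, value) VALUES ({i}, {i});",
                    commit_tx=True,
                )
            print(f"upsert #{i} ok")
            i += 1
        except ydb.Overloaded:
            # Mirrors the "upsert: got overload issue" line in the log above.
            print("upsert: got overload issue")
            break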
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_precompute.v1-false-client0/default/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ydb/tests/tools/fq_runner/kikimr_runner.py:184: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00258f/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_s3_1/testing_out_stuff/test_s3_1.py.TestS3.test_precompute.v1-false-client0/cp/node_1/metering.bill' mode='r' encoding='utf-8'> meterings_loaded = sum(1 for _ in open(bill_fname)) ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client0-year Int32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client1-year Uint32 NOT NULL-False] >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous |76.1%| [TA] $(B)/ydb/tests/olap/data_quotas/test-results/py3test/{meta.json ... results_accumulator.log} |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TA] {RESULT} $(B)/ydb/tests/olap/data_quotas/test-results/py3test/{meta.json ... 
results_accumulator.log} |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client1-year Uint32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client2-year Uint64 NOT NULL-False] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] >> test_auditlog.py::test_dynconfig >> test_auditlog.py::test_single_dml_query_logged[upsert] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client2-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client3-year Date NOT NULL-False] >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] [GOOD] |76.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
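The DeprecationWarning that recurs throughout these runs originates in contrib/python/ydb/py3/ydb/types.py:59, and the warning text itself names the replacement. A minimal before/after in plain Python (datetime.UTC is the Python 3.11+ alias; on older interpreters use datetime.timezone.utc):

    import datetime

    ts = 1717411200  # an arbitrary unix timestamp, in seconds

    naive = datetime.datetime.utcfromtimestamp(ts)             # deprecated, naive
    aware = datetime.datetime.fromtimestamp(ts, datetime.UTC)  # recommended, tz-aware

    # Same wall-clock instant; only the tzinfo differs.
    assert aware.replace(tzinfo=None) == naive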
>> test_auditlog.py::test_single_dml_query_logged[select] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_all_types-pk_types11-all_types11-index11-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client3-year Date NOT NULL-False] [GOOD] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client4-year Utf8 NOT NULL-False] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_explicit_partitioning_1.py::TestS3::test_valid_projected_column_values[v2-true-client0] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002591/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002591/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=901979) is multi-threaded, use of fork() may lead to deadlocks in the child. 
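The fq/s3 recipe block just above ends with CPython's warning that fork()ing a multi-threaded process can deadlock the child. The standard library's usual mitigation is the "spawn" start method, which launches a fresh interpreter instead of forking the threaded parent; a small sketch, not the recipe's actual code:

    import multiprocessing as mp

    def square(n: int) -> int:
        return n * n

    if __name__ == "__main__":
        # "spawn" avoids inheriting the parent's threads and locks via fork().
        ctx = mp.get_context("spawn")
        with ctx.Pool(2) as pool:
            print(pool.map(square, range(4)))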
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 905914 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client4-year Utf8 NOT NULL-False] [GOOD] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client5-year Int64 NOT NULL-False] >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_1-pk_types15-all_types15-index15-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client5-year Int64 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client6-year Int32-False] |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected >> test_auditlog.py::test_dml_requests_logged_when_unauthorized >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreDontOverwriteOnAlreadyExisting::test_dont_overwrite_on_already_existing ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[replace] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e78/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk19/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.replace/audit.txt 2025-06-03T10:47:15.201716Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:15.201699Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-03T10:47:15.176884Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[insert] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs1] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e66/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk9/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs1/audit.txt 2025-06-03T10:47:17.485056Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:17.485038Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:17.452781Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> 
test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_0_float-pk_types4-all_types4-index4-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client7-year Uint32-False] >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2_float-pk_types2-all_types2-index2-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
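The audit.txt excerpts above show that each audit entry is a timestamp followed by a single-line JSON object, which makes the files easy to check mechanically. A short sketch using one record's fields; the line is abbreviated, but the key names are taken verbatim from the output above:

    import json

    # Abbreviated copy of a record printed above.
    line = ('{"tx_id":"{none}","database":"/Root/test_auditlog.py",'
            '"status":"SUCCESS","subject":"root@builtin",'
            '"operation":"ExecuteDataQueryRequest","component":"grpc-proxy"}')

    record = json.loads(line)
    assert record["operation"] == "ExecuteDataQueryRequest"
    assert record["subject"] == "root@builtin"
    print(record["database"], record["status"])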
|76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] >> test_auditlog.py::test_dynconfig [GOOD] |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client8-year Int64-False] |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreDontOverwriteOnAlreadyExisting::test_dont_overwrite_on_already_existing [GOOD] |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4_float-pk_types0-all_types0-index0-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] [GOOD] |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types6-all_types6-index6-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
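The test_format_setting.py cases above are parameterized over UNIX_TIME_SECONDS, UNIX_TIME_MILLISECONDS, and UNIX_TIME_MICROSECONDS across csv_with_names, tsv_with_names, json_each_row, and parquet inputs. The parameter names come straight from the test ids; the divisor table below only illustrates what the three units denote and is not YDB's implementation:

    import datetime

    DIVISOR = {
        "UNIX_TIME_SECONDS": 1,
        "UNIX_TIME_MILLISECONDS": 1_000,
        "UNIX_TIME_MICROSECONDS": 1_000_000,
    }

    def to_utc(value: int, unit: str) -> datetime.datetime:
        # Normalize an integer unix-time value to an aware UTC datetime.
        return datetime.datetime.fromtimestamp(value / DIVISOR[unit], datetime.UTC)

    print(to_utc(1_717_411_200, "UNIX_TIME_SECONDS"))
    print(to_utc(1_717_411_200_000, "UNIX_TIME_MILLISECONDS"))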
|76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_anonymous [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e5a/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk12/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_anonymous/audit.txt |76.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e5c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk7/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_good_dynconfig/audit.txt 2025-06-03T10:47:20.060299Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_ttl.py::TestTTLAlterSettings::test_case ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
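Note that the audit records in this run never contain raw credentials: sanitized_token is rendered as a mask plus a short hex digest, e.g. "**** (B6C6F477)", so identical tokens can still be correlated across entries. The real masking scheme is not shown in this log; the sketch below is only a guess at the general idea, with CRC32 chosen arbitrarily for illustration:

    import zlib

    def sanitize_token(token: str) -> str:
        # Hypothetical: hide the value, keep a stable 8-hex-digit digest
        # so repeated uses of the same token line up in the audit trail.
        digest = format(zlib.crc32(token.encode()), "08X")
        return f"**** ({digest})"

    print(sanitize_token("example-token"))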
>> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client9-year Uint64-False] |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] >> test_auditlog.py::test_single_dml_query_logged[delete] |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] >> test_auditlog.py::test_dml_begin_commit_logged >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v1-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] [GOOD] >> test_auditlog.py::test_create_and_remove_tenant ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types7-all_types7-index7-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3-pk_types13-all_types13-index13-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
|76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client9-year Uint64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client10-year String NOT NULL-True] |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dynconfig [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e39/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk16/testing_out_stuff/test_auditlog.py.test_dynconfig/audit.txt 2025-06-03T10:47:27.031633Z: {"sanitized_token":"**** (B6C6F477)","subject":"root@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[upsert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e37/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk22/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.upsert/audit.txt 2025-06-03T10:47:23.198949Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:23.198930Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-03T10:47:23.184147Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client10-year String NOT NULL-True] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client11-year String-False] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_SECONDS] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2__SYNC-pk_types7-all_types7-index7---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client11-year String-False] [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client12-year Utf8-False] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[select] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001e21/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk20/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.select/audit.txt 2025-06-03T10:47:25.763916Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:25.763899Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from 
`/Root/test_auditlog.py/test-table`","start_time":"2025-06-03T10:47:25.747671Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.csv-csv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client12-year Utf8-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client13-year Date-False] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_Date-pk_types18-all_types18-index18-Date--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. 
Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_SECONDS] [GOOD] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_4-pk_types12-all_types12-index12-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
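Several TTL suites run in this stretch (TestTTLOnIndexedTable, TestTTLAlterSettings, TestTTLValueSinceUnixEpoch, TestTTLDefaultEnv). Row TTL in YDB is configured per table with an ALTER TABLE ... SET (TTL = ...) scheme statement; below is a hedged sketch via the Python SDK, with placeholder names and the Interval-on-column form for a Timestamp column (integer epoch columns additionally take a unit clause, omitted here):

    import ydb

    driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root/test")
    driver.wait(timeout=5)
    pool = ydb.SessionPool(driver)

    def enable_ttl(session):
        # Expire rows one hour after the value in the `created_at` column.
        session.execute_scheme(
            'ALTER TABLE `test-table` SET (TTL = Interval("PT1H") ON created_at);'
        )

    pool.retry_operation_sync(enable_ttl)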
|76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v1-true-client13-year Date-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client0-year Int32 NOT NULL-False] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] >> test_auditlog.py::test_create_and_remove_tenant [GOOD] >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MICROSECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_inflight.py::TestS3::test_inflight[v1-client0-kikimr_params2] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002590/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_inflight/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/002590/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_inflight/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=902022) is multi-threaded, use of fork() may lead to deadlocks in the child. 
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 905684 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_sid_is_unexpected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001de6/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk14/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_sid_is_unexpected/audit.txt 2025-06-03T10:47:29.818815Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:29.818796Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-03T10:47:29.803830Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:29.950133Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:29.950116Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-03T10:47:29.924173Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.081032Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.081014Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"select id from `/Root/test_auditlog.py/test-table`","start_time":"2025-06-03T10:47:30.055531Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.213422Z: 
{"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.213407Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:30.187413Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.331959Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.331944Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-03T10:47:30.318373Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.462971Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.462954Z","sanitized_token":"othe****ltin (27F910A9)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-03T10:47:30.437102Z","subject":"other-user@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_arent_logged_when_sid_is_expected [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001de1/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk13/testing_out_stuff/test_auditlog.py.test_dml_requests_arent_logged_when_sid_is_expected/audit.txt ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_root-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001ddd/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk6/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_root-_bad_dynconfig/audit.txt 2025-06-03T10:47:34.238032Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"ERROR","subject":"root@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLAlterSettings::test_case [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[insert] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001dd7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk18/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.insert/audit.txt 2025-06-03T10:47:30.727759Z: 
{"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.727743Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-03T10:47:30.714305Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index[table_index_2-pk_types14-all_types14-index14-Int8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.tsv-tsv_with_names-UNIX_TIME_MILLISECONDS] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client0-year Int32 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_requests_logged_when_unauthorized [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001de0/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk15/testing_out_stuff/test_auditlog.py.test_dml_requests_logged_when_unauthorized/audit.txt 2025-06-03T10:47:30.303326Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.303314Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"insert into `/Root/test_auditlog.py/test-table` (id, value) values (100, 100), (101, 101)","start_time":"2025-06-03T10:47:30.299658Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.413577Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.413557Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-03T10:47:30.408944Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.523429Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.523411Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"select id from 
`/Root/test_auditlog.py/test-table`","start_time":"2025-06-03T10:47:30.519357Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.635983Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.635970Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:30.630665Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.746335Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.746319Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"replace into `/Root/test_auditlog.py/test-table` (id, value) values (2, 3), (3, 3)","start_time":"2025-06-03T10:47:30.742040Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:30.857268Z: {"database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:30.857253Z","sanitized_token":"**** (C877DF61)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"ERROR","query_text":"upsert into `/Root/test_auditlog.py/test-table` (id, value) values (4, 4), (5, 5)","start_time":"2025-06-03T10:47:30.853105Z","subject":"__bad__@builtin","detailed_status":"SCHEME_ERROR","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client1-year Uint32 NOT NULL-False] |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_ydb_backup.py::TestPermissionsBackupRestoreSchemeOnly::test_scheme_only |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] |76.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_2-pk_types8-all_types8-index8-Uint8] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: 
datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client1-year Uint32 NOT NULL-False] [GOOD] |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client2-year Uint64 NOT NULL-False] |77.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MICROSECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/vector_index/py3test >> test_vector_index.py::TestVectorIndex::test_vector_index_prefix[table_index_3_float-pk_types1-all_types1-index1-Float] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
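The DeprecationWarning repeated by the datashard suites above comes from contrib/python/ydb/py3/ydb/types.py:59: datetime.datetime.utcfromtimestamp() is deprecated since Python 3.12 because it returns a naive datetime that is UTC only by convention. A minimal sketch of the replacement the warning itself suggests (the timestamp value is hypothetical, chosen only for illustration):

    import datetime

    ts = 1748947650  # hypothetical epoch seconds, for illustration only

    # Deprecated since Python 3.12: naive result, UTC only by convention.
    naive = datetime.datetime.utcfromtimestamp(ts)

    # Replacement named in the warning: an aware datetime pinned to UTC.
    # datetime.UTC is an alias of datetime.timezone.utc (Python 3.11+).
    aware = datetime.datetime.fromtimestamp(ts, datetime.UTC)

    assert aware.replace(tzinfo=None) == naive  # same instant, now tz-aware

The rename is mechanical; the aware object additionally behaves correctly in comparisons and arithmetic across time zones.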
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_cloud_ids_are_logged[attrs0] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d92/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk8/testing_out_stuff/test_auditlog.py.test_cloud_ids_are_logged.attrs0/audit.txt 2025-06-03T10:47:34.278795Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","cloud_id":"cloud-id-A","end_time":"2025-06-03T10:47:34.278777Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:34.254432Z","subject":"root@builtin","detailed_status":"SUCCESS","resource_id":"database-id-C","operation":"ExecuteDataQueryRequest","folder_id":"folder-id-B","component":"grpc-proxy"} >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test >> unstable_connection.py::TestUnstableConnection::test >> ttl_unavailable_s3.py::TestUnavailableS3::test >> data_correctness.py::TestDataCorrectness::test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete >> test_ydb_backup.py::TestPermissionsBackupRestoreSchemeOnly::test_scheme_only [GOOD] >> tier_delete.py::TestTierDelete::test_delete_s3_ttl |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.json-json_each_row-UNIX_TIME_MILLISECONDS] [GOOD] >> ttl_delete_s3.py::TestDeleteS3Ttl::test_data_unchanged_after_ttl_change |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] |77.1%| [TA] $(B)/ydb/tests/datashard/vector_index/test-results/py3test/{meta.json ... results_accumulator.log} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client2-year Uint64 NOT NULL-False] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] |77.1%| [TA] {RESULT} $(B)/ydb/tests/datashard/vector_index/test-results/py3test/{meta.json ... 
results_accumulator.log} >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client3-year Date NOT NULL-False] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d6f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk2/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_bad_dynconfig/audit.txt 2025-06-03T10:47:35.768027Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"{none}","remote_address":"127.0.0.1","status":"ERROR","subject":"{none}","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[delete] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d5c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk17/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.delete/audit.txt 2025-06-03T10:47:40.231630Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:40.231610Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"delete from `/Root/test_auditlog.py/test-table` where id = 100 or id = 101","start_time":"2025-06-03T10:47:40.203216Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_index_4__SYNC-pk_types5-all_types5-index5---SYNC] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: 
datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_SECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_dml_begin_commit_logged [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d4f/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk11/testing_out_stuff/test_auditlog.py.test_dml_begin_commit_logged/audit.txt 2025-06-03T10:47:40.823268Z: {"tx_id":"01jwtpew0q7rw1mbeytr8r7d23","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:40.823256Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-03T10:47:40.822997Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"BeginTransactionRequest","component":"grpc-proxy"} 2025-06-03T10:47:40.850710Z: {"tx_id":"01jwtpew0q7rw1mbeytr8r7d23","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:40.850698Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","commit_tx":"0","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:40.826902Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} 2025-06-03T10:47:40.855596Z: {"tx_id":"01jwtpew0q7rw1mbeytr8r7d23","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:40.855584Z","sanitized_token":"**** (B6C6F477)","remote_address":"127.0.0.1","status":"SUCCESS","start_time":"2025-06-03T10:47:40.854036Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"CommitTransactionRequest","component":"grpc-proxy"} |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_create_and_remove_tenant [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d4e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk10/testing_out_stuff/test_auditlog.py.test_create_and_remove_tenant/audit.txt 2025-06-03T10:47:33.613594Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-03T10:47:33.616061Z: {"paths":"[/Root/users/database]","tx_id":"281474976715660","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"CREATE DATABASE","component":"schemeshard"} 2025-06-03T10:47:33.623426Z: {"paths":"[/Root/users/database]","tx_id":"281474976715661","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"ALTER 
DATABASE","component":"schemeshard"} 2025-06-03T10:47:35.538139Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END INIT DATABASE CONFIG","remote_address":"::1","database":"/Root/users/database"} 2025-06-03T10:47:35.934764Z: {"paths":"[.metadata/workload_manager/pools/default]","tx_id":"281474976720657","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAccepted","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-03T10:47:36.035176Z: {"reason":"Check failed: path: '/Root/users/database/.metadata/workload_manager/pools/default', error: path exist, request accepts it (id: [OwnerId: 72075186224037897, LocalPathId: 5], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92","paths":"[default]","tx_id":"281474976720658","new_owner":"metadata@system","acl_add":"[+(SR|DS):all-users@well-known, +(SR|DS):root@builtin]","database":"/Root/users/database","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"metadata@system","detailed_status":"StatusAlreadyExists","operation":"CREATE RESOURCE POOL","component":"schemeshard"} 2025-06-03T10:47:37.122361Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"BEGIN REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} 2025-06-03T10:47:37.124430Z: {"paths":"[/Root/users/database]","tx_id":"281474976715662","database":"/Root","sanitized_token":"{none}","remote_address":"{none}","status":"SUCCESS","subject":"{none}","detailed_status":"StatusAccepted","operation":"DROP DATABASE","component":"schemeshard"} 2025-06-03T10:47:37.130712Z: {"sanitized_token":"{none}","subject":"{none}","status":"SUCCESS","component":"console","operation":"END REMOVE DATABASE","remote_address":"::1","database":"/Root/users/database"} ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_no_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d4c/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk3/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_no_auth-_good_dynconfig/audit.txt 2025-06-03T10:47:37.296259Z: {"sanitized_token":"{none}","subject":"{none}","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client3-year Date NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client4-year Utf8 NOT NULL-False] |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |77.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> 
test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MICROSECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001d2e/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk1/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_good_dynconfig/audit.txt 2025-06-03T10:47:38.651291Z: {"sanitized_token":"**** (C877DF61)","subject":"__bad__@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client4-year Utf8 NOT NULL-False] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client5-year Int64 NOT NULL-False] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] >> test_format_setting.py::TestS3::test_timestamp_unix_time_insert[v2-timestamp/unix_time/test.parquet-parquet-UNIX_TIME_MILLISECONDS] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_single_dml_query_logged[update] [GOOD] Test command err: AAA 
/home/runner/.ya/build/build_root/u93c/001d0d/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk21/testing_out_stuff/test_auditlog.py.test_single_dml_query_logged.update/audit.txt 2025-06-03T10:47:40.015854Z: {"tx_id":"{none}","database":"/Root/test_auditlog.py","end_time":"2025-06-03T10:47:40.015838Z","sanitized_token":"**** (B6C6F477)","begin_tx":"1","remote_address":"127.0.0.1","commit_tx":"1","status":"SUCCESS","query_text":"update `/Root/test_auditlog.py/test-table` set value = 0 where id = 1","start_time":"2025-06-03T10:47:39.991247Z","subject":"root@builtin","detailed_status":"SUCCESS","operation":"ExecuteDataQueryRequest","component":"grpc-proxy"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client5-year Int64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client6-year Int32-False] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001cd7/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk4/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_bad_dynconfig/audit.txt 2025-06-03T10:47:42.806012Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"othe****ltin (27F910A9)","remote_address":"127.0.0.1","status":"ERROR","subject":"other-user@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/select/py3test >> test_select.py::TestDML::test_select[table_all_types-pk_types12-all_types12-index12---] [GOOD] Test command err: contrib/python/ydb/py3/ydb/types.py:59: DeprecationWarning: datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version. Use timezone-aware objects to represent datetimes in UTC: datetime.datetime.fromtimestamp(timestamp, datetime.UTC). 
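Every audit.txt dump quoted above follows one fixed shape: an ISO-8601 timestamp, a colon and space, then a single-line JSON record with fields such as operation, subject, status, and query_text. A minimal parsing sketch, assuming only that layout (parse_audit_line is a hypothetical helper, not code from the test suite; the sample reuses fields visible in the dumps):

    import json

    def parse_audit_line(line: str):
        """Split '2025-06-03T10:47:40.015854Z: {...}' into (timestamp, record).

        The first ': ' (colon followed by a space) ends the timestamp; the
        colons inside '10:47:40' are followed by digits, so they don't match.
        """
        ts, _, payload = line.partition(": ")
        return ts, json.loads(payload)

    sample = ('2025-06-03T10:47:40.015854Z: {"operation":"ExecuteDataQueryRequest",'
              '"subject":"root@builtin","status":"SUCCESS"}')
    ts, record = parse_audit_line(sample)
    assert ts.endswith("Z") and record["status"] == "SUCCESS"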
>> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.json-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client6-year Int32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client7-year Uint32-False] |77.2%| [TA] $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} |77.2%| [TA] {RESULT} $(B)/ydb/tests/datashard/select/test-results/py3test/{meta.json ... results_accumulator.log} >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.parquet-parquet] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_bad_auth-_bad_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001cba/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk0/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_bad_auth-_bad_dynconfig/audit.txt 2025-06-03T10:47:45.838608Z: {"reason":"ydb/library/fyamlcpp/fyamlcpp.cpp:1068: \n6:12 plain scalar cannot start with '%'","sanitized_token":"**** (C877DF61)","remote_address":"127.0.0.1","status":"ERROR","subject":"__bad__@builtin","operation":"REPLACE DYNCONFIG","new_config":"\n---\n123metadata:\n kind: MainConfig\n cluster: \"\"\n version: %s\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","component":"console"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client7-year Uint32-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client8-year Int64-False] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] >> test_ydb_backup.py::TestPermissionsBackupRestoreEmptyDir::test_empty_dir >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v1-common/simple_format/test.parquet-parquet] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_ymq_send_read_delete [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client8-year Int64-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client9-year Uint64-False] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/audit/py3test >> test_auditlog.py::test_broken_dynconfig[_client_session_pool_with_auth_other-_good_dynconfig] [GOOD] Test command err: AAA /home/runner/.ya/build/build_root/u93c/001c7b/ydb/tests/functional/audit/test-results/py3test/testing_out_stuff/test_auditlog/chunk5/testing_out_stuff/test_auditlog.py.test_broken_dynconfig._client_session_pool_with_auth_other-_good_dynconfig/audit.txt 2025-06-03T10:47:52.448171Z: {"sanitized_token":"othe****ltin (27F910A9)","subject":"other-user@builtin","new_config":"\n---\nmetadata:\n kind: MainConfig\n cluster: \"\"\n version: 0\nconfig:\n yaml_config_enabled: true\nallowed_labels:\n node_id:\n type: string\n host:\n type: string\n tenant:\n type: string\nselector_config: []\n ","status":"SUCCESS","component":"console","operation":"REPLACE DYNCONFIG","remote_address":"127.0.0.1"} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_retryable_iam_error[tables_format_v0] >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_sending_duplicates [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD]
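The DeprecationWarning from ydb/tests/library/sqs/requests_client.py:140, captured in both sqs/cloud chunks above, points at logging.Logger.warn, a long-deprecated alias of Logger.warning. A sketch of the one-word fix (the logger name and message values are illustrative, not taken from the library):

    import logging

    logger = logging.getLogger("sqs.requests_client")
    code, reason, text = 403, "AccessDenied", "no access"  # illustrative values

    # Deprecated alias that triggers the warning above:
    #   logger.warn("Last request failed with code {}, reason '{}' and text "
    #               "'{}'".format(code, reason, text))

    # Supported spelling; %-style lazy arguments also skip building the
    # message entirely when the WARNING level is not enabled:
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                   code, reason, text)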
>> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] >> test_ydb_backup.py::TestPermissionsBackupRestoreEmptyDir::test_empty_dir [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client9-year Uint64-False] [GOOD] |77.2%| [TA] $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.json-json_each_row] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client10-year String NOT NULL-True] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v0] [GOOD] |77.2%| [TA] {RESULT} $(B)/ydb/tests/functional/audit/test-results/py3test/{meta.json ... results_accumulator.log} >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] >> test_ydb_backup.py::TestRestoreACLOption::test_restore_acl_option >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client10-year String NOT NULL-True] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client11-year String-False] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.json-json_each_row] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.parquet-parquet] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64,
relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Timestamp-pk_types12-all_types12-index12-Timestamp--] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6a/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk11/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6a/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk11/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181197 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint64-pk_types10-all_types10-index10-Uint64--] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b85/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk13/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b85/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk13/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181081 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Uint32-pk_types9-all_types9-index9-Uint32--] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b13/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk12/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b13/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk12/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181436 is still running ResourceWarning: Enable 
tracemalloc to get the object allocation traceback >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client11-year String-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client12-year Utf8-False] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_simple_format_insert[v2-common/simple_format/test.parquet-parquet] [GOOD] >> test_ydb_backup.py::TestRestoreACLOption::test_restore_acl_option [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-fifo] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Date-pk_types13-all_types13-index13-Date--] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b11/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk8/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b11/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk8/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181466 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client12-year Utf8-False] [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client13-year Date-False] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_Datetime-pk_types11-all_types11-index11-Datetime--] [GOOD] Test command err: library/recipes/common/__init__.py:29: 
ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b5f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk9/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b5f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk9/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181062 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.tsv-tsv_with_names] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.json-json_each_row] |77.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] |77.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_copy_ops.py::TestSchemeShardCopyOps::test_given_table_when_create_copy_of_it_then_ok [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-false-client13-year Date-False] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__ASYNC-pk_types5-all_types5-index5---ASYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b79/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b79/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk3/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181162 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client0-year Int32 NOT NULL-False] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP 
COLUMN ast-`.metadata/script_executions`] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v0] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.parquet-parquet] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD] |77.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client0-year Int32 NOT NULL-False] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v1-common/simple_format/test.parquet-parquet] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client1-year Uint32 NOT NULL-False] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__SYNC-pk_types4-all_types4-index4---SYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181161 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_1__SYNC-pk_types3-all_types3-index3---SYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper 
name='/home/runner/.ya/build/build_root/u93c/001b15/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b15/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181339 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_0__ASYNC-pk_types6-all_types6-index6---ASYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b04/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b04/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181513 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[fifo-tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_auth_header [GOOD] >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_ttl_DyNumber-pk_types8-all_types8-index8-DyNumber--] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6d/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk10/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b6d/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk10/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181059 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client1-year Uint32 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client2-year Uint64 NOT NULL-False] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] |77.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.tsv-tsv_with_names] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> 
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[fifo-tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_clouds [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.json-json_each_row] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_4__SYNC-pk_types0-all_types0-index0---SYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b58/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk7/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b58/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk7/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181058 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client2-year Uint64 NOT NULL-False] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client3-year Date NOT NULL-False] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_2__SYNC-pk_types2-all_types2-index2---SYNC] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b53/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b53/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> 
process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181164 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_purge_queue_counters [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_list_queues_for_unknown_cloud[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.json-json_each_row] [GOOD] >> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.parquet-parquet] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_queues_with_iam_token[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client3-year Date NOT NULL-False] [GOOD] >> test_ydb_backup.py::TestRestoreNoData::test_restore_no_data >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client4-year Utf8 NOT NULL-False] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] >> 
test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_format_insert[v2-common/simple_format/test.parquet-parquet] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_posix/big.csv-csv_with_names-POSIX]
|77.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client4-year Utf8 NOT NULL-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client5-year Int64 NOT NULL-False]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_cloud_double_create_queue[std-tables_format_v1] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-fifo] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test
Test command err:
contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead
contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_posix/big.csv-csv_with_names-POSIX] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_format/big.csv-csv_with_names-%Y-%m-%d]
>> test_ydb_backup.py::TestRestoreNoData::test_restore_no_data [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_index_3__SYNC-pk_types1-all_types1-index1---SYNC] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b7f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk6/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b7f/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk6/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181163 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
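Note: the ResourceWarning block above (repeated for each datashard/s3 chunk in this run) means library/recipes/common/__init__.py:29 opens the moto_server log files, hands them to subprocess.Popen, and then lets both the handles and the Popen object be garbage-collected without closing or reaping them. A minimal sketch of the usual fix is below; the helper names are hypothetical, and the real recipe code is not shown in this log:

    import subprocess

    def start_logged_daemon(cmd, out_path, err_path):
        # Keep the handles: letting them be garbage-collected is what
        # triggers "ResourceWarning: unclosed file <_io.TextIOWrapper ...>".
        out = open(out_path, "w", encoding="utf-8")
        err = open(err_path, "w", encoding="utf-8")
        return subprocess.Popen(cmd, stdout=out, stderr=err), out, err

    def stop_logged_daemon(process, out, err, timeout=10):
        process.terminate()
        try:
            # wait() reaps the child, avoiding "subprocess ... is still running".
            process.wait(timeout=timeout)
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()
        out.close()
        err.close()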
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client5-year Int64 NOT NULL-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client6-year Int32-False]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-fifo] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v0] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/s3/py3test >> test_s3.py::TestYdbS3TTL::test_s3[table_all_types-pk_types7-all_types7-index7---] [GOOD]
Test command err:
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b19/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001b19/ydb/tests/datashard/s3/test-results/py3test/testing_out_stuff/chunk0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'>
  process = subprocess.Popen(
ResourceWarning: Enable tracemalloc to get the object allocation traceback
contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1181410 is still running
ResourceWarning: Enable tracemalloc to get the object allocation traceback
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-fifo] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std]
|77.4%| [TA] $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log}
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
|77.4%| [TA] {RESULT} $(B)/ydb/tests/datashard/s3/test-results/py3test/{meta.json ... results_accumulator.log}
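Note: the repeated hint "Enable tracemalloc to get the object allocation traceback" refers to a stock CPython feature: once tracemalloc is enabled, each ResourceWarning is annotated with the traceback of the allocation that created the leaked object, pointing directly at the offending open()/Popen call. A sketch of the two usual ways to turn it on (the pytest invocation is illustrative):

    # Either enable it for the whole test process from the outside:
    #   PYTHONTRACEMALLOC=25 python -m pytest ...
    #   python -X tracemalloc=25 -m pytest ...
    # or programmatically, as early as possible (e.g. in conftest.py):
    import tracemalloc

    tracemalloc.start(25)  # keep up to 25 frames per allocation site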
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-date_time/simple_iso/big.csv-csv_with_names-ISO]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client6-year Int32-False] [GOOD]
>> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-fifo] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client7-year Uint32-False]
>> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v1-std] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD]
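Note: every sqs/cloud chunk in this section reports the same one-line issue in ydb/tests/library/sqs/requests_client.py:140: logging.Logger.warn is a deprecated alias of warning(). A sketch of the rename (the module and function names here are illustrative; switching from eager str.format to lazy %-style arguments is an optional extra):

    import logging

    logger = logging.getLogger("sqs.requests_client")  # illustrative name

    def log_failed_request(code, reason, text):
        # Before: logger.warn("... {} ...".format(code, reason, text))
        #         -> DeprecationWarning: The 'warn' method is deprecated
        # After: the supported method, with lazy %-style formatting:
        logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
                       code, reason, text)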
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v1-date_time/simple_iso/big.csv-csv_with_names-ISO] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_posix/big.csv-csv_with_names-POSIX]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client7-year Uint32-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client8-year Int64-False]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-fifo] [GOOD]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD]
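Note: the Werkzeug warnings shown in the first nemesis/ut chunk (rules.py:748-756) are all one pattern: ast.Str and its .s attribute have been deprecated since Python 3.8 and are slated for removal in 3.14. A sketch of the replacement the warnings point to, using ast.Constant with an explicit type check on .value:

    import ast

    def is_string_literal(node):
        # Old: isinstance(node, ast.Str) and node.s  -> DeprecationWarning
        # New: ast.Constant covers all literal kinds, so the type of
        # .value must be checked explicitly.
        return isinstance(node, ast.Constant) and isinstance(node.value, str)

    expr = ast.parse("'hello'", mode="eval").body
    assert is_string_literal(expr)
    assert expr.value == "hello"  # .value replaces the deprecated .s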
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_posix/big.csv-csv_with_names-POSIX] [GOOD]
>> test_disk.py::TestSafeDiskBreak::test_erase_method
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_format/big.csv-csv_with_names-%Y-%m-%d]
>> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD]
>> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client8-year Int64-False] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client9-year Uint64-False]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v0] [GOOD]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD]
>> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-common/simple_format/big.csv-csv_with_names-%Y-%m-%d] [GOOD]
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-date_time/simple_iso/big.csv-csv_with_names-ISO]
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
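Note: the remaining warning in those chunks, from contrib/python/Flask/py3/flask/scaffold.py:109, flags pkgutil.find_loader, which is likewise slated for removal in Python 3.14; importlib.util.find_spec is the documented replacement. A sketch of the migration (the helper name is hypothetical):

    import importlib.util

    def module_importable(name):
        # Old: pkgutil.find_loader(name) is not None  -> DeprecationWarning
        # New: the same question asked through importlib.
        try:
            return importlib.util.find_spec(name) is not None
        except (ImportError, ValueError):  # missing parent package / bad name
            return False

    assert module_importable("json")
    assert not module_importable("no_such_module")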
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD]
>> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_fifo_groups_with_dlq_in_cloud[tables_format_v0] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client9-year Uint64-False] [GOOD]
>> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client10-year String NOT NULL-True]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_format_setting.py::TestS3::test_date_time_simple_posix_big_file[v2-date_time/simple_iso/big.csv-csv_with_names-ISO] [GOOD]
>> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-yql_syntax-client0]
|77.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test Test command err: (same Flask/Werkzeug DeprecationWarning output as the first nemesis/ut chunk above)
>> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0]
|77.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v0-std] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_ymq_expiring_counters [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queues_count_over_limit[tables_format_v1] [GOOD]
Test command err:
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead
  logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(
>> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-yql_syntax-client0] [GOOD]
------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >>
test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-pg_syntax-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client10-year String NOT NULL-True] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-yql_types-pg_syntax-client0] [SKIPPED] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client11-year String-False] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-yql_syntax-client0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_sqs_action_counters [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] >> test_ydb_backup.py::TestClusterBackup::test_cluster_backup >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-pg_syntax-client0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client11-year String-False] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client12-year Utf8-False] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v1-pg_types-pg_syntax-client0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_body[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-fifo] [GOOD] 
>> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-yql_syntax-client0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_yc_events_processor[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-yql_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-pg_syntax-client0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-yql_types-pg_syntax-client0] [SKIPPED] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client12-year Utf8-False] [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-yql_syntax-client0] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-fifo] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN 
ast-`.metadata/script_executions`] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_queue_counters_are_in_folder[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_not_throttling_with_custom_queue_name[std-tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-fifo] [GOOD] >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-yql_syntax-client0] [GOOD] >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-pg_syntax-client0] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v1] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_setup_in_cloud[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-fifo] [GOOD] |77.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLOnIndexedTable::test_case [GOOD] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] >> test_format_setting.py::TestS3::test_precompute_with_pg_binding[v2-pg_types-pg_syntax-client0] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v1-timestamp/completeness_iso/test.csv-csv_with_names] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_count_queues[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_empty_access_key_id[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v1-timestamp/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v2-timestamp/completeness_iso/test.csv-csv_with_names] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v0-std] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] |77.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_q_twice[tables_format_v1-std] [GOOD] |77.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLValueSinceUnixEpoch::test_case [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] >> test_format_setting.py::TestS3::test_timestamp_completeness_iso[v2-timestamp/completeness_iso/test.csv-csv_with_names] [GOOD] >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v1-date_time/completeness_iso/test.csv-csv_with_names] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_create_queue[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v1-date_time/completeness_iso/test.csv-csv_with_names] [GOOD] >> 
test_format_setting.py::TestS3::test_date_time_completeness_iso[v2-date_time/completeness_iso/test.csv-csv_with_names] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonSqsYandexCloudMode::test_private_queue_recreation[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] >> test_format_setting.py::TestS3::test_date_time_completeness_iso[v2-date_time/completeness_iso/test.csv-csv_with_names] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_queue_counters.py::TestYmqQueueCounters::test_counters_when_reading_from_empty_queue [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request 
failed with code {}, reason '{}' and text '{}'".format( contrib/python/PyHamcrest/py3/hamcrest/core/base_description.py:43: DeprecationWarning: Call append_description_of instead of append_value >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/as_default/test.csv] |77.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/ttl/py3test >> test_ttl.py::TestTTLDefaultEnv::test_case [GOOD] >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] |77.6%| [TA] $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} |77.6%| [TA] {RESULT} $(B)/ydb/tests/functional/ttl/test-results/py3test/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/as_default/test.csv] [GOOD] >> test_ydb_backup.py::TestClusterBackup::test_cluster_backup [GOOD] >> test_format_setting.py::TestS3::test_date_null[v1-date_null/parse_error/test.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( 
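The DeprecationWarning repeated through these runs points at ydb/tests/library/sqs/requests_client.py:140, where the long-deprecated Logger.warn alias is still called; the warning itself names the fix (use 'warning'). A minimal sketch of that fix, using illustrative placeholder values rather than the harness's real ones:

    import logging

    logger = logging.getLogger("sqs.requests_client")  # hypothetical logger name

    code, reason, text = 500, "InternalError", "retry later"  # illustrative values only
    # Deprecated spelling that emits the warning seen above:
    #   logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(code, reason, text))
    # Preferred spelling; lazy %-style arguments also skip string building when the level is disabled:
    logger.warning("Last request failed with code %s, reason '%s' and text '%s'", code, reason, text)
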
>> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_format_setting.py::TestS3::test_date_null[v1-date_null/parse_error/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/as_default/test.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_body[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] [GOOD] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/as_default/test.csv] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/parse_error/test.csv] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] >> 
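The bracketed suffixes on these test IDs (for example test_create_q_twice[tables_format_v0-fifo]) are standard pytest parametrization IDs: each parameter value contributes one dash-separated segment. A minimal sketch with hypothetical parameter names, not the suite's actual fixtures:

    import pytest

    # Produces IDs test_create_q_twice[tables_format_v0-fifo] ... [tables_format_v1-std],
    # matching the naming pattern visible throughout this log.
    @pytest.mark.parametrize("tables_format,queue_type", [
        ("tables_format_v0", "fifo"),
        ("tables_format_v0", "std"),
        ("tables_format_v1", "fifo"),
        ("tables_format_v1", "std"),
    ])
    def test_create_q_twice(tables_format, queue_type):
        assert queue_type in ("fifo", "std")  # stand-in body; the real tests exercise SQS queues
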
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_validates_message_attributes[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v0-tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] >> test_format_setting.py::TestS3::test_date_null[v2-date_null/parse_error/test.csv] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/as_default/test.csv] |77.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test >> test_tablet.py::TestMassiveKills::test_tablets_are_ok_after_many_kills [GOOD] Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal 
in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/fq/s3/py3test >> test_explicit_partitioning_0.py::TestS3::test_projection_enum_type_invalid_validation[v2-true-client13-year Date-False] [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_0/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/00257b/ydb/tests/fq/s3/test-results/py3test/testing_out_stuff/test_explicit_partitioning_0/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/popen_fork.py:66: DeprecationWarning: This process (pid=919442) is multi-threaded, use of fork() may lead to deadlocks in the child. 
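The Flask and Werkzeug warnings above name their own replacements: pkgutil.find_loader gives way to importlib.util.find_spec, and ast.Str with its s attribute gives way to ast.Constant with value, both slated for removal in Python 3.14. A short sketch of the two migrations, independent of the Werkzeug internals that emit the warnings:

    import ast
    import importlib.util

    # ast.Str / node.s are deprecated; parsing already yields ast.Constant nodes.
    node = ast.parse("'hello'", mode="eval").body
    assert isinstance(node, ast.Constant) and node.value == "hello"

    # pkgutil.find_loader("json") is deprecated; find_spec is the replacement.
    spec = importlib.util.find_spec("json")
    assert spec is not None
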
contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/multiprocessing/process.py:125: ResourceWarning: unclosed ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 922866 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/tools/nemesis/ut/py3test >> test_disk.py::TestSafeDiskBreak::test_erase_method [GOOD] Test command err: contrib/python/Flask/py3/flask/scaffold.py:109: DeprecationWarning: 'pkgutil.find_loader' is deprecated and slated for removal in Python 3.14; use importlib.util.find_spec() instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:751: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:748: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/tools/python3/Lib/ast.py:587: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:755: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: Attribute s is deprecated and will be removed in Python 3.14; use value instead contrib/python/Werkzeug/py3/werkzeug/routing/rules.py:756: DeprecationWarning: ast.Str is deprecated and will be removed in Python 3.14; use ast.Constant instead >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/as_default/test.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-std] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/parse_error/test.csv] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-fifo] [GOOD] |77.6%| [TA] $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] |77.6%| [TA] {RESULT} $(B)/ydb/tests/tools/nemesis/ut/test-results/py3test/{meta.json ... results_accumulator.log} >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v0-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] >> 
test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_q_twice[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_change_visibility] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v1-date_null/parse_error/test.csv] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_change_disables_receive_attempt_id[tables_format_v1-with_delete_message] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/as_default/test.csv] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_queue_attributes[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithPath::test_micro_batch_read[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_group_id[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/as_default/test.csv] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/parse_error/test.csv] |77.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_stream_query.py::TestStreamQuery::test_sql_suite[plan-window.test] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] |77.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast-`.metadata/script_executions`] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] >> test_format_setting.py::TestS3::test_date_null_with_not_null_type[v2-date_null/parse_error/test.csv] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join0.test] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/as_default/multi_null.csv] >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v0-long_polling-std] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_partial_delete_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] |77.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[DROP TABLE {}-`.metadata/script_executions`] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_and_read_message[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_mechanics_in_cloud[tables_format_v1-tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/as_default/multi_null.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/parse_error/multi_null.csv] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_fifo_read_delete_single_message [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-fifo] [GOOD] >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] >> test_format_setting.py::TestS3::test_date_null_multi[v1-date_null/parse_error/multi_null.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/as_default/multi_null.csv] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-15.test] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_yandex_cloud_mode.py::TestSqsYandexCloudMode::test_dlq_message_counters_in_cloud[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_only_single_read_infly_from_fifo [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_not_in_flight[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_fifo_read_delete_single_message [GOOD] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax, DROP COLUMN ast, DROP COLUMN stats-`.metadata/script_executions`] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/as_default/multi_null.csv] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/parse_error/multi_null.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/script_execution/py3test >> test_update_script_tables.py::TestUpdateScriptTablesYdb::test_recreate_tables[ALTER TABLE {} DROP COLUMN syntax-`.metadata/script_executions`] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> ttl_unavailable_s3.py::TestUnavailableS3::test [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c35/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c35/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk5/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback !!! simulating S3 hang up -- sending SIGSTOP !!! 
simulating S3 recovery -- sending SIGCONT contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1182628 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] |77.7%| [TA] $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} |77.7%| [TA] {RESULT} $(B)/ydb/tests/functional/script_execution/test-results/py3test/{meta.json ... results_accumulator.log} >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_attribute_name[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] >> test_format_setting.py::TestS3::test_date_null_multi[v2-date_null/parse_error/multi_null.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/as_default/multi_null.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is 
deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] >> test_postgres.py::TestPGSQL::test_sql_suite[results-create_table.test] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_read_dont_stall[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/as_default/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/parse_error/multi_null.csv] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithTenant::test_private_queue_recreation[tables_format_v0-std] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v1-date_null/parse_error/multi_null.csv] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/as_default/multi_null.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/cloud/py3test >> test_common.py::TestCommonYandexWithPath::test_private_queue_recreation[tables_format_v1-std] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-1.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_expires_on_wait_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attribute_value[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-6.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-14.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-15.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-2.test] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/as_default/multi_null.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/parse_error/multi_null.csv] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_invalid_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v0] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_receive_attempt_id[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with 
code {}, reason '{}' and text '{}'".format( >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] >> test_format_setting.py::TestS3::test_string_not_null_multi[v2-date_null/parse_error/multi_null.csv] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_list_queues_of_nonexistent_user[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_zero_visibility_timeout_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestYandexAttributesPrefix::test_allows_yandex_message_attribute_prefix[tables_format_v0] [GOOD] >> 
test_ydb_backup.py::TestDatabaseBackup::test_database_backup >> test_postgres.py::TestPGSQL::test_sql_suite[results-create_table.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/coalesce-and-join.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_create_queue_by_nonexistent_user_fails[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-1.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/coalesce-and-join.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-create_table.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_queue_attributes[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |77.7%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_deduplication[tables_format_v1-content_based] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> 
test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-insert.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_wrong_delete_fails[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_visibility_timeout_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_attribute_name[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMicroBatchesWithTenant::test_micro_batch_read[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_polling.py::TestSqsPolling::test_receive_message_with_polling[tables_format_v1-long_polling-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_validates_deduplication_id[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> data_migration_when_alter_ttl.py::TestDataMigrationWhenAlterTtl::test [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c42/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c42/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk1/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: 
ResourceWarning: subprocess 1182362 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_message_batch[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message_batch[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-create_table.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/coalesce-and-join.test] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join3.test] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join-group-by-with-null.test] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_validates_message_attributes[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: 
DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-3.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_only_single_read_infly_from_fifo [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v0-std] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-2.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_multi_read_dont_stall[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-6.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-7.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/coalesce-and-join.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] >> 
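The ResourceWarning block above (unclosed moto_server.out.log / moto_server.err.log handles and "subprocess 1182362 is still running") points at library/recipes/common/__init__.py:29, where subprocess.Popen is handed freshly opened log files that the parent never closes and a child that teardown never reaps. A leak-free sketch, under the assumption that the recipe only needs to spawn the daemon and return the handle (start_daemon and its parameters are illustrative names, not the recipe's real API):

    import subprocess

    def start_daemon(cmd, out_path, err_path):
        # Popen duplicates the descriptors into the child, so the parent can
        # close its copies right after the spawn; this silences the
        # "unclosed file" ResourceWarning without losing any child output.
        with open(out_path, "w", encoding="utf-8") as out, \
             open(err_path, "w", encoding="utf-8") as err:
            return subprocess.Popen(cmd, stdout=out, stderr=err)

    # The "subprocess ... is still running" warning is a separate leak: the
    # fixture teardown should terminate and reap the child explicitly, e.g.
    #   proc.terminate()
    #   proc.wait(timeout=10)
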
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select2-1.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_partial_delete_works[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_receive_with_very_big_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-insert.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-1.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select1-1.test] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join3.test] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join3.test] [GOOD] >> 
test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join4.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_empty_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_runtime_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-select_distinct.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_visibility_timeout_works[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_multi_read_dont_stall[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request 
failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-by_deduplication_id] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> TQueueBackpressureTest::PerfInFlight >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] >> TQueueBackpressureTest::PerfInFlight [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_empty_queue_url[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v2] |77.8%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfInFlight [GOOD] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_to_nonexistent_queue[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] >> TQueueBackpressureTest::PerfTrivial ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] >> KqpScheme::FamilyColumnTest >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join3.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join4.test] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join-group-by-with-null.test] [GOOD] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest >> TQueueBackpressureTest::PerfTrivial [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] >> test_ydb_backup.py::TestDatabaseBackup::test_database_backup [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeUncompat >> TBSVWithReboots::CreateAssignAlterIsAllowed >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes_batch[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] >> KqpScheme::FamilyColumnTest [GOOD] >> KqpScheme::Int8Int16 >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] >> TBSVWithReboots::CreateDrop >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-7.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-8.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeCompat >> 
test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-4.test] >> KqpScheme::Int8Int16 [GOOD] >> KqpScheme::Int8Int16Olap >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] >> KqpScheme::CreateAndAlterTableWithPartitioningBySizeCompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitionSizeUncompat |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] >> KqpScheme::Int8Int16Olap [GOOD] >> KqpScheme::DropTransfer >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-2.test] >> KqpScheme::CreateAndAlterTableWithPartitionSizeUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadUncompat >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-after_crutch_batch] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_message_visibility_with_very_big_timeout[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] >> KqpScheme::DropTransfer [GOOD] >> KqpScheme::DropTransfer_QueryService >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-3.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadUncompat [GOOD] >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadCompat >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-fifo] [GOOD] >> 
test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v1-standard_mode] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] >> KqpScheme::DropTransfer_QueryService [GOOD] >> KqpScheme::DropResourcePoolClassifier >> KqpScheme::CreateAndAlterTableWithPartitioningByLoadCompat [GOOD] >> KqpScheme::CreateAndDropGroup ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_does_not_change_visibility_for_deleted_message[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_wrong_delete_fails[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_and_read_to_different_groups[tables_format_v1] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-jointest/join4.test] [GOOD] >> 
test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] >> KqpScheme::CreateAndDropGroup [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateAndDropGroup [GOOD] Test command err: Trying to start YDB, gRPC: 64472, MsgBus: 20508 2025-06-03T10:49:31.824242Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673482463480179:2062];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:31.824265Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ca/r3tmp/tmpay2st8/pdisk_1.dat 2025-06-03T10:49:31.919990Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:31.920086Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511673482463480150:2079] 1748947771824120 != 1748947771824123 TServer::EnableGrpc on GrpcPort 64472, node 1 2025-06-03T10:49:31.940248Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:31.940261Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:31.940263Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:31.940314Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:20508 2025-06-03T10:49:31.990505Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:31.990535Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:31.991806Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:20508 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:32.014265Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:32.018004Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:49:32.022381Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:32.047526Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:49:32.077649Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.096197Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:32.266272Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673486758449102:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:32.266314Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:32.324607Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.337657Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.350568Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.366612Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.380692Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.394088Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.451411Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.465891Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673486758449754:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:32.465939Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:32.465955Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673486758449759:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:32.466877Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:32.475094Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673486758449761:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:32.529981Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673486758449812:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:32.697426Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:49:32.716584Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 18927, MsgBus: 18052 2025-06-03T10:49:32.930432Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673485445517674:2070];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:32.930453Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ca/r3tmp/tmppNNTIA/pdisk_1.dat 2025-06-03T10:49:32.952202Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 18927, node 2 2025-06-03T10:49:32.966004Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:32.966016Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:32.966018Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:32.966064Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18052 TClient is connected to server localhost:18052 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_De ... 
TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithPartitioningByLoad" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress... (TRUNCATED) 2025-06-03T10:49:37.158014Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/TableWithPartitioningByLoad TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithPartitioningByLoad" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1748947777192 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 4 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 4 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 2 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithPartitioningByLoad" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress... (TRUNCATED) 2025-06-03T10:49:37.171450Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 TClient::Ls request: /Root/TableWithPartitioningByLoad TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "TableWithPartitioningByLoad" PathId: 17 SchemeshardId: 72057594046644480 PathType: EPathTypeTable CreateFinished: true CreateTxId: 281474976715672 CreateStep: 1748947777192 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 5 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 5 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 TableSchemaVersion: 3 TablePartitionVersion: 1 } ChildrenExist: false } Table { Name: "TableWithPartitioningByLoad" Columns { Name: "Key" Type: "Uint64" TypeId: 4 Id: 1 NotNull: false IsBuildInProgress: false } Columns { Name: "Value" Type: "String" TypeId: 4097 Id: 2 NotNull: false IsBuildInProgress... 
(TRUNCATED) Trying to start YDB, gRPC: 31711, MsgBus: 32415 2025-06-03T10:49:37.534920Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511673505906945380:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:37.534939Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016ca/r3tmp/tmp7cL7md/pdisk_1.dat 2025-06-03T10:49:37.554892Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:37.557495Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511673505906945341:2079] 1748947777534711 != 1748947777534714 TServer::EnableGrpc on GrpcPort 31711, node 6 2025-06-03T10:49:37.567030Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:37.567044Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:37.567047Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:37.567096Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:32415 TClient is connected to server localhost:32415 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:37.639766Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:37.639805Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:37.640024Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.640644Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:37.650176Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... waiting... 
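Each `Trying to start YDB, gRPC: ..., MsgBus: ...` block above is a fresh single-node test server coming up: the metadata provider and net classifier warn about missing optional config, the test client then polls the scheme root (`WaitRootIsUp 'Root'...` followed by `TClient::Ls`) until it answers, and HIVE walks the node through `Unknown -> Disconnected -> Connecting -> Connected`. A minimal sketch of that wait-for-root polling loop; `describe_root` is a hypothetical callable standing in for the test client's Ls request:

```python
import time

def wait_root_is_up(describe_root, timeout=30.0, poll_interval=0.5):
    """Poll the scheme root until it answers, like WaitRootIsUp in the log.

    describe_root() is assumed to return a truthy description once the
    root directory is servable and to raise before that.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if describe_root():
                return True          # "WaitRootIsUp 'Root' success."
        except Exception:
            pass                     # server still starting; keep polling
        time.sleep(poll_interval)
    raise TimeoutError("root did not come up within %.1fs" % timeout)
```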
2025-06-03T10:49:37.661443Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.685198Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.698402Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.960616Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673505906946993:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.960651Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.970171Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.979685Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.991587Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.005911Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.019705Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.033558Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.047997Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.064671Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673510201914942:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:38.064707Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:38.064713Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673510201914947:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:38.065515Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:38.075232Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673510201914949:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:38.160547Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673510201915000:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::DropResourcePoolClassifier [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DropResourcePoolClassifier [GOOD] Test command err: Trying to start YDB, gRPC: 2295, MsgBus: 19088 2025-06-03T10:49:31.143676Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673479527947484:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:31.143779Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016c1/r3tmp/tmp0GTCTI/pdisk_1.dat 2025-06-03T10:49:31.223280Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 2295, node 1 2025-06-03T10:49:31.241165Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:31.241181Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:31.241183Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:31.241236Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:49:31.245023Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:31.245067Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:31.246188Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:19088 TClient is connected to server localhost:19088 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:49:31.333391Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.348551Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:31.384866Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:31.436533Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:49:31.453272Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.610698Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673479527949077:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:31.610733Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:31.669941Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.679495Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.691953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.711241Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.720516Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.735041Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.749042Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:31.766216Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673479527949729:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:31.766251Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:31.766298Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673479527949734:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:31.767394Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:31.775414Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673479527949736:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:49:31.833964Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673479527949787:3400] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:32.014264Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 31736, MsgBus: 3741 2025-06-03T10:49:32.264123Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673486457225597:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:32.264520Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016c1/r3tmp/tmpr4iMa2/pdisk_1.dat 2025-06-03T10:49:32.279911Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 31736, node 2 2025-06-03T10:49:32.290026Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:32.290040Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:32.290043Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:32.290100Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:3741 TClient is connected to server localhost:3741 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
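The sequence above is the workload service bootstrapping the `default` resource pool on first use: several `TPoolFetcherActor`s report `NOT_FOUND` at once, one `TPoolCreatorActor` issues `ESchemeOpCreateResourcePool`, retries with "doublechecking" until the pool becomes visible, and a racing create is absorbed as "path exist, request accepts it". The same create-if-missing pattern in a minimal Python sketch; `fetch_pool`, `create_pool`, and the two exception types are illustrative stand-ins, not the actual YDB API:

```python
import time

class NotFound(Exception): ...
class AlreadyExists(Exception): ...

def ensure_pool(pool_id, fetch_pool, create_pool, retries=5, base_delay=0.1):
    """Create-if-missing with verification retries, mirroring the
    TPoolFetcherActor / TPoolCreatorActor exchange in the log."""
    for attempt in range(retries):
        try:
            return fetch_pool(pool_id)        # fast path: pool already visible
        except NotFound:
            pass
        try:
            create_pool(pool_id)              # concurrent creators are expected
        except AlreadyExists:
            pass                              # "path exist, request accepts it"
        time.sleep(base_delay * (attempt + 1))  # scheduled retry / doublechecking
    raise NotFound(pool_id)
```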
2025-06-03T10:49:32.366001Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:32.366036Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:32.369756Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 2025-06-03T10:49:32.369980Z node 2 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Ope ... , but propose itself is undo unsafe, suboperation type: ESchemeOpAlterPersQueueGroup, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:49:36.788425Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpDropTransferCascade, opId: 281474976715676:0, at schemeshard: 72057594046644480 2025-06-03T10:49:36.790889Z node 5 :REPLICATION_CONTROLLER ERROR: stream_consumer_remover.cpp:61: [StreamConsumerRemover][rid 1][tid 1] Error: status# NOT_FOUND, issues# {
: Error: some consumers in drop_consumers are missing already, code: 500003 } 2025-06-03T10:49:36.790951Z node 5 :REPLICATION_CONTROLLER ERROR: tx_drop_stream_result.cpp:59: [controller 72075186224037925][TxDropStreamResult] Drop stream error: rid# 1, tid# 1, status# NOT_FOUND, issue# {
: Error: some consumers in drop_consumers are missing already, code: 500003 } 2025-06-03T10:49:36.793050Z node 5 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 5, TabletId: 72075186224037925 not found Trying to start YDB, gRPC: 62613, MsgBus: 22940 2025-06-03T10:49:37.140672Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511673504988677870:2068];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:37.140706Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016c1/r3tmp/tmpVMi21R/pdisk_1.dat 2025-06-03T10:49:37.162345Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 62613, node 6 2025-06-03T10:49:37.172623Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:37.172641Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:37.172643Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:37.172693Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22940 TClient is connected to server localhost:22940 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:37.246167Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:37.246201Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:37.246707Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.247234Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected waiting... 
2025-06-03T10:49:37.258260Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.271037Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.332926Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.347255Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:37.588723Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673504988679474:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.588761Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.599456Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.610609Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.621608Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.635450Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.649224Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.663913Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.678089Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:37.698273Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673504988680125:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.698300Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.698311Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673504988680130:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:37.699185Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:37.704314Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673504988680132:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:37.804590Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673504988680183:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:38.142748Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:49:38.240706Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.328706Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:49:38.423917Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.517511Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.596625Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 2025-06-03T10:49:38.957486Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673509283648482:2713], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-03T10:49:38.957512Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-10.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_to_zero_works[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select_distinct.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] >> KqpScheme::DisableExternalDataSourcesOnServerless |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_receive_attempt_reloads_same_messages[tables_format_v0-standard_mode] [GOOD] >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_queue_attributes[tables_format_v0-std] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_write_read_delete_many_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] |77.9%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( 
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_get_queue_attributes_only_attributes_table[tables_format_v1-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_set_very_big_visibility_timeout[tables_format_v0] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delay_message_batch[tables_format_v1-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> KqpScheme::CreateExternalDataSourceWithSa >> KqpScheme::DisableExternalDataSourcesOnServerless [GOOD] >> KqpScheme::DisableResourcePools >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_message[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: 
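The `DeprecationWarning` repeated through the SQS test output comes from a single call site, `ydb/tests/library/sqs/requests_client.py:140`, which still uses `logging.Logger.warn`; `warn` has been a deprecated alias of `warning` since Python 3.3, so the warning disappears with a one-word rename. A sketch of the corrected call (the argument values are placeholders):

```python
import logging

logger = logging.getLogger(__name__)

# Before (what the log complains about):
#   logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format(...))

# After: 'warning' is the supported spelling, and lazy %-style arguments
# avoid building the message when the level is disabled.
logger.warning("Last request failed with code %s, reason '%s' and text '%s'",
               500, "Internal Server Error", "...")
```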
DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_deduplication[tables_format_v0-content_based] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v0-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] >> KqpScheme::DisableResourcePools [GOOD] >> KqpScheme::DisableResourcePoolsOnServerless |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> ReadBatcher::ReadBatcher |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> TBlobStorageHullCompactDeferredQueueTest::Basic >> KqpOlapScheme::NullColumnError >> test_postgres.py::TestPGSQL::test_sql_suite[results-jointest/join4.test] [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[results-select.test] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> KqpScheme::CreateExternalDataSourceWithSa [GOOD] >> KqpScheme::CreateExternalDataSourceValidationAuthMethod >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] >> ReadBatcher::ReadBatcher [GOOD] |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] >> KqpOlapScheme::NullColumnError [GOOD] >> KqpOlapScheme::TenThousandColumns >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-8.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-9.test] |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> ReadBatcher::ReadBatcher [GOOD] >> KqpScheme::DisableResourcePoolsOnServerless [GOOD] >> KqpScheme::DisableResourcePoolClassifiers |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/suite_tests/py3test >> KqpScheme::CreateExternalDataSourceValidationAuthMethod [GOOD] >> KqpScheme::CreateExternalDataSourceValidationLocation >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v0] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] >> KqpScheme::SchemaVersionMissmatchWithWrite >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-2.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-3.test] |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.0%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::CreateExternalDataSourceValidationLocation [GOOD] >> KqpScheme::CreateExternalTable >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] >> KqpScheme::DisableResourcePoolClassifiers [GOOD] >> KqpScheme::DisableResourcePoolClassifiersOnServerless >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] >> KqpScheme::SchemaVersionMissmatchWithWrite [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexRead |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::CreateExternalTable [GOOD] >> KqpScheme::CreateExternalTableCheckPrimaryKey >> test_format_setting.py::TestS3::test_parquet_converters_to_timestamp[v2] [GOOD] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v1] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/scheme_shard/py3test >> test_scheme_shard_operations.py::TestSchemeShardSimpleOps::test_when_delete_path_with_folder_then_get_error_response [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TBlobStorageHullCompactDeferredQueueTest::Basic [GOOD] Test command err: STEP 1 STEP 2 StringToId# 63 numItems# 110271 >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-4.test] >> KqpScheme::TouchIndexAfterMoveIndexRead [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexWrite |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_read_dont_stall[tables_format_v0] [GOOD] >> KqpScheme::CreateExternalTableCheckPrimaryKey [GOOD] >> KqpScheme::CreateExternalTableValidation >> KqpScheme::DescribeIndexTable >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> KqpScheme::AlterIndexImplTable-VectorIndex |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.1%| [TA] $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |78.1%| [TA] {RESULT} $(B)/ydb/tests/functional/scheme_shard/test-results/py3test/{meta.json ... results_accumulator.log} |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-fifo] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_pdisk_format_info.py::TestPDiskInfo::test_read_disk_state [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_send_message_batch[tables_format_v0-std] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_and_read_multiple_messages[tables_format_v1] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-14.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-15.test] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_change_visibility_works[tables_format_v1-std] [GOOD] >> KqpScheme::DescribeIndexTable [GOOD] >> KqpScheme::CreatedAt >> KqpScheme::TouchIndexAfterMoveIndexWrite [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexReadReplace >> KqpScheme::CreateExternalTableValidation [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] [GOOD] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::CreateExternalTableValidation [GOOD] Test command err: Trying to start YDB, gRPC: 9316, MsgBus: 4278 2025-06-03T10:49:43.615642Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673532604302139:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:43.615797Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00169b/r3tmp/tmpCvgVNj/pdisk_1.dat 2025-06-03T10:49:43.698046Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 9316, node 1 2025-06-03T10:49:43.717277Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:43.717330Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:43.718669Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:43.727376Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:43.727389Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:43.727392Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:43.727442Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:4278 TClient is connected to server localhost:4278 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:43.801261Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 
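The `TClient::Ls` request/response pairs in these test logs describe a scheme path and dump its `PathDescription` (path type, version counters, children such as `.sys`). Outside the C++ harness the same lookup can be done from the YDB Python SDK's scheme client; a sketch assuming the `describe_path`/`list_directory` methods of the installed `ydb` package, with placeholder endpoint and database:

```python
import ydb

driver = ydb.Driver(endpoint="grpc://localhost:2136", database="/Root")  # placeholders
driver.wait(timeout=5)

# Rough equivalent of `TClient::Ls request: Root` in the log:
entry = driver.scheme_client.describe_path("/Root")
print(entry.name, entry.type)                 # path type, e.g. DIRECTORY

listing = driver.scheme_client.list_directory("/Root")
for child in listing.children:                # e.g. ".sys" plus test tables
    print(" ", child.name, child.type)

driver.stop()
```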
2025-06-03T10:49:43.804789Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976710657, at schemeshard: 72057594046644480 2025-06-03T10:49:43.812611Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:49:43.839099Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:43.881833Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 2025-06-03T10:49:43.900931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:44.082097Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673536899270877:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:44.082133Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:44.149852Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.159634Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.173115Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.194954Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.204868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.216325Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.230118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.254545Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673536899271528:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:44.254584Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:44.254732Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673536899271533:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:44.256249Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:44.260161Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673536899271535:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:49:44.318455Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673536899271586:3395] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:44.621255Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710672:1, at schemeshard: 72057594046644480 2025-06-03T10:49:44.733832Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-06-03T10:49:44.820745Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710678:1, at schemeshard: 72057594046644480 2025-06-03T10:49:44.915401Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710681:0, at schemeshard: 72057594046644480 2025-06-03T10:49:45.009112Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710684:0, at schemeshard: 72057594046644480 2025-06-03T10:49:45.114295Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976710687:0, at schemeshard: 72057594046644480 2025-06-03T10:49:45.217195Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976715758:2, at schemeshard: 72057594046644480 2025-06-03T10:49:45.235739Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976715759:0, at schemeshard: 72057594046644480 2025-06-03T10:49:45.632975Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExternalDataSource, opId: 281474976710702:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 14565, MsgBus: 27610 2025-06-03T10:49:45.980837Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673542002994000:2072];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:45.981170Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00169b/r3tmp/tmpsKthw0/pdisk_1.dat TServer::EnableGrpc on GrpcPort 14565, node 2 
2025-06-03T10:49:46.005001Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 20 ... s undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:50.034059Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:50.049674Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:50.060355Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:50.118958Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:50.140266Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673562510483927:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:50.140289Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:50.140392Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673562510483932:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:50.141117Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:50.143196Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511673562510483934:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:50.228968Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511673562510483985:3400] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 31954, MsgBus: 7765 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00169b/r3tmp/tmpkNBfU9/pdisk_1.dat 2025-06-03T10:49:50.828266Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:49:50.830896Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:50.832238Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511673564611129331:2079] 1748947790797903 != 1748947790797906 TServer::EnableGrpc on GrpcPort 31954, node 6 2025-06-03T10:49:50.845146Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:50.845167Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:50.845170Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:50.845226Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:7765 2025-06-03T10:49:50.903741Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:50.903782Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting TClient is connected to server localhost:7765 2025-06-03T10:49:50.905012Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:49:50.910118Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:50.917547Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:50.931815Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.001328Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.017191Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.449000Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673568906098273:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.449051Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.465995Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.483362Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.503189Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.530664Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.566232Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.587202Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.607396Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.630566Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673568906098927:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.630596Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.630734Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673568906098932:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.631769Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:51.639322Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:49:51.639478Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673568906098934:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:51.713738Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673568906098985:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::AlterIndexImplTable-VectorIndex [GOOD] >> KqpScheme::AlterIndexImplTableUsingPublicAPI >> TBSVWithReboots::CreateAssignAlterIsAllowed [GOOD] |78.1%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::CreatedAt [GOOD] >> KqpScheme::DisableCreateExternalDataSource |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::TouchIndexAfterMoveIndexReadReplace [GOOD] >> KqpScheme::TouchIndexAfterMoveIndexWriteReplace |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateAssignAlterIsAllowed [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:49:32.022272Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:49:32.022302Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:32.022307Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:49:32.022313Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:49:32.022328Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: 
OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:49:32.022333Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:49:32.022343Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:32.022356Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:49:32.022481Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:49:32.022555Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:49:32.039876Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:49:32.039909Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:32.040019Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:49:32.043126Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:49:32.043253Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:49:32.043289Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:49:32.045200Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:49:32.045253Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:49:32.045403Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.045473Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:49:32.046022Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:32.046086Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:49:32.046353Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:49:32.046365Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:32.046383Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:49:32.046392Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: 
schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:49:32.046398Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:49:32.046439Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:49:32.047938Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:49:32.071556Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:49:32.071653Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.071728Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:49:32.071779Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:49:32.071790Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.072704Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.072739Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:49:32.072797Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.072808Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:49:32.072814Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:49:32.072820Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:49:32.073404Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, 
operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.073419Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:49:32.073426Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:49:32.078389Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.078420Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.078429Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:32.078441Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:49:32.079098Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:49:32.079798Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:49:32.079850Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:49:32.080069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.080102Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:49:32.080112Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:32.080187Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
LAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 2025-06-03T10:49:53.169464Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:49:53.169565Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:49:53.169579Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:68: NBSVState::TConfigureParts operationId: 1003:0 ProgressState, at schemeshard72057594046678944 2025-06-03T10:49:53.178240Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1003:0 from tablet: 72057594046678944 to tablet: 72075186233409547 cookie: 72057594046678944:2 msg type: 272761856 2025-06-03T10:49:53.178312Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 0, tablet: 72075186233409547 2025-06-03T10:49:53.178388Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1765: TOperation FindRelatedPartByTabletId, TxId: 1003, tablet: 72075186233409547, partId: 0 2025-06-03T10:49:53.178443Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:619: TTxOperationReply execute, operationId: 1003:0, at schemeshard: 72057594046678944, message: TxId: 1003 Origin: 72075186233409547 Status: OK 2025-06-03T10:49:53.178452Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:23: NBSVState::TConfigureParts operationId: 1003:0 HandleReply TEvSetConfigResult, at schemeshard: 72057594046678944 2025-06-03T10:49:53.178466Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 3 -> 128 2025-06-03T10:49:53.179393Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:647: TTxOperationReply complete, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:49:53.179460Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:49:53.179469Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:192: NBSVState::TPropose operationId# 1003:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:49:53.179485Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1003 ready parts: 1/1 2025-06-03T10:49:53.179532Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1003 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:49:53.180242Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1003:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1003 msg type: 269090816 2025-06-03T10:49:53.180285Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1003, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1003 at step: 5000004 FAKE_COORDINATOR: advance: minStep5000004 
State->FrontStep: 5000003 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1003 at step: 5000004 2025-06-03T10:49:53.180378Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000004, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:53.180405Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1003 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 339302418539 } } Step: 5000004 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:49:53.180420Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1003:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-03T10:49:53.180474Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1003:0 128 -> 240 2025-06-03T10:49:53.180522Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 FAKE_COORDINATOR: Erasing txId 1003 2025-06-03T10:49:53.181093Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:49:53.181105Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1003, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:49:53.181171Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:53.181178Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [79:208:2209], at schemeshard: 72057594046678944, txId: 1003, path id: 3 2025-06-03T10:49:53.181284Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1003:0, at schemeshard: 72057594046678944 2025-06-03T10:49:53.181314Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1003:0 ProgressState 2025-06-03T10:49:53.181332Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:49:53.181338Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:49:53.181345Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1003:0 progress is 1/1 2025-06-03T10:49:53.181350Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:49:53.181356Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1003, ready parts: 1/1, is published: false 2025-06-03T10:49:53.181363Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1003 ready parts: 1/1 2025-06-03T10:49:53.181369Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1003:0 2025-06-03T10:49:53.181374Z node 79 :FLAT_TX_SCHEMESHARD 
DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1003:0 2025-06-03T10:49:53.181423Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 6 2025-06-03T10:49:53.181431Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1003, publications: 1, subscribers: 0 2025-06-03T10:49:53.181436Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1003, [OwnerId: 72057594046678944, LocalPathId: 3], 3 2025-06-03T10:49:53.181576Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:49:53.181598Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 3 PathOwnerId: 72057594046678944, cookie: 1003 2025-06-03T10:49:53.181604Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1003 2025-06-03T10:49:53.181609Z node 79 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1003, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 3 2025-06-03T10:49:53.181614Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:49:53.181632Z node 79 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1003, subscribers: 0 2025-06-03T10:49:53.183022Z node 79 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1003 TestModificationResult got TxId: 1003, wait until txId: 1003 2025-06-03T10:49:53.183165Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_4" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:49:53.183239Z node 79 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_4" took 85us result status StatusSuccess 2025-06-03T10:49:53.183353Z node 79 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_4" PathDescription { Self { Name: "BSVolume_4" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 3 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 3 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 2 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 
PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 3 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_4" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 32 } Partitions { BlockCount: 32 } Version: 2 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } Partitions { PartitionId: 1 TabletId: 72075186233409548 } VolumeTabletId: 72075186233409547 AlterVersion: 2 MountToken: "Owner123" TokenVersion: 1 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> TDelayedResponsesTests::Test [GOOD] |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delay_one_message[tables_format_v1-std] [GOOD] >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> TDelayedResponsesTests::Test [GOOD] >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete [GOOD] >> KqpScheme::DisableCreateExternalDataSource [GOOD] >> KqpScheme::DisableDropExternalDataSource >> KqpScheme::InvalidationAfterDropCreate |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::DisableResourcePoolClassifiersOnServerless [GOOD] >> KqpScheme::DisableMetadataObjectsOnServerless >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-10.test] [GOOD] |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-11.test] >> KqpScheme::AlterIndexImplTableUsingPublicAPI [GOOD] >> KqpScheme::AlterResourcePool >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::TouchIndexAfterMoveIndexWriteReplace [GOOD] >> KqpScheme::TouchIndexAfterMoveTableRead |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::DisableDropExternalDataSource [GOOD] >> KqpScheme::DisableCreateExternalTable >> KqpScheme::AlterTableAlterIndex+UseQueryService >> KqpScheme::InvalidationAfterDropCreate [GOOD] >> KqpScheme::InvalidationAfterDropCreateCompatSchema >> TBSVWithReboots::Create >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] >> KqpScheme::AlterResourcePool [GOOD] >> KqpScheme::AlterNonExistingResourcePool |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::TouchIndexAfterMoveTableRead [GOOD] >> KqpScheme::DisableCreateExternalTable [GOOD] >> KqpScheme::DisableDropExternalTable |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::InvalidationAfterDropCreateCompatSchema [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2 |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::TouchIndexAfterMoveTableRead [GOOD] Test command err: Trying to start YDB, gRPC: 27491, MsgBus: 1548 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001691/r3tmp/tmp0bAYxv/pdisk_1.dat 2025-06-03T10:49:48.084143Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673551965963954:2067];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:48.084158Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:49:48.160375Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 27491, node 1 2025-06-03T10:49:48.185162Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:48.185197Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:48.187026Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:48.188799Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:48.188813Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:48.188815Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:48.188864Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:1548 TClient is connected to server localhost:1548 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:49:48.296729Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:48.300384Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:49:48.311196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:48.399405Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:48.450349Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:48.471765Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:48.593121Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673551965965556:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:48.593163Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:48.657502Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.667064Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.682569Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.695312Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.710066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.723934Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.737868Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:48.766743Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673551965966208:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:48.766792Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:48.766829Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673551965966213:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:48.767960Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:48.770821Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673551965966215:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:48.844390Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673551965966266:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:49.048293Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 29461, MsgBus: 61008 2025-06-03T10:49:49.449237Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673558545850675:2075];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:49.449442Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001691/r3tmp/tmpxmw3Qo/pdisk_1.dat 2025-06-03T10:49:49.479191Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 29461, node 2 2025-06-03T10:49:49.494943Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:49.494961Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:49.494964Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:49.495024Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:61008 2025-06-03T10:49:49.549115Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:49.549163Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:49.550323Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:61008 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 Process ... 
69], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:54.502522Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:54.503468Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:54.512782Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511673580649640407:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:54.594391Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511673580649640458:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:54.779882Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:49:54.794913Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 2025-06-03T10:49:54.825668Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710762:2, at schemeshard: 72057594046644480 2025-06-03T10:49:54.846441Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710763:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:54.983136Z node 5 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 5, TabletId: 72075186224037923 not found Trying to start YDB, gRPC: 16869, MsgBus: 7043 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001691/r3tmp/tmpYmtsph/pdisk_1.dat 2025-06-03T10:49:55.685809Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; TServer::EnableGrpc on GrpcPort 16869, node 6 2025-06-03T10:49:55.694556Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:55.694571Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:55.694574Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:55.694628Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:49:55.694910Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:55.698283Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511673584195820272:2079] 1748947795664346 != 1748947795664349 TClient is connected to server localhost:7043 TClient is connected to server localhost:7043 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:55.769949Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:55.769980Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:55.770551Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.772779Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:55.775193Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.837971Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.859140Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.873581Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.181948Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673588490789204:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.181974Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.192813Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.207942Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.222035Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.244111Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.256344Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.270874Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.332765Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.353603Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673588490789860:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.353652Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.353771Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673588490789865:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.354786Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:56.358771Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673588490789867:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:56.444405Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673588490789918:3399] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:56.639975Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpInitiateBuildIndexImplTable, opId: 281474976710758:2, at schemeshard: 72057594046644480 2025-06-03T10:49:56.656367Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpFinalizeBuildIndexMainTable, opId: 281474976710759:0, at schemeshard: 72057594046644480 |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-5.test] [GOOD] >> KqpScheme::AlterTableAlterIndex+UseQueryService [GOOD] >> test_ydb_backup.py::TestClusterBackupRestore::test_cluster_backup_restore >> KqpScheme::AlterTableAlterIndex-UseQueryService >> KqpScheme::AlterNonExistingResourcePool [GOOD] >> KqpScheme::AlterResourcePoolClassifier |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::DisableDropExternalTable [GOOD] |78.2%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> KqpScheme::InvalidationAfterDropCreateTable2 [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTx |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DisableDropExternalTable [GOOD] Test command err: Trying to start YDB, gRPC: 14483, MsgBus: 24239 2025-06-03T10:49:50.834445Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673562281538287:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:50.834474Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00166d/r3tmp/tmpQkE6tN/pdisk_1.dat 2025-06-03T10:49:50.913606Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 14483, node 1 2025-06-03T10:49:50.945602Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:50.945619Z 
node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:50.945621Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:50.945673Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24239 2025-06-03T10:49:50.983274Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:50.983306Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:50.984295Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:24239 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:51.046559Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.057616Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:49:51.059214Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.099443Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.129608Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.157827Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:49:51.298019Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673566576507192:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.298051Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.356754Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.370489Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.383773Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.395374Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.410929Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.454359Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.475732Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.553544Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673566576507849:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.553583Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.553853Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673566576507854:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.554947Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:51.565170Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:49:51.565317Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673566576507856:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:51.647378Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673566576507907:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:51.908156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 28571, MsgBus: 21271 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00166d/r3tmp/tmpD0MzjO/pdisk_1.dat 2025-06-03T10:49:52.189501Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673570147071894:2196];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:52.199592Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; TServer::EnableGrpc on GrpcPort 28571, node 2 2025-06-03T10:49:52.222115Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:52.222126Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:52.222128Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:52.222382Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:49:52.225450Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:52.226419Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511673570147071737:2079] 1748947792187977 != 1748947792187980 TClient is connected to server localhost:21271 TClient is connected to server localhost:21271 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRU ... 
boperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.368278Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.382901Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.396786Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.411728Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.442878Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673590051701395:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.442923Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.443164Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673590051701400:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.444314Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:56.449267Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:49:56.449370Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511673590051701402:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:56.526509Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511673590051701453:3395] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } Trying to start YDB, gRPC: 21367, MsgBus: 24140 2025-06-03T10:49:57.120877Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511673592806658077:2157];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00166d/r3tmp/tmpxkXyMq/pdisk_1.dat 2025-06-03T10:49:57.134721Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:49:57.145024Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21367, node 6 2025-06-03T10:49:57.157440Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:57.157452Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:57.157455Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:57.157505Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:24140 TClient is connected to server localhost:24140 2025-06-03T10:49:57.228218Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:57.228253Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:57.229286Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:49:57.237887Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:57.246400Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:57.264020Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:57.302887Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:57.377254Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:57.591102Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673592806659560:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:57.591130Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:57.604826Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.619254Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.631346Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.645162Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.661854Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.721763Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.732094Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.750572Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673592806660213:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:57.750603Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:57.750650Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673592806660218:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:57.751383Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:57.759625Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673592806660220:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:57.808895Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673592806660271:3396] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] >> unstable_connection.py::TestUnstableConnection::test [GOOD] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_delete_message_batch_works[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( >> KqpScheme::AlterTableAlterIndex-UseQueryService [GOOD] >> KqpScheme::AlterTableAlterVectorIndex >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[block-4-2] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTx [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2NoEffects >> TBSVWithReboots::CreateDrop [GOOD] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest >> KqpScheme::AlterTableAlterVectorIndex [GOOD] >> KqpScheme::AlterTableAlterMissedIndex >> KqpScheme::AlterResourcePoolClassifier [GOOD] >> KqpScheme::AlterNonExistingResourcePoolClassifier |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::CreateDrop [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] IGNORE Leader for TabletID 
72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:112:2142] Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:126:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:132:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:137:2058] recipient: [1:112:2142] 2025-06-03T10:49:32.704132Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:49:32.704163Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:32.704170Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:49:32.704176Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:49:32.704189Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:49:32.704194Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:49:32.704203Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:32.704218Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:49:32.704342Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:49:32.704422Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:49:32.721285Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:49:32.721337Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:32.721454Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:131:2154] sender: [1:179:2058] recipient: [1:15:2062] 2025-06-03T10:49:32.724902Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:49:32.725032Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:49:32.725069Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:49:32.728105Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:49:32.728166Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: 
Clear TempDirsState with owners number: 0 2025-06-03T10:49:32.728327Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.728413Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:49:32.729141Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:32.729222Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:49:32.729585Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:49:32.729605Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:32.729624Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:49:32.729638Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:49:32.729645Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:49:32.729711Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:49:32.731676Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:124:2149] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:49:32.753433Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:49:32.753536Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.753612Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:49:32.753665Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:49:32.753678Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, 
opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.754648Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.754678Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:49:32.754731Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.754743Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:49:32.754750Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:49:32.754756Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:49:32.755338Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.755357Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:49:32.755363Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:49:32.755931Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.755951Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:32.755959Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:32.755968Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:49:32.756802Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:49:32.757464Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:49:32.757534Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:49:32.757783Z node 1 
:FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:32.757817Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:49:32.757826Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:32.757895Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... G: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 2 2025-06-03T10:49:59.976652Z node 100 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1002, publications: 3, subscribers: 0 2025-06-03T10:49:59.976657Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 1], 7 2025-06-03T10:49:59.976661Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 2], 7 2025-06-03T10:49:59.976666Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1002, [OwnerId: 72057594046678944, LocalPathId: 3], 18446744073709551615 2025-06-03T10:49:59.977256Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:1 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:49:59.977270Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_shard_deleter.cpp:46: Free shard 72057594046678944:2 hive 72057594037968897 at ss 72057594046678944 2025-06-03T10:49:59.977325Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:49:59.977333Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:49:59.977388Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:49:59.977401Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1002, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:49:59.977433Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:59.977439Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [100:206:2207], at schemeshard: 72057594046678944, txId: 1002, path id: 1 2025-06-03T10:49:59.977446Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [100:206:2207], at 
schemeshard: 72057594046678944, txId: 1002, path id: 2 2025-06-03T10:49:59.977451Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [100:206:2207], at schemeshard: 72057594046678944, txId: 1002, path id: 3 FAKE_COORDINATOR: Erasing txId 1002 2025-06-03T10:49:59.977622Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.977637Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 18446744073709551615 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.977643Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 3, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:49:59.977649Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 18446744073709551615 2025-06-03T10:49:59.977656Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 1 2025-06-03T10:49:59.977723Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:123: TTxCleanDroppedPaths Execute, 1 paths in candidate queue, at schemeshard: 72057594046678944 2025-06-03T10:49:59.977730Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__clean_pathes.cpp:137: TTxCleanDroppedPaths: PersistRemovePath for PathId# [OwnerId: 72057594046678944, LocalPathId: 3], at schemeshard: 72057594046678944 2025-06-03T10:49:59.977744Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:49:59.977821Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.977831Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 1 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.977836Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:49:59.977840Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], version: 7 2025-06-03T10:49:59.977845Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 2 2025-06-03T10:49:59.977892Z node 100 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 1 TxId_Deprecated: 1 
2025-06-03T10:49:59.977960Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 1 ShardOwnerId: 72057594046678944 ShardLocalIdx: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:59.978003Z node 100 :HIVE INFO: tablet_helpers.cpp:1356: [72057594037968897] TEvDeleteTablet, msg: ShardOwnerId: 72057594046678944 ShardLocalIdx: 2 TxId_Deprecated: 2 2025-06-03T10:49:59.978071Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.978086Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 7 PathOwnerId: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.978090Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1002 2025-06-03T10:49:59.978095Z node 100 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1002, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 7 2025-06-03T10:49:59.978100Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:49:59.978112Z node 100 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1002, subscribers: 0 2025-06-03T10:49:59.978226Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5943: Free tablet reply, message: Status: OK Origin: 72057594037968897 TxId_Deprecated: 2 ShardOwnerId: 72057594046678944 ShardLocalIdx: 2, at schemeshard: 72057594046678944 2025-06-03T10:49:59.978750Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.979075Z node 100 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__clean_pathes.cpp:160: TTxCleanDroppedPaths Complete, done PersistRemovePath for 1 paths, skipped 0, left 0 candidates, at schemeshard: 72057594046678944 2025-06-03T10:49:59.979098Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.979499Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:1 2025-06-03T10:49:59.979524Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1002 2025-06-03T10:49:59.979540Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__delete_tablet_reply.cpp:174: Deleted shardIdx 72057594046678944:2 TestModificationResult got TxId: 1002, wait until txId: 1002 TestWaitNotification wait txId: 1002 2025-06-03T10:49:59.979625Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1002: send EvNotifyTxCompletion 2025-06-03T10:49:59.979634Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests 
-- TTxNotificationSubscriber, SendToSchemeshard, txId 1002 2025-06-03T10:49:59.979705Z node 100 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1002, at schemeshard: 72057594046678944 2025-06-03T10:49:59.979725Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1002: got EvNotifyTxCompletionResult 2025-06-03T10:49:59.979731Z node 100 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1002: satisfy waiter [100:419:2398] TestWaitNotification: OK eventTxId 1002 2025-06-03T10:49:59.979809Z node 100 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_3" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:49:59.979849Z node 100 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_3" took 53us result status StatusPathDoesNotExist 2025-06-03T10:49:59.979897Z node 100 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusPathDoesNotExist Reason: "Check failed: path: \'/MyRoot/DirA/BSVolume_3\', error: path hasn\'t been resolved, nearest resolved path: \'/MyRoot/DirA\' (id: [OwnerId: 72057594046678944, LocalPathId: 2]), source_location: ydb/core/tx/schemeshard/schemeshard_path_describer.cpp:1157" Path: "/MyRoot/DirA/BSVolume_3" PathId: 18446744073709551615 LastExistedPrefixPath: "/MyRoot/DirA" LastExistedPrefixPathId: 2 LastExistedPrefixDescription { Self { Name: "DirA" PathId: 2 SchemeshardId: 72057594046678944 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1000 CreateStep: 5000002 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathSubType: EPathSubTypeEmpty ChildrenExist: false } } PathOwnerId: 18446744073709551615, at schemeshard: 72057594046678944 |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::InvalidationAfterDropCreateTable2NoEffects [GOOD] >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTxNoEffects >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> AssignTxId::Basic |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> KqpScheme::AlterTableAlterMissedIndex [GOOD] >> KqpScheme::AlterTableAddUniqIndexSqlFeatureOff >> KqpScheme::DisableMetadataObjectsOnServerless [GOOD] |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest |78.3%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/core/tx/replication/controller/ut_assign_tx_id/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::DisableMetadataObjectsOnServerless [GOOD] Test command err: 2025-06-03T10:49:41.374206Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673523992682967:2079];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:41.374424Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0016b5/r3tmp/tmpG7UDEC/pdisk_1.dat 2025-06-03T10:49:41.461856Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 6283, node 1 2025-06-03T10:49:41.474694Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:41.474723Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:41.477490Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:41.489584Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:41.489603Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:41.489605Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:41.489667Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:12166 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:41.527794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
2025-06-03T10:49:41.797601Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:440: [WorkloadService] [Service] Started workload service initialization 2025-06-03T10:49:41.797643Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:241: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511673523992683755:2330], Start check tables existence, number paths: 2 2025-06-03T10:49:41.801474Z node 1 :KQP_WORKLOAD_SERVICE TRACE: kqp_workload_service.cpp:125: [WorkloadService] [Service] Updated node info, noode count: 3 2025-06-03T10:49:41.801487Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:100: [WorkloadService] [Service] Subscribed for config changes 2025-06-03T10:49:41.801492Z node 1 :KQP_WORKLOAD_SERVICE INFO: kqp_workload_service.cpp:111: [WorkloadService] [Service] Resource pools was enanbled 2025-06-03T10:49:41.802333Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=1&id=MTRjOTdhM2ItN2YxYmEyYjMtODg5NjE3NGYtMjQ4MjdhN2M=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id MTRjOTdhM2ItN2YxYmEyYjMtODg5NjE3NGYtMjQ4MjdhN2M= 2025-06-03T10:49:41.805066Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511673523992683755:2330], Describe table /Root/.metadata/workload_manager/delayed_requests status PathErrorUnknown 2025-06-03T10:49:41.805080Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:182: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511673523992683755:2330], Describe table /Root/.metadata/workload_manager/running_requests status PathErrorUnknown 2025-06-03T10:49:41.805085Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: table_queries.cpp:289: [WorkloadService] [TCleanupTablesActor] ActorId: [1:7511673523992683755:2330], Successfully finished 2025-06-03T10:49:41.805113Z node 1 :KQP_WORKLOAD_SERVICE DEBUG: kqp_workload_service.cpp:367: [WorkloadService] [Service] Cleanup completed, tables exists: 0 2025-06-03T10:49:41.805123Z node 1 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=1&id=MTRjOTdhM2ItN2YxYmEyYjMtODg5NjE3NGYtMjQ4MjdhN2M=, ActorId: [1:7511673523992683763:2331], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:49:41.826093Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:49:41.831353Z node 3 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[3:7511673522030905055:2076];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:41.835223Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:41.835258Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:41.836869Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 3 Cookie 3 2025-06-03T10:49:41.833437Z node 3 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-dedicated/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:49:41.837158Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(3, (0,0,0,0)) VolatileState: Connecting 
-> Connected 2025-06-03T10:49:41.893500Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895759Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895791Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895805Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895816Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895828Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895839Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895866Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.895876Z node 3 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224037888 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:41.937182Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:41.937234Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:41.939507Z node 3 :HIVE WARN: node_info.cpp:25: HIVE#72075186224037888 Node(3, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:41.965868Z node 3 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:41.968443Z node 3 :STATISTICS WARN: tx_init.cpp:287: [72075186224037894] TTxInit::Complete. 
EnableColumnStatistics=false 2025-06-03T10:49:42.044066Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateExtSubDomain, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:49:42.051043Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673528181736355:2074];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:42.051402Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/test-shared/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:49:42.057174Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:42.057205Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:42.058421Z node 1 :HIVE WARN: hive_impl.cpp:771: HIVE#72057594037968897 Handle TEvInterconnect::TEvNodeConnected, NodeId 2 Cookie 2 2025-06-03T10:49:42.058816Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:42.109148Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109213Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109226Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109244Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109258Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109268Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109282Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109307Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.109317Z node 2 :HIVE WARN: tx__create_tablet.cpp:342: HIVE#72075186224038889 THive::TTxCreateTablet::Execute CreateTablet Postponed 2025-06-03T10:49:42.157666Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:42.157695Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:42.160117Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72075186224038889 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:42.180287Z node 2 :STATISTICS WARN: tx_init.cpp:287: [72075186224038895] TTxInit::Complete. EnableColumnStatisti ... 
cpp:264: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ReadyState, TraceId: 01jwtpk5r2492zp72m6d09yvft, request placed into pool from cache: default 2025-06-03T10:50:01.858997Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ExecuteState, TraceId: 01jwtpk5r2492zp72m6d09yvft, Sending CompileQuery request 2025-06-03T10:50:01.861394Z node 13 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][13:7511673592741634859:2959][/Root/test-shared/.metadata/secrets/values] Sync is done: cookie# 52, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-06-03T10:50:01.861435Z node 13 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][13:7511673592741634859:2959][/Root/test-shared/.metadata/secrets/values] Sync is done: cookie# 53, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-06-03T10:50:01.861789Z node 13 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [13:7511673609921505975:3054], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-03T10:50:01.862425Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ExecuteState, TraceId: 01jwtpk5r2492zp72m6d09yvft, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-03T10:50:01.862435Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ExecuteState, TraceId: 01jwtpk5r2492zp72m6d09yvft, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:50:01.862439Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ExecuteState, TraceId: 01jwtpk5r2492zp72m6d09yvft, EndCleanup, isFinal: 0 2025-06-03T10:50:01.862494Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ExecuteState, TraceId: 01jwtpk5r2492zp72m6d09yvft, Sent query response back to proxy, proxyRequestId: 84, proxyId: [13:7511673584151698701:2265] 2025-06-03T10:50:01.862706Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-03T10:50:01.862769Z node 13 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/secrets/values]
: Error: LookupError, code: 2005 2025-06-03T10:50:01.862795Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:50:01.862800Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:50:01.862802Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:50:01.862804Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:50:01.862820Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=13&id=YTA2MzcwYzAtNmU4YmFhMjYtYTA1NWZmMjItYTI4OWQwMA==, ActorId: [13:7511673609921505972:3052], ActorState: unknown state, Session actor destroyed 2025-06-03T10:50:01.896266Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:223: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [0:0:0], ActorState: unknown state, Create session actor with id ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE= 2025-06-03T10:50:01.896327Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:227: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: unknown state, session actor bootstrapped 2025-06-03T10:50:01.896494Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:443: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ReadyState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, received request, proxyRequestId: 86 prepared: 0 tx_control: 1 action: QUERY_ACTION_EXECUTE type: QUERY_TYPE_SQL_DML text: SELECT * FROM `//Root/test-shared/.metadata/initialization/migrations`; rpcActor: [13:7511673609921505985:3060] database: /Root/test-shared databaseId: /Root/test-shared pool id: default 2025-06-03T10:50:01.896502Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:264: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ReadyState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, request placed into pool from cache: default 2025-06-03T10:50:01.896518Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:575: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ExecuteState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, Sending CompileQuery request 2025-06-03T10:50:01.898985Z node 13 :SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][13:7511673588446666855:2542][/Root/test-shared/.metadata/initialization/migrations] Sync is done: cookie# 38, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-06-03T10:50:01.899011Z node 13 
:SCHEME_BOARD_SUBSCRIBER WARN: subscriber.cpp:948: [main][13:7511673588446666855:2542][/Root/test-shared/.metadata/initialization/migrations] Sync is done: cookie# 39, size# 3, half# 1, successes# 0, faulires# 2, partial# 1 2025-06-03T10:50:01.899281Z node 13 :KQP_COMPILE_ACTOR ERROR: kqp_compile_actor.cpp:564: Compilation failed, self: [13:7511673609921505987:3061], status: UNAVAILABLE, issues:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-03T10:50:01.900014Z node 13 :KQP_SESSION WARN: kqp_session_actor.cpp:2155: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ExecuteState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, ReplyQueryCompileError, status UNAVAILABLE remove tx with tx_id: 2025-06-03T10:50:01.900031Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ExecuteState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, Cleanup start, isFinal: 0 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:50:01.900034Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ExecuteState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, EndCleanup, isFinal: 0 2025-06-03T10:50:01.900098Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2278: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ExecuteState, TraceId: 01jwtpk5s8a0ngzyy63s8a8065, Sent query response back to proxy, proxyRequestId: 86, proxyId: [13:7511673584151698701:2265] 2025-06-03T10:50:01.900331Z node 13 :METADATA_PROVIDER ERROR: log.cpp:784: fline=request_actor_cb.h:34;event=unexpected reply;response=operation { ready: true status: UNAVAILABLE issues { message: "Table metadata loading" issue_code: 1050 severity: 1 issues { position { row: 1 column: 1 } message: "Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]" end_position { row: 1 column: 1 } severity: 1 issues { message: "LookupError" issue_code: 2005 severity: 1 } } } result { [type.googleapis.com/Ydb.Table.ExecuteQueryResult] { tx_meta { } } } } ; 2025-06-03T10:50:01.900413Z node 13 :METADATA_PROVIDER ERROR: log.h:466: accessor_snapshot_base.cpp:16 :cannot construct snapshot: on request failed:
: Error: Table metadata loading, code: 1050
:1:1: Error: Failed to load metadata for table: db.[//Root/test-shared/.metadata/initialization/migrations]
: Error: LookupError, code: 2005 2025-06-03T10:50:01.900443Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2323: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ReadyState, Session closed due to explicit close event 2025-06-03T10:50:01.900452Z node 13 :KQP_SESSION INFO: kqp_session_actor.cpp:2481: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ReadyState, Cleanup start, isFinal: 1 CleanupCtx: 0 TransactionsToBeAborted.size(): 0 WorkerId: [0:0:0] WorkloadServiceCleanup: 0 2025-06-03T10:50:01.900454Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2542: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: ReadyState, EndCleanup, isFinal: 1 2025-06-03T10:50:01.900457Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2554: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: unknown state, Cleanup temp tables: 0 2025-06-03T10:50:01.900478Z node 13 :KQP_SESSION DEBUG: kqp_session_actor.cpp:2645: SessionId: ydb://session/3?node_id=13&id=ZDU3ZTg1MzktZDI2MzQwZGItYjc5YTIxZDgtYzdhNzQyNWE=, ActorId: [13:7511673609921505984:3059], ActorState: unknown state, Session actor destroyed >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTxNoEffects [GOOD] |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> KqpScheme::AlterNonExistingResourcePoolClassifier [GOOD] >> AssignTxId::Basic [GOOD] >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] [GOOD] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::InvalidationAfterDropCreateTable2MultiStageTxNoEffects [GOOD] Test command err: Trying to start YDB, gRPC: 3502, MsgBus: 17372 2025-06-03T10:49:54.739104Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673581392813340:2071];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:54.739129Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00165a/r3tmp/tmpBz8iGy/pdisk_1.dat 2025-06-03T10:49:54.832383Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 3502, node 1 2025-06-03T10:49:54.843631Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:54.843660Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:54.846411Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:54.850912Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:54.850924Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:54.850926Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed 
to initialize from file: (empty maybe) 2025-06-03T10:49:54.850973Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:17372 TClient is connected to server localhost:17372 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:54.943457Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976710657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:54.954309Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:54.995202Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.060168Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.075633Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:55.281628Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673585687782250:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:55.281676Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:55.339558Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.360156Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.374303Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.387913Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.396945Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.408971Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.423781Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.442663Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673585687782904:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:55.442689Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:55.442803Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673585687782909:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:55.443858Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976710669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:55.449169Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673585687782911:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976710669 completed, doublechecking } 2025-06-03T10:49:55.511093Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673585687782962:3398] txid# 281474976710670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:55.764143Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976710675:0, at schemeshard: 72057594046644480 2025-06-03T10:49:55.767811Z node 1 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 1, TabletId: 72075186224037911 not found Trying to start YDB, gRPC: 28530, MsgBus: 18129 2025-06-03T10:49:56.127715Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673590447971510:2215];send_to=[0:7307199536658146131:7762515]; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00165a/r3tmp/tmpckmBc0/pdisk_1.dat 2025-06-03T10:49:56.130181Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; 2025-06-03T10:49:56.153779Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:56.153913Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511673590447971320:2079] 1748947796118745 != 1748947796118748 TServer::EnableGrpc on GrpcPort 28530, node 2 2025-06-03T10:49:56.168678Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:56.168690Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:56.168693Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:56.168742Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:18129 TClient is connected to server localhost:18129 2025-06-03T10:49:56.229686Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:56.229719Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:56.230396Z node 2 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(2, (0,0,0,0)) VolatileState: Connecting -> Connected WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: t ... ration part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:50:00.483245Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:50:00.504156Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:50:00.528954Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673604123892839:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:00.528991Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:00.529400Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673604123892844:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:00.531122Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:50:00.535495Z node 5 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [5:7511673604123892846:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:50:00.631679Z node 5 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [5:7511673604123892897:3393] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:50:00.892639Z node 5 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:50:00.911716Z node 5 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 5, TabletId: 72075186224037911 not found Trying to start YDB, gRPC: 1953, MsgBus: 28945 test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/00165a/r3tmp/tmp5Kv2Zu/pdisk_1.dat 2025-06-03T10:50:01.465734Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=incorrect path status: LookupError; 2025-06-03T10:50:01.492467Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 1953, node 6 2025-06-03T10:50:01.519168Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:50:01.519185Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:50:01.519187Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:50:01.519249Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:28945 2025-06-03T10:50:01.558917Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:50:01.558946Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:50:01.560207Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:28945 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 
2025-06-03T10:50:01.596482Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:50:01.603611Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.621551Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.675078Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.695215Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.999265Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673611341356965:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.999311Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.005038Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.063838Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.079607Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.094412Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.108904Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.122527Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.138243Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.158841Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673615636324918:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.158878Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.158885Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673615636324923:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.159899Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:50:02.162759Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673615636324925:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:50:02.246165Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673615636324976:3401] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:50:02.491528Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.492329Z node 6 :HIVE WARN: hive_impl.cpp:491: HIVE#72057594037968897 Handle TEvLocal::TEvTabletStatus from node 6, TabletId: 72075186224037911 not found |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-9.test] [GOOD] >> KqpScheme::AlterTableAddUniqIndexSqlFeatureOff [GOOD] >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-insert.test] |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterNonExistingResourcePoolClassifier [GOOD] Test command err: Trying to start YDB, gRPC: 20748, MsgBus: 62520 2025-06-03T10:49:50.981275Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673562958597250:2145];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:50.982745Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001684/r3tmp/tmpnaNPZn/pdisk_1.dat 2025-06-03T10:49:51.062933Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 20748, node 1 2025-06-03T10:49:51.083486Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:49:51.083517Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:51.085266Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected 2025-06-03T10:49:51.092600Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:51.092614Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:51.092618Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:51.092670Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:62520 TClient is connected to server localhost:62520 WaitRootIsUp 'Root'... 
TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:51.200969Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.217593Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.310685Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.356622Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.380794Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:51.558044Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673567253566061:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.558068Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.624534Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.650938Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.663491Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.726968Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.749106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.775540Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.789548Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:51.810996Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673567253566714:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.811036Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.811260Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673567253566719:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:51.812362Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:51.816442Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715669, at schemeshard: 72057594046644480 2025-06-03T10:49:51.816549Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673567253566721:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:51.915712Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673567253566772:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:52.340931Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.371075Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.387196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.403705Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.741523Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511673571548535011:3927], for# user@builtin, access# DescribeSchema 2025-06-03T10:49:52.741537Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511673571548535011:3927], for# user@builtin, access# DescribeSchema 2025-06-03T10:49:52.747881Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511673571548535017:3929], for# user@builtin, access# DescribeSchema 2025-06-03T10:49:52.747896Z node 1 :TX_PROXY_SCHEME_CACHE WARN: cache.cpp:304: Access denied: self# [1:7511673571548535017:3929], for# user@builtin, access# DescribeSchema 2025-06-03T10:49:52.753955Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715678:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.759419Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715679:0, at schemeshard: 72057594046644480 2025-06-03T10:49:52.766018Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715680:1, at schemeshard: 72057594046644480 2025-06-03T10:49:52.777025Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715681:1, at schemeshard: 72057594046644480 2025-06-03T10:49:52.797116Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673571548535179:4028] txid# 281474976715682, issues: { message: "Check failed: path: \'/Root/SecondaryKeys/Index/indexImplTable\', error: path is not a common path (id: [OwnerId: 
72057594046644480, LocalPathId: 19], type: EPathTypeTable, state: EPathStateNoChanges), source_location: y ... , status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-03T10:50:00.053609Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673607469923141:2824], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-03T10:50:00.053702Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-03T10:50:00.273219Z node 5 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [5:7511673607469923297:2880], DatabaseId: /Root, PoolId: test_pool, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } 2025-06-03T10:50:00.273506Z node 5 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool test_pool, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool test_pool not found or you don't have access permissions } Trying to start YDB, gRPC: 4912, MsgBus: 22116 2025-06-03T10:50:00.907492Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511673605679509844:2208];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:50:00.912289Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001684/r3tmp/tmpxTsawX/pdisk_1.dat 2025-06-03T10:50:00.958972Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:50:00.961549Z node 6 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [6:7511673605679509662:2079] 1748947800899047 != 1748947800899050 TServer::EnableGrpc on GrpcPort 4912, node 6 2025-06-03T10:50:00.981683Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:50:00.981695Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:50:00.981697Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:50:00.981757Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:22116 2025-06-03T10:50:01.018363Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:50:01.018410Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:50:01.023973Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:22116 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:50:01.110063Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 
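The scheme-test blocks above repeat one workload-service pattern: a pool fetch fails with NOT_FOUND, a create is issued, and a create that loses the race is reported as "path exist, request accepts it" and treated as success rather than an error. Below is a minimal standalone sketch of that fetch-or-create idempotency rule in plain C++; Catalog, EnsurePool, and Status are hypothetical names for illustration only, not YDB's actual API.

// Minimal sketch (not YDB's real code) of the idempotent "fetch-or-create
// resource pool" pattern visible in the trace: NOT_FOUND on fetch triggers
// a create, and a create that finds the path already present is accepted.
#include <iostream>
#include <mutex>
#include <set>
#include <string>

enum class Status { Ok, NotFound, AlreadyExists };

class Catalog {                       // stand-in for the scheme catalog
public:
    Status Fetch(const std::string& path) {
        std::lock_guard<std::mutex> g(Mu);
        return Paths.count(path) ? Status::Ok : Status::NotFound;
    }
    Status Create(const std::string& path) {
        std::lock_guard<std::mutex> g(Mu);
        return Paths.insert(path).second ? Status::Ok : Status::AlreadyExists;
    }
private:
    std::mutex Mu;
    std::set<std::string> Paths;
};

// Fetch the pool; on NOT_FOUND create it, accepting a lost race as success.
Status EnsurePool(Catalog& catalog, const std::string& path) {
    if (catalog.Fetch(path) == Status::Ok)
        return Status::Ok;
    Status st = catalog.Create(path);
    if (st == Status::AlreadyExists) {
        std::cout << "path exist, request accepts it: " << path << "\n";
        return Status::Ok;            // somebody else created it first
    }
    return st;
}

int main() {
    Catalog catalog;
    // Two logical requesters race to ensure the same default pool.
    EnsurePool(catalog, "/Root/.metadata/workload_manager/pools/default");
    EnsurePool(catalog, "/Root/.metadata/workload_manager/pools/default");
}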
2025-06-03T10:50:01.116500Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:50:01.130703Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.202524Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.275003Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.295556Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:01.438601Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673609974478606:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.438669Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.447023Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.474556Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.488956Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.505008Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.524373Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.546286Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.556147Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:50:01.578752Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673609974479257:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.578795Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.578969Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673609974479262:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:01.580424Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:50:01.589427Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673609974479264:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:50:01.679858Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673609974479315:3394] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:50:01.909719Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:1, at schemeshard: 72057594046644480 2025-06-03T10:50:02.015934Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715675:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.110592Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:50:02.215849Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715681:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.316120Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715684:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.404066Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpModifyACL, opId: 281474976715687:0, at schemeshard: 72057594046644480 |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> AssignTxId::Basic [GOOD] Test command err: 2025-06-03T10:50:02.042682Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673612722186630:2065];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:50:02.042914Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/0015da/r3tmp/tmpWFvwVd/pdisk_1.dat 2025-06-03T10:50:02.193780Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:50:02.193826Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:50:02.202390Z node 1 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [1:7511673612722186607:2079] 1748947802041128 != 1748947802041131 2025-06-03T10:50:02.205766Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:50:02.206311Z node 1 :HIVE WARN: 
node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:15145 TServer::EnableGrpc on GrpcPort 15316, node 1 2025-06-03T10:50:02.329091Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:50:02.329105Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:50:02.329107Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:50:02.329242Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:15145 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:50:02.508382Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:02.514967Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:50:02.594325Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673612722187277:2328], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.594376Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:02.881814Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateReplication, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:50:02.891619Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:41: [controller 72075186224037888] OnActivateExecutor 2025-06-03T10:50:02.891654Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:17: [controller 72075186224037888][TxInitSchema] Execute 2025-06-03T10:50:02.895693Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init_schema.cpp:26: [controller 72075186224037888][TxInitSchema] Complete 2025-06-03T10:50:02.895727Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:239: [controller 72075186224037888][TxInit] Execute 2025-06-03T10:50:02.895798Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_init.cpp:244: [controller 72075186224037888][TxInit] Complete 2025-06-03T10:50:02.895800Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:113: [controller 72075186224037888] SwitchToWork 2025-06-03T10:50:02.937117Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:142: [controller 72075186224037888] Handle NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976715658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:15316" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } 2025-06-03T10:50:02.937196Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:22: [controller 72075186224037888][TxCreateReplication] Execute: NKikimrReplication.TEvCreateReplication PathId { OwnerId: 72057594046644480 LocalId: 2 } OperationId { TxId: 281474976715658 PartId: 0 } Config { SrcConnectionParams { Endpoint: "localhost:15316" Database: "/Root" OAuthToken { Token: "***" } EnableSsl: false } Specific { Targets { SrcPath: "/Root/table" DstPath: "/Root/replica" } } ConsistencySettings { Global { CommitIntervalMilliSeconds: 10000 } } } 2025-06-03T10:50:02.937242Z node 1 :REPLICATION_CONTROLLER NOTICE: tx_create_replication.cpp:43: [controller 72075186224037888][TxCreateReplication] Add replication: rid# 1, pathId# [OwnerId: 72057594046644480, LocalPathId: 2] 2025-06-03T10:50:02.941165Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_create_replication.cpp:57: [controller 72075186224037888][TxCreateReplication] Complete 2025-06-03T10:50:02.942930Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root/replication TableId: [72057594046644480:2:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindReplication DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:50:02.943102Z node 1 :REPLICATION_CONTROLLER TRACE: tenant_resolver.cpp:33: [TenantResolver][rid 1] Handle NKikimr::TEvTxProxySchemeCache::TEvNavigateKeySetResult: entry# { Path: Root TableId: 
[72057594046644480:1:0] RequestType: ByTableId Operation: OpPath RedirectRequired: false ShowPrivatePath: false SyncVersion: false Status: Ok Kind: KindPath DomainInfo { DomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] ResourcesDomainKey: [OwnerId: 72057594046644480, LocalPathId: 1] Params { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } ServerlessComputeResourcesMode: (empty maybe) Users: [] Groups: [] } } 2025-06-03T10:50:02.943123Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:252: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvResolveTenantResult { ReplicationId: 1 Tenant: /Root Sucess: 1 } 2025-06-03T10:50:02.943127Z node 1 :REPLICATION_CONTROLLER NOTICE: controller.cpp:267: [controller 72075186224037888] Tenant resolved: rid# 1, tenant# /Root 2025-06-03T10:50:02.943132Z node 1 :REPLICATION_CONTROLLER INFO: controller.cpp:271: [controller 72075186224037888] Discover tenant nodes: tenant# /Root 2025-06-03T10:50:02.943482Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:297: [controller 72075186224037888] Handle NKikimr::TEvDiscovery::TEvDiscoveryData 2025-06-03T10:50:02.943493Z node 1 :REPLICATION_CONTROLLER DEBUG: controller.cpp:321: [controller 72075186224037888] Create session: nodeId# 1 TClient::Ls request: /Root/replication TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "replication" PathId: 2 SchemeshardId: 72057594046644480 PathType: EPathTypeReplication CreateFinished: true CreateTxId: 281474976715658 CreateStep: 1748947802994 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 ReplicationVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 7 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 Mediators: 72057594046382081 } DomainKey { SchemeShard: 72057594046644480 PathId: 1 } PathsIns... 
(TRUNCATED) 2025-06-03T10:50:02.968478Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 1 TxId: 0 } 2025-06-03T10:50:02.968523Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 0 2025-06-03T10:50:02.968544Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 1, assigned# 0, allocated# 0, exhausted# 1 2025-06-03T10:50:02.968582Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:50:02.973382Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 0, allocated# 5 2025-06-03T10:50:02.981476Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-03T10:50:02.981970Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 0 } 2025-06-03T10:50:02.981999Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-03T10:50:02.982026Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-03T10:50:02.982196Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 9999 TxId: 18446744073709551615 } 2025-06-03T10:50:02.982205Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-03T10:50:02.982211Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 1, allocated# 4, exhausted# 0 2025-06-03T10:50:02.982395Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 10000 TxId: 0 } 2025-06-03T10:50:02.982401Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 1, allocated# 4 2025-06-03T10:50:02.982621Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-03T10:50:02.982802Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 5000 TxId: 0 } 2025-06-03T10:50:02.982808Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 2, allocated# 3 2025-06-03T10:50:02.982812Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 2, allocated# 3, exhausted# 0 2025-06-03T10:50:02.982991Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle 
NKikimrReplication.TEvGetTxId Versions { Step: 20000 TxId: 0 } Versions { Step: 30000 TxId: 0 } Versions { Step: 40000 TxId: 0 } 2025-06-03T10:50:02.982998Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 3, assigned# 2, allocated# 3 2025-06-03T10:50:02.983132Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 0, exhausted# 0 2025-06-03T10:50:02.983161Z node 1 :REPLICATION_CONTROLLER TRACE: tx_assign_tx_id.cpp:174: [controller 72075186224037888] Handle NKikimr::TEvTxAllocatorClient::TEvAllocateResult 2025-06-03T10:50:02.983164Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 0, assigned# 5, allocated# 5 2025-06-03T10:50:02.983168Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-06-03T10:50:02.983354Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:757: [controller 72075186224037888] Handle NKikimrReplication.TEvGetTxId Versions { Step: 50000 TxId: 0 } 2025-06-03T10:50:02.983359Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:76: [controller 72075186224037888][TxAssignTxId] Execute: pending# 1, assigned# 5, allocated# 5 2025-06-03T10:50:02.983481Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_assign_tx_id.cpp:142: [controller 72075186224037888][TxAssignTxId] Complete: pending# 0, assigned# 5, allocated# 5, exhausted# 0 2025-06-03T10:50:02.985453Z node 1 :REPLICATION_CONTROLLER TRACE: target_discoverer.cpp:27: [TargetDiscoverer][rid 1] Handle NKikimr::NReplication::TEvYdbProxy::TEvDescribePathResponse { Result: { status: SCHEME_ERROR, issues: {
: Error: Path not found } } } 2025-06-03T10:50:02.985468Z node 1 :REPLICATION_CONTROLLER ERROR: target_discoverer.cpp:78: [TargetDiscoverer][rid 1] Describe path failed: path# /Root/table, status# SCHEME_ERROR, issues# {
: Error: Path not found } 2025-06-03T10:50:02.985514Z node 1 :REPLICATION_CONTROLLER TRACE: controller.cpp:172: [controller 72075186224037888] Handle NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-03T10:50:02.985537Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:24: [controller 72075186224037888][TxDiscoveryTargetsResult] Execute: NKikimr::NReplication::NController::TEvPrivate::TEvDiscoveryTargetsResult { ReplicationId: 1 ToAdd [] ToDelete [] Failed [/Root/table: SCHEME_ERROR ({
: Error: Path not found })] } 2025-06-03T10:50:02.985544Z node 1 :REPLICATION_CONTROLLER ERROR: tx_discovery_targets_result.cpp:76: [controller 72075186224037888][TxDiscoveryTargetsResult] Discovery error: rid# 1, error# /Root/table: SCHEME_ERROR ({
: Error: Path not found }) 2025-06-03T10:50:02.987028Z node 1 :REPLICATION_CONTROLLER DEBUG: tx_discovery_targets_result.cpp:89: [controller 72075186224037888][TxDiscoveryTargetsResult] Complete >> TBSVWithReboots::CreateAlter |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> tier_delete.py::TestTierDelete::test_delete_s3_ttl [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c05/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c05/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk2/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1182919 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-3.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select1-4.test] >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff [GOOD] |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/kqp/ut/scheme/unittest >> KqpScheme::AlterTableAddUniqIndexPublicApiFeatureOff [GOOD] Test command err: Trying to start YDB, gRPC: 21284, MsgBus: 4433 2025-06-03T10:49:56.115260Z node 1 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[1:7511673589454445702:2209];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:56.115298Z node 1 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001649/r3tmp/tmpXsdkm3/pdisk_1.dat 2025-06-03T10:49:56.195623Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 21284, node 1 2025-06-03T10:49:56.208797Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:56.208810Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:56.208812Z node 1 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:56.208865Z node 1 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration 2025-06-03T10:49:56.218474Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Unknown -> Disconnected 
2025-06-03T10:49:56.218512Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:49:56.219412Z node 1 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(1, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:4433 TClient is connected to server localhost:4433 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. 2025-06-03T10:49:56.295640Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.300029Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 2025-06-03T10:49:56.312890Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.383723Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.453107Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.475992Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:49:56.574704Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673589454447180:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.574730Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.635118Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.645412Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.658880Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.669150Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.727766Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.739106Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.752641Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:49:56.768669Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673589454447833:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.768702Z node 1 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.768718Z node 1 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [1:7511673589454447838:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:49:56.769853Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:49:56.779115Z node 1 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [1:7511673589454447840:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:49:56.845084Z node 1 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [1:7511673589454447891:3398] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:49:57.044719Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.061838Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715673:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.075310Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715674:0, at schemeshard: 72057594046644480 2025-06-03T10:49:57.417953Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715677:1, at schemeshard: 72057594046644480 2025-06-03T10:49:57.438435Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715678:1, at schemeshard: 72057594046644480 2025-06-03T10:49:57.466196Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterTable, opId: 281474976715679:1, at schemeshard: 72057594046644480 Trying to start YDB, gRPC: 11523, MsgBus: 5580 2025-06-03T10:49:57.688059Z node 2 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[2:7511673590910694466:2200];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:49:57.688994Z node 2 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001649/r3tmp/tmpYqvPBG/pdisk_1.dat 2025-06-03T10:49:57.724926Z node 2 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:57.725188Z node 2 :CONFIGS_DISPATCHER ERROR: configs_dispatcher.cpp:1228: Notification cookie mismatch for subscription [2:7511673590910694305:2079] 1748947797687249 != 1748947797687252 TServer::EnableGrpc on GrpcPort 11523, node 2 2025-06-03T10:49:57.737288Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:49:57.737324Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:49:57.737327Z node 2 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:49:57.737399Z node 2 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad 
distributable configuration TClient is connected to server localhost:5580 2025-06-03T10:49:57.804010Z node ... ault}. Got request, become WaitResolveState 2025-06-03T10:50:03.033035Z node 5 :KQP_EXECUTER DEBUG: kqp_table_resolver.cpp:271: TxId: 281474976715674. Resolved key sets: 0 2025-06-03T10:50:03.033066Z node 5 :KQP_EXECUTER DEBUG: kqp_planner.cpp:553: TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: true, 0 scan tasks on 0 nodes, localComputeTasks: 0, snapshot: {0, 0} 2025-06-03T10:50:03.033092Z node 5 :KQP_EXECUTER INFO: kqp_data_executer.cpp:2805: ActorId: [5:7511673617556371146:2519] TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Total tasks: 0, readonly: 1, datashardTxs: 0, evWriteTxs: 0, topicTxs: 0, volatile: 0, immediate: 1, pending compute tasks0, useFollowers: 0 2025-06-03T10:50:03.033095Z node 5 :KQP_EXECUTER TRACE: kqp_data_executer.cpp:2808: ActorId: [5:7511673617556371146:2519] TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Updating channels after the creation of compute actors 2025-06-03T10:50:03.033131Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:2151: ActorId: [5:7511673617556371146:2519] TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. terminate execution. 2025-06-03T10:50:03.033134Z node 5 :KQP_EXECUTER TRACE: kqp_executer_impl.h:2165: ActorId: [5:7511673617556371146:2519] TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. Terminate, become ZombieState 2025-06-03T10:50:03.033156Z node 5 :KQP_EXECUTER DEBUG: kqp_executer_impl.h:839: ActorId: [5:7511673617556371146:2519] TxId: 281474976715674. Ctx: { TraceId: 01jwtpk6wj19vq1tw9rrhgmh96, Database: /Root, DatabaseId: /Root, SessionId: ydb://session/3?node_id=5&id=YzUxNDA0NzktZTI0M2UzZTUtNjM2ZmEzZjMtOGRhN2JiMWM=, CurrentExecutionId: , CustomerSuppliedId: , PoolId: default}. 
Resource usage for last stat interval: ComputeTime: 0.000000s ReadRows: 0 ReadBytes: 0 ru: 1 rate limiter was not found force flag: 1 Trying to start YDB, gRPC: 4386, MsgBus: 13101 2025-06-03T10:50:03.415508Z node 6 :METADATA_PROVIDER WARN: log.cpp:784: fline=table_exists.cpp:54;actor=TTableExistsActor;event=undelivered;self_id=[6:7511673619372836544:2091];send_to=[0:7307199536658146131:7762515]; 2025-06-03T10:50:03.415849Z node 6 :METADATA_PROVIDER ERROR: log.cpp:784: fline=accessor_snapshot_base.cpp:71;action=cannot detect path existence;path=//Root/.metadata/initialization/migrations;error=scheme_cache_undelivered_message; test_client.cpp: SetPath # /home/runner/.ya/build/build_root/u93c/001649/r3tmp/tmpGJ70AQ/pdisk_1.dat 2025-06-03T10:50:03.450373Z node 6 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded TServer::EnableGrpc on GrpcPort 4386, node 6 2025-06-03T10:50:03.466006Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:190: distributable config is empty, broken or outdated, will use file: (empty maybe) 2025-06-03T10:50:03.466028Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:196: will try to initialize from file: (empty maybe) 2025-06-03T10:50:03.466030Z node 6 :NET_CLASSIFIER WARN: net_classifier.cpp:204: failed to initialize from file: (empty maybe) 2025-06-03T10:50:03.466078Z node 6 :NET_CLASSIFIER ERROR: net_classifier.cpp:228: got bad distributable configuration TClient is connected to server localhost:13101 2025-06-03T10:50:03.526548Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Unknown -> Disconnected 2025-06-03T10:50:03.526573Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Disconnected -> Connecting 2025-06-03T10:50:03.527664Z node 6 :HIVE WARN: node_info.cpp:25: HIVE#72057594037968897 Node(6, (0,0,0,0)) VolatileState: Connecting -> Connected TClient is connected to server localhost:13101 WaitRootIsUp 'Root'... TClient::Ls request: Root TClient::Ls response: Status: 1 StatusCode: SUCCESS SchemeStatus: 0 PathDescription { Self { Name: "Root" PathId: 1 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 1 CreateStep: 0 ParentPathId: 1 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 1 SubDomainVersion: 0 SecurityStateVersion: 0 } ChildrenExist: false } Children { Name: ".sys" PathId: 18446744073709551615 SchemeshardId: 72057594046644480 PathType: EPathTypeDir CreateFinished: true CreateTxId: 0 CreateStep: 0 ParentPathId: 18446744073709551615 } DomainDescription { SchemeShardId_Depricated: 72057594046644480 PathId_Depricated: 1 ProcessingParams { Version: 0 Pl... (TRUNCATED) WaitRootIsUp 'Root' success. waiting... 2025-06-03T10:50:03.566451Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 281474976715657:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.568842Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 281474976715657, at schemeshard: 72057594046644480 waiting... 
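The AssignTxId::Basic trace above counts tx ids as pending/assigned/allocated: TEvGetTxId requests carry versions (Step, TxId), ids arrive from the allocator in batches of 5 (TEvAllocateResult), and versions falling into an already-assigned commit interval (CommitIntervalMilliSeconds: 10000 in the replication config) reuse the same id instead of drawing a fresh one. The sketch below is one plausible reading of that accounting, not the controller's real code: TxIdAssigner and its members are hypothetical names, the per-interval bucketing and batch size are read off the trace, and the real controller also retires stale intervals (assigned# stays bounded in the trace), which this sketch omits.

// Schematic of the assigned/allocated accounting, under the assumptions
// stated above. Steps in the same commit interval share one tx id; a new
// interval draws from the pre-allocated pool, refilled in batches of 5.
#include <cstdint>
#include <deque>
#include <iostream>
#include <map>

class TxIdAssigner {
public:
    explicit TxIdAssigner(uint64_t commitIntervalMs)
        : CommitInterval(commitIntervalMs) {}

    // Returns the tx id for a version step, drawing from the allocated pool.
    uint64_t GetTxId(uint64_t step) {
        const uint64_t bucket = step / CommitInterval;  // assumed bucketing
        auto it = Assigned.find(bucket);
        if (it != Assigned.end())
            return it->second;                          // reuse, no draw
        if (Allocated.empty())
            Refill();                                   // TEvAllocateResult
        const uint64_t txId = Allocated.front();
        Allocated.pop_front();
        Assigned[bucket] = txId;
        std::cout << "assigned# " << Assigned.size()
                  << ", allocated# " << Allocated.size() << "\n";
        return txId;
    }

private:
    void Refill() {                                     // batch of 5, as in
        for (int i = 0; i < 5; ++i)                     // the trace above
            Allocated.push_back(NextFromAllocator++);
    }

    const uint64_t CommitInterval;
    std::map<uint64_t, uint64_t> Assigned;  // interval bucket -> tx id
    std::deque<uint64_t> Allocated;         // pre-allocated, unassigned ids
    uint64_t NextFromAllocator = 1000;      // hypothetical allocator cursor
};

int main() {
    TxIdAssigner assigner(/*commitIntervalMs=*/10000);  // from the config
    // Steps 1, 9999, 5000 share bucket 0; 10000 and 20000 each draw anew.
    for (uint64_t step : {1, 9999, 10000, 5000, 20000})
        std::cout << "Step " << step << " -> TxId "
                  << assigner.GetTxId(step) << "\n";
}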
2025-06-03T10:50:03.574709Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715658:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.606467Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715659:0, at schemeshard: 72057594046644480 waiting... waiting... 2025-06-03T10:50:03.642022Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715660:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.656767Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715661:0, at schemeshard: 72057594046644480 waiting... 2025-06-03T10:50:03.865615Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673619372838127:2403], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:03.865636Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:03.877781Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715662:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.887707Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715663:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.899731Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715664:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.914418Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715665:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.927478Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715666:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.941949Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715667:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.955845Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715668:0, at schemeshard: 72057594046644480 2025-06-03T10:50:03.973985Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673619372838780:2466], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:03.974013Z node 6 :KQP_WORKLOAD_SERVICE WARN: kqp_workload_service.cpp:256: [WorkloadService] [Service] Failed to fetch pool default, DatabaseId: /Root, status: NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:03.974125Z node 6 :KQP_WORKLOAD_SERVICE WARN: scheme_actors.cpp:225: [WorkloadService] [TPoolFetcherActor] ActorId: [6:7511673619372838785:2469], DatabaseId: /Root, PoolId: default, Failed to fetch pool info, NOT_FOUND, issues: {
: Error: Resource pool default not found or you don't have access permissions } 2025-06-03T10:50:03.975138Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateResourcePool, opId: 281474976715669:3, at schemeshard: 72057594046644480 2025-06-03T10:50:03.983199Z node 6 :KQP_WORKLOAD_SERVICE WARN: helpers.h:73: [WorkloadService] [TPoolCreatorActor] ActorId: [6:7511673619372838787:2470], DatabaseId: /Root, PoolId: default, Scheduled retry for error: {
: Error: Transaction 281474976715669 completed, doublechecking } 2025-06-03T10:50:04.055080Z node 6 :TX_PROXY ERROR: schemereq.cpp:546: Actor# [6:7511673623667806134:3397] txid# 281474976715670, issues: { message: "Check failed: path: \'/Root/.metadata/workload_manager/pools/default\', error: path exist, request accepts it (id: [OwnerId: 72057594046644480, LocalPathId: 16], type: EPathTypeResourcePool, state: EPathStateNoChanges), source_location: ydb/core/tx/schemeshard/schemeshard__operation_create_resource_pool.cpp:92" severity: 1 } 2025-06-03T10:50:04.215152Z node 6 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpCreateTable, opId: 281474976715672:0, at schemeshard: 72057594046644480 |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> data_correctness.py::TestDataCorrectness::test [GOOD] >> zip_bomb.py::TestZipBomb::test ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithPath::test_validates_deduplication_id[tables_format_v1] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |78.4%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-1.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-10.test] >> test_log_scenario.py::TestLogScenario::test_log_deviation[1051200] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-4.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-5.test] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> TBSVWithReboots::CreateAssignUnassignDrop |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v1] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-11.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-12.test] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-insert.test] [GOOD] >> test_format_setting.py::TestS3::test_parquet_converters_to_datetime[v2] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} 
ydb/tests/functional/blobstorage/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/backpressure/ut/unittest |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/blobstorage/vdisk/hullop/ut/unittest |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test |78.5%| [TA] $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [TA] {RESULT} $(B)/ydb/core/blobstorage/backpressure/ut/test-results/unittest/{meta.json ... results_accumulator.log} |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/replication/controller/ut_assign_tx_id/unittest >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldRequestCompactionsSchemeshardRestart |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test >> TSchemeshardCompactionQueueTest::UpdateBelowThreshold [GOOD] >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowSearchHeightThreshold [GOOD] >> TSchemeshardCompactionQueueTest::EnqueueBelowRowDeletesThreshold [GOOD] >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v0] [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::UpdateWithEmptyShard [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_compaction/unittest >> TSchemeshardCompactionQueueTest::CheckOrderWhenAllQueues [GOOD] >> TSchemeshardBackgroundCompactionTest::SchemeshardShouldNotCompactBackups >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select2-5.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[plan-select3-1.test] >> test_log_scenario.py::TestLogScenario::test_log_deviation[180] >> test_log_scenario.py::TestLogScenario::test_log_uniform |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_can_read_new_written_data_on_visibility_timeout[tables_format_v0] [GOOD] >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |78.5%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/py3test >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_can_read_from_different_groups[tables_format_v1] [GOOD] >> test_fifo_messaging.py::TestSqsFifoMessagingWithTenant::test_crutch_groups_selection_algorithm_selects_second_group_batch[tables_format_v0] >> test_dml.py::TestDML::test_dml[table_index_0__SYNC-pk_types9-all_types9-index9---SYNC] >> test_dml.py::TestDML::test_dml[table_index_3__SYNC-pk_types6-all_types6-index6---SYNC] >> test_dml.py::TestDML::test_dml[table_index_1_UNIQUE_SYNC-pk_types3-all_types3-index3--UNIQUE-SYNC] >> test_dml.py::TestDML::test_dml[table_ttl_Uint32-pk_types14-all_types14-index14-Uint32--] >> 
test_dml.py::TestDML::test_dml[table_index_1__SYNC-pk_types8-all_types8-index8---SYNC] >> test_dml.py::TestDML::test_dml[table_index_0__ASYNC-pk_types11-all_types11-index11---ASYNC] >> test_dml.py::TestDML::test_dml[table_all_types-pk_types12-all_types12-index12---] |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/blobstorage/py3test >> test_replication.py::TestReplicationAfterNodesRestart::test_replication[mirror-3-dc] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-15.test] [GOOD] >> test_sql_logic.py::TestSQLLogic::test_sql_suite[results-select3-2.test] >> TBSVWithReboots::Create [GOOD] |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_works[tables_format_v1-std] [GOOD] >> test.py::test[join-opt_on_opt_side-off-Results] [SKIPPED] >> test.py::test[join-premap_common_semi--Results] >> test.py::test[action-action_eval_cluster_use_compact_named_exprs--Results] [SKIPPED] >> test.py::test[action-nested_subquery--Results] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/core/tx/schemeshard/ut_bsvolume_reboots/unittest >> TBSVWithReboots::Create [GOOD] Test command err: ==== RunWithTabletReboots =========== RUN: Trace =========== Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] IGNORE Leader for TabletID 72057594046678944 is [0:0:0] sender: [1:115:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] IGNORE Leader for TabletID 72057594046447617 is [0:0:0] sender: [1:116:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] IGNORE Leader for TabletID 72057594046316545 is [0:0:0] sender: [1:117:2058] recipient: [1:111:2142] Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:127:2058] recipient: [1:109:2140] Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:130:2058] recipient: [1:110:2141] Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:134:2058] recipient: [1:111:2142] 2025-06-03T10:49:56.663578Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7493: BackgroundCompactionQueue configured: Timeout# 600.000000s, compact single parted# no, Rate# 1, WakeupInterval# 60.000000s, RoundInterval# 172800.000000s, InflightLimit# 1, MinCompactionRepeatDelaySeconds# 600.000000s, MaxRate# 1 2025-06-03T10:49:56.663608Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7521: BorrowedCompactionQueue configured: Timeout# 15.000000s, Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:56.663616Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7407: StatsBatching config: StatsBatchTimeout# 0.000000s, StatsMaxBatchSize# 0, StatsMaxExecuteTime# 0.010000s 2025-06-03T10:49:56.663622Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7423: OperationsProcessing config: using default configuration 2025-06-03T10:49:56.663636Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxMergeTablePartition, limit 10000 2025-06-03T10:49:56.663641Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7429: OperationsProcessing config: type TxSplitTablePartition, limit 10000 2025-06-03T10:49:56.663651Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7553: BackgroundCleaningQueue configured: Timeout# 15.000000s, 
Rate# 0, WakeupInterval# 1.000000s, InflightLimit# 10 2025-06-03T10:49:56.663670Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:39: [RootDataErasureManager] Created: Timeout# 600, Rate# 0, InflightLimit# 10, DataErasureInterval# 604800.000000s, DataErasureBSCInterval# 600.000000s, CurrentWakeupInterval# 604800.000000s, IsManualStartup# false 2025-06-03T10:49:56.663792Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# 2025-06-03T10:49:56.663859Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:14: TxInitSchema.Execute 2025-06-03T10:49:56.676754Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:7656: Got new config: QueryServiceConfig { AllExternalDataSourcesAreAvailable: true } 2025-06-03T10:49:56.676782Z node 1 :IMPORT WARN: schemeshard_import.cpp:303: Table profiles were not loaded 2025-06-03T10:49:56.676882Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_impl.cpp:7624: ExternalSources configured: HostnamePatterns# , AvailableExternalDataSources# Leader for TabletID 72057594046447617 is [1:129:2153] sender: [1:176:2058] recipient: [1:15:2062] 2025-06-03T10:49:56.682346Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__init_schema.cpp:20: TxInitSchema.Complete 2025-06-03T10:49:56.682414Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:96: TTxUpgradeSchema.Execute 2025-06-03T10:49:56.682480Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__upgrade_schema.cpp:44: UpgradeInitState as Uninitialized, schemeshardId: 72057594046678944 2025-06-03T10:49:56.690865Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__upgrade_schema.cpp:122: TTxUpgradeSchema.Complete 2025-06-03T10:49:56.690943Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__background_cleaning.cpp:445: Clear TempDirsState with owners number: 0 2025-06-03T10:49:56.691094Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init.cpp:1348: TTxInit, SS hasn't been configured yet, state: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:56.691241Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:32: TTxInitRoot DoExecute, path: MyRoot, pathId: [OwnerId: 72057594046678944, LocalPathId: 1], at schemeshard: 72057594046678944 2025-06-03T10:49:56.694871Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__init_root.cpp:157: TTxInitRoot DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:56.694979Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__root_data_erasure_manager.cpp:84: [RootDataErasureManager] Stop 2025-06-03T10:49:56.695380Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:49:56.695401Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:49:56.695446Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:28: TTxServerlessStorageBilling.Execute 2025-06-03T10:49:56.695460Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__serverless_storage_billing.cpp:38: TTxServerlessStorageBilling: unable to make a bill, domain is not a serverless db, schemeshardId: 72057594046678944, domainId: [OwnerId: 72057594046678944, LocalPathId: 1] 2025-06-03T10:49:56.695467Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__serverless_storage_billing.cpp:202: TTxServerlessStorageBilling.Complete 2025-06-03T10:49:56.695496Z node 1 
:FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:6676: Handle: TEvAllocateResult: Cookie# 0, at schemeshard: 72057594046678944 Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] IGNORE Leader for TabletID 72057594037968897 is [0:0:0] sender: [1:213:2058] recipient: [1:211:2211] Leader for TabletID 72057594037968897 is [1:217:2215] sender: [1:218:2058] recipient: [1:211:2211] 2025-06-03T10:49:56.697363Z node 1 :HIVE INFO: tablet_helpers.cpp:1130: [72057594037968897] started, primary subdomain 0:0 Leader for TabletID 72057594046678944 is [1:126:2151] sender: [1:238:2058] recipient: [1:15:2062] 2025-06-03T10:49:56.718896Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:372: TTxOperationPropose Execute, message: Transaction { WorkingDir: "/" OperationType: ESchemeOpAlterSubDomain SubDomain { Name: "MyRoot" StoragePools { Name: "pool-1" Kind: "pool-kind-1" } StoragePools { Name: "pool-2" Kind: "pool-kind-2" } } } TxId: 1 TabletId: 72057594046678944 , at schemeshard: 72057594046678944 2025-06-03T10:49:56.719018Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_alter_subdomain.cpp:89: TAlterSubDomain Propose, path: //MyRoot, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.719099Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason transaction target path for pathId [OwnerId: 72057594046678944, LocalPathId: 1] was 0 2025-06-03T10:49:56.719152Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:130: IgniteOperation, opId: 1:1, propose status:StatusAccepted, reason: , at schemeshard: 72057594046678944 2025-06-03T10:49:56.719166Z node 1 :FLAT_TX_SCHEMESHARD WARN: schemeshard__operation.cpp:178: Operation part proposed ok, but propose itself is undo unsafe, suboperation type: ESchemeOpAlterSubDomain, opId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.720292Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:451: TTxOperationPropose Complete, txId: 1, response: Status: StatusAccepted TxId: 1 SchemeshardId: 72057594046678944 PathId: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:56.720331Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard_audit_log.cpp:222: AUDIT: txId: 1, subject: , status: StatusAccepted, operation: ALTER DATABASE, path: //MyRoot 2025-06-03T10:49:56.720397Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.720410Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:313: TCreateParts opId# 1:0 ProgressState, operation type: TxAlterSubDomain, at tablet# 72057594046678944 2025-06-03T10:49:56.720418Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common.cpp:367: TCreateParts opId# 1:0 ProgressState no shards to create, do next state 2025-06-03T10:49:56.720424Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 2 -> 3 2025-06-03T10:49:56.721025Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.721040Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:141: NSubDomainState::TConfigureParts operationId# 1:0 ProgressState, at schemeshard: 72057594046678944 2025-06-03T10:49:56.721045Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1:0 3 -> 128 2025-06-03T10:49:56.721416Z 
node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.721430Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:373: NSubDomainState::TPropose ProgressState, operationId: 1:0, at schemeshard: 72057594046678944 2025-06-03T10:49:56.721436Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_common_subdomain.cpp:389: NSubDomainState::TPropose ProgressState leave, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:56.721444Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1651: TOperation IsReadyToPropose , TxId: 1 ready parts: 1/1 2025-06-03T10:49:56.722283Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1720: TOperation DoPropose send propose to coordinator: 72057594046316545 message:Transaction { AffectedSet { TabletId: 72057594046678944 Flags: 2 } ExecLevel: 0 TxId: 1 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:49:56.723613Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1 msg type: 269090816 2025-06-03T10:49:56.723676Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1, partId: 4294967295, tablet: 72057594046316545 Leader for TabletID 72057594046316545 is [1:133:2155] sender: [1:253:2058] recipient: [1:15:2062] FAKE_COORDINATOR: Add transaction: 1 at step: 5000001 FAKE_COORDINATOR: advance: minStep5000001 State->FrontStep: 0 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1 at step: 5000001 2025-06-03T10:49:56.723965Z node 1 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000001, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:49:56.724009Z node 1 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1 Coordinator: 72057594046316545 AckTo { RawX1: 133 RawX2: 4294969451 } } Step: 5000001 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:49:56.724019Z node 1 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_subdomain.cpp:303: NSubDomainState::TPropose HandleReply TEvOperationPlan, operationId 1:0, at tablet# 72057594046678944 2025-06-03T10:49:56.724094Z node 1 :FLAT_TX_SCHEMESHARD INFO: sch ... 
ags: 2 } ExecLevel: 0 TxId: 1001 MinStep: 0 MaxStep: 18446744073709551615 IgnoreLowDiskSpace: true } CoordinatorID: 72057594046316545 2025-06-03T10:50:12.349234Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:652: Send tablet strongly msg operationId: 1001:4294967295 from tablet: 72057594046678944 to tablet: 72057594046316545 cookie: 0:1001 msg type: 269090816 2025-06-03T10:50:12.349271Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1752: TOperation RegisterRelationByTabletId, TxId: 1001, partId: 4294967295, tablet: 72057594046316545 FAKE_COORDINATOR: Add transaction: 1001 at step: 5000003 FAKE_COORDINATOR: advance: minStep5000003 State->FrontStep: 5000002 FAKE_COORDINATOR: Send Plan to tablet 72057594046678944 for txId: 1001 at step: 5000003 2025-06-03T10:50:12.349388Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation.cpp:676: TTxOperationPlanStep Execute, stepId: 5000003, transactions count in step: 1, at schemeshard: 72057594046678944 2025-06-03T10:50:12.349417Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:680: TTxOperationPlanStep Execute, message: Transactions { TxId: 1001 Coordinator: 72057594046316545 AckTo { RawX1: 134 RawX2: 231928236140 } } Step: 5000003 MediatorID: 0 TabletID: 72057594046678944, at schemeshard: 72057594046678944 2025-06-03T10:50:12.349431Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common_bsv.cpp:141: NBSVState::TPropose operationId# 1001:0 HandleReply TEvOperationPlan, at schemeshard: 72057594046678944 2025-06-03T10:50:12.349498Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:2500: Change state for txid 1001:0 128 -> 240 2025-06-03T10:50:12.349557Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 1 2025-06-03T10:50:12.349573Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:476: IncrementPathDbRefCount reason publish path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:50:12.350100Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:28: TTxPublishToSchemeBoard DoExecute, at schemeshard: 72057594046678944 2025-06-03T10:50:12.350116Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 2] 2025-06-03T10:50:12.350163Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:40: TTxPublishToSchemeBoard DescribePath, at schemeshard: 72057594046678944, txId: 1001, path id: [OwnerId: 72057594046678944, LocalPathId: 3] 2025-06-03T10:50:12.350192Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:54: TTxPublishToSchemeBoard DoComplete, at schemeshard: 72057594046678944 2025-06-03T10:50:12.350198Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [54:206:2207], at schemeshard: 72057594046678944, txId: 1001, path id: 2 2025-06-03T10:50:12.350205Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:66: TTxPublishToSchemeBoard Send, to populator: [54:206:2207], at schemeshard: 72057594046678944, txId: 1001, path id: 3 FAKE_COORDINATOR: Erasing txId 1001 2025-06-03T10:50:12.350314Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:485: TTxOperationProgress Execute, operationId: 1001:0, at 
schemeshard: 72057594046678944 2025-06-03T10:50:12.350324Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_common.cpp:491: [72057594046678944] TDone opId# 1001:0 ProgressState 2025-06-03T10:50:12.350339Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1001:0 progress is 1/1 2025-06-03T10:50:12.350344Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:50:12.350350Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__operation_side_effects.cpp:906: Part operation is done id#1001:0 progress is 1/1 2025-06-03T10:50:12.350353Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:50:12.350358Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1607: TOperation IsReadyToNotify, TxId: 1001, ready parts: 1/1, is published: false 2025-06-03T10:50:12.350364Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation.cpp:1642: TOperation IsReadyToDone TxId: 1001 ready parts: 1/1 2025-06-03T10:50:12.350369Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:973: Operation and all the parts is done, operation id: 1001:0 2025-06-03T10:50:12.350374Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:5179: RemoveTx for txid 1001:0 2025-06-03T10:50:12.350429Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove txstate target path for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 5 2025-06-03T10:50:12.350440Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__operation_side_effects.cpp:982: Publication still in progress, tx: 1001, publications: 2, subscribers: 0 2025-06-03T10:50:12.350445Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 2], 5 2025-06-03T10:50:12.350448Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__operation_side_effects.cpp:989: Publication details: tx: 1001, [OwnerId: 72057594046678944, LocalPathId: 3], 2 2025-06-03T10:50:12.350609Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:50:12.350623Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 2 Version: 5 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:50:12.350628Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 2, at schemeshard: 72057594046678944, txId: 1001 2025-06-03T10:50:12.350633Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 2], version: 5 2025-06-03T10:50:12.350637Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 2] was 2 2025-06-03T10:50:12.350824Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard_impl.cpp:5839: Handle TEvUpdateAck, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 
LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:50:12.350840Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:96: TTxAckPublishToSchemeBoard Execute, at schemeshard: 72057594046678944, msg: Owner: 72057594046678944 Generation: 2 LocalPathId: 3 Version: 2 PathOwnerId: 72057594046678944, cookie: 1001 2025-06-03T10:50:12.350845Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:147: Publication in-flight, count: 1, at schemeshard: 72057594046678944, txId: 1001 2025-06-03T10:50:12.350850Z node 54 :FLAT_TX_SCHEMESHARD INFO: schemeshard__publish_to_scheme_board.cpp:194: AckPublish, at schemeshard: 72057594046678944, txId: 1001, pathId: [OwnerId: 72057594046678944, LocalPathId: 3], version: 2 2025-06-03T10:50:12.350855Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard_impl.cpp:487: DecrementPathDbRefCount reason remove publishing for pathId [OwnerId: 72057594046678944, LocalPathId: 3] was 4 2025-06-03T10:50:12.350867Z node 54 :FLAT_TX_SCHEMESHARD NOTICE: schemeshard__publish_to_scheme_board.cpp:155: Publication complete, notify & remove, at schemeshard: 72057594046678944, txId: 1001, subscribers: 0 2025-06-03T10:50:12.352065Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 2025-06-03T10:50:12.352140Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: schemeshard__publish_to_scheme_board.cpp:175: TTxAckPublishToSchemeBoard Complete, at schemeshard: 72057594046678944, cookie: 1001 TestModificationResult got TxId: 1001, wait until txId: 1001 TestWaitNotification wait txId: 1001 2025-06-03T10:50:12.352211Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:210: tests -- TTxNotificationSubscriber for txId 1001: send EvNotifyTxCompletion 2025-06-03T10:50:12.352220Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:256: tests -- TTxNotificationSubscriber, SendToSchemeshard, txId 1001 2025-06-03T10:50:12.352299Z node 54 :FLAT_TX_SCHEMESHARD WARN: schemeshard__notify.cpp:122: NotifyTxCompletion, unknown transaction, txId: 1001, at schemeshard: 72057594046678944 2025-06-03T10:50:12.352325Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:227: tests -- TTxNotificationSubscriber for txId 1001: got EvNotifyTxCompletionResult 2025-06-03T10:50:12.352342Z node 54 :FLAT_TX_SCHEMESHARD DEBUG: test_env.cpp:236: tests -- TTxNotificationSubscriber for txId 1001: satisfy waiter [54:376:2355] TestWaitNotification: OK eventTxId 1001 2025-06-03T10:50:12.352431Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:31: TTxDescribeScheme DoExecute, record: Path: "/MyRoot/DirA/BSVolume_1" Options { ReturnPartitioningInfo: false ReturnPartitionConfig: false BackupInfo: false ReturnBoundaries: false ShowPrivateTable: false }, at schemeshard: 72057594046678944 2025-06-03T10:50:12.352483Z node 54 :SCHEMESHARD_DESCRIBE INFO: schemeshard__describe_scheme.cpp:50: Tablet 72057594046678944 describe path "/MyRoot/DirA/BSVolume_1" took 65us result status StatusSuccess 2025-06-03T10:50:12.352579Z node 54 :SCHEMESHARD_DESCRIBE DEBUG: schemeshard__describe_scheme.cpp:56: TTxDescribeScheme DoComplete, result: Status: StatusSuccess Path: "/MyRoot/DirA/BSVolume_1" PathDescription { Self { Name: "BSVolume_1" PathId: 3 SchemeshardId: 72057594046678944 PathType: EPathTypeBlockStoreVolume CreateFinished: true CreateTxId: 1001 CreateStep: 5000003 ParentPathId: 2 PathState: EPathStateNoChanges Owner: "root@builtin" ACL: "" EffectiveACL: "" 
PathVersion: 2 PathSubType: EPathSubTypeEmpty Version { GeneralVersion: 2 ACLVersion: 0 EffectiveACLVersion: 0 UserAttrsVersion: 1 ChildrenVersion: 0 BSVVersion: 1 } ChildrenExist: false } DomainDescription { SchemeShardId_Depricated: 72057594046678944 PathId_Depricated: 1 ProcessingParams { Version: 1 PlanResolution: 50 Coordinators: 72057594046316545 TimeCastBucketsPerMediator: 2 } DomainKey { SchemeShard: 72057594046678944 PathId: 1 } PathsInside: 2 PathsLimit: 10000 ShardsInside: 2 ShardsLimit: 200000 ResourcesDomainKey { SchemeShard: 72057594046678944 PathId: 1 } DiskSpaceUsage { Tables { TotalSize: 0 DataSize: 0 IndexSize: 0 } Topics { ReserveSize: 0 AccountSize: 0 DataSize: 0 UsedReserveSize: 0 } } PQPartitionsInside: 0 PQPartitionsLimit: 1000000 } BlockStoreVolumeDescription { Name: "BSVolume_1" PathId: 3 VolumeConfig { BlockSize: 4096 Partitions { BlockCount: 16 } Version: 1 DiskId: "foo" ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-1" } ExplicitChannelProfiles { PoolKind: "pool-kind-2" } } Partitions { PartitionId: 0 TabletId: 72075186233409546 } VolumeTabletId: 72075186233409547 AlterVersion: 1 MountToken: "" TokenVersion: 0 } } PathId: 3 PathOwnerId: 72057594046678944, at schemeshard: 72057594046678944 >> test.py::test[column_group-hint_append_fail-diff_grp-Results] [SKIPPED] >> test.py::test[column_order-select_action-default.txt-Results] >> test_dml.py::TestDML::test_dml[table_ttl_Datetime-pk_types16-all_types16-index16-Datetime--] >> test_dml.py::TestDML::test_dml[table_ttl_DyNumber-pk_types13-all_types13-index13-DyNumber--] >> test_dml.py::TestDML::test_dml[table_index_1__ASYNC-pk_types10-all_types10-index10---ASYNC] ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/olap/ttl_tiering/py3test >> ttl_delete_s3.py::TestDeleteTtl::test_ttl_delete [GOOD] Test command err: library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c06/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.out.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback library/recipes/common/__init__.py:29: ResourceWarning: unclosed file <_io.TextIOWrapper name='/home/runner/.ya/build/build_root/u93c/001c06/ydb/tests/olap/ttl_tiering/test-results/py3test/testing_out_stuff/chunk4/testing_out_stuff/moto_server.err.log' mode='w' encoding='utf-8'> process = subprocess.Popen( ResourceWarning: Enable tracemalloc to get the object allocation traceback contrib/tools/python3/Lib/subprocess.py:1129: ResourceWarning: subprocess 1182654 is still running ResourceWarning: Enable tracemalloc to get the object allocation traceback ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_send_message[tables_format_v0-fifo] [GOOD] Test command err: ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ------- [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithTenant::test_change_visibility_batch_works[tables_format_v0-std] [GOOD] Test command err: 
ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( ydb/tests/library/sqs/requests_client.py:140: DeprecationWarning: The 'warn' method is deprecated, use 'warning' instead logger.warn("Last request failed with code {}, reason '{}' and text '{}'".format( |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_dml.py::TestDML::test_dml[table_index_3_UNIQUE_SYNC-pk_types1-all_types1-index1--UNIQUE-SYNC] |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/datashard/dml/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[results-select.test] [GOOD] >> test_dml.py::TestDML::test_dml[table_index_0_UNIQUE_SYNC-pk_types4-all_types4-index4--UNIQUE-SYNC] >> test_dml.py::TestDML::test_dml[table_ttl_Timestamp-pk_types17-all_types17-index17-Timestamp--] |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/suite_tests/py3test >> test_postgres.py::TestPGSQL::test_sql_suite[plan-select.test] [GOOD] |78.6%| [TM] {default-linux-x86_64, relwithdebinfo} ydb/tests/functional/sqs/messaging/py3test >> test_generic_messaging.py::TestSqsGenericMessagingWithPath::test_delete_message_batch_deduplicates_receipt_handle[tables_format_v0-fifo] [GOOD] >> test_dml.py::TestDML::test_dml[table_ttl_Uint64-pk_types15-all_types15-index15-Uint64--] >> test_dml.py::TestDML::test_dml[table_index_2_UNIQUE_SYNC-pk_types2-all_types2-index2--UNIQUE-SYNC] >> test_dml.py::TestDML::test_dml[table_ttl_Date-pk_types18-all_types18-index18-Date--]
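Both recurring stderr warnings in the test command output above map to small, mechanical fixes on the Python side. A minimal sketch of the two corrections; the wrapper names (`log_failed_request`, `start_logged_process`) are hypothetical, and only the flagged calls themselves (`logger.warn` at ydb/tests/library/sqs/requests_client.py:140, and the `subprocess.Popen` with unclosed redirect files at library/recipes/common/__init__.py:29) come from the log:

```python
import logging
import subprocess

logger = logging.getLogger(__name__)

# Fix for the repeated DeprecationWarning: logger.warn() is a deprecated
# alias of logger.warning() and can be replaced one-for-one.
def log_failed_request(code, reason, text):  # hypothetical wrapper
    logger.warning(
        "Last request failed with code {}, reason '{}' and text '{}'".format(
            code, reason, text))

# Fix for the ResourceWarning about unclosed files: keep explicit references
# to the redirect files instead of letting garbage collection reclaim them.
# Since the spawned process here is a long-running server (moto_server), the
# handles should be closed on recipe teardown, not immediately after Popen.
def start_logged_process(cmd, out_path, err_path):  # hypothetical helper
    out = open(out_path, "w", encoding="utf-8")
    err = open(err_path, "w", encoding="utf-8")
    process = subprocess.Popen(cmd, stdout=out, stderr=err)
    # Teardown is then expected to run:
    #   process.terminate(); process.wait(); out.close(); err.close()
    return process, out, err
```

Neither warning affects test outcomes (both suites report [GOOD]), which is why they surface only in the per-test command stderr above.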